Depends on D151397.
This patch follows the patch set of D151395 and updates all of the
remaining fixed-point intrinsics to model vxrm control, adding a
rounding-mode operand to `vsmul`, `vssra`, `vssrl`, `vnclip`, and
`vnclipu`.
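For example, the unmasked `vnclip` intrinsic now takes the rounding mode
as an explicit argument between the shift operand and `vl` (the helper and
variable names below are illustrative only, not part of the patch):

```c
#include <riscv_vector.h>

// Narrowing clip with an explicit rounding mode (round-to-nearest-up).
vint8m1_t narrow_clip(vint16m2_t wide, vuint8m1_t shift, size_t vl) {
  // Before this change: __riscv_vnclip_wv_i8m1(wide, shift, vl);
  return __riscv_vnclip_wv_i8m1(wide, shift, __RISCV_VXRM_RNU, vl);
}
```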
Reviewed By: craig.topper
Differential Revision: https://reviews.llvm.org/D152879
[["vv", "v", "vvUv"],
["vx", "v", "vvz"]]>;
+multiclass RVVSignedShiftBuiltinSetRoundingMode
+ : RVVOutOp1BuiltinSet<NAME, "csil",
+ [["vv", "v", "vvUvu"],
+ ["vx", "v", "vvzu"]]>;
+
multiclass RVVUnsignedShiftBuiltinSet
: RVVOutOp1BuiltinSet<NAME, "csil",
[["vv", "Uv", "UvUvUv"],
["vx", "Uv", "UvUvz"]]>;
+multiclass RVVUnsignedShiftBuiltinSetRoundingMode
+ : RVVOutOp1BuiltinSet<NAME, "csil",
+ [["vv", "Uv", "UvUvUvu"],
+ ["vx", "Uv", "UvUvzu"]]>;
+
multiclass RVVShiftBuiltinSet
: RVVSignedShiftBuiltinSet,
RVVUnsignedShiftBuiltinSet;
: RVVOutOp0Op1BuiltinSet<NAME, "csil",
[["wv", "v", "vwUv"],
["wx", "v", "vwz"]]>;
+
+ multiclass RVVSignedNShiftBuiltinSetRoundingMode
+ : RVVOutOp0Op1BuiltinSet<NAME, "csil",
+ [["wv", "v", "vwUvu"],
+ ["wx", "v", "vwzu"]]>;
+
multiclass RVVUnsignedNShiftBuiltinSet
: RVVOutOp0Op1BuiltinSet<NAME, "csil",
[["wv", "Uv", "UvUwUv"],
["wx", "Uv", "UvUwz"]]>;
+
+ multiclass RVVUnsignedNShiftBuiltinSetRoundingMode
+ : RVVOutOp0Op1BuiltinSet<NAME, "csil",
+ [["wv", "Uv", "UvUwUvu"],
+ ["wx", "Uv", "UvUwzu"]]>;
+
}
multiclass RVVCarryinBuiltinSet
defm vssubu : RVVUnsignedBinBuiltinSet;
defm vssub : RVVSignedBinBuiltinSet;
-// 13.2. Vector Single-Width Averaging Add and Subtract
let ManualCodegen = [{
{
// LLVM intrinsic
return Builder.CreateCall(F, Operands, "");
}
}] in {
+ // 13.2. Vector Single-Width Averaging Add and Subtract
defm vaaddu : RVVUnsignedBinBuiltinSetRoundingMode;
defm vaadd : RVVSignedBinBuiltinSetRoundingMode;
defm vasubu : RVVUnsignedBinBuiltinSetRoundingMode;
defm vasub : RVVSignedBinBuiltinSetRoundingMode;
-}
-// 13.3. Vector Single-Width Fractional Multiply with Rounding and Saturation
-let RequiredFeatures = ["FullMultiply"] in {
-defm vsmul : RVVSignedBinBuiltinSet;
+ // 13.3. Vector Single-Width Fractional Multiply with Rounding and Saturation
+ let RequiredFeatures = ["FullMultiply"] in {
+ defm vsmul : RVVSignedBinBuiltinSetRoundingMode;
+ }
+
+ // 13.4. Vector Single-Width Scaling Shift Instructions
+ defm vssrl : RVVUnsignedShiftBuiltinSetRoundingMode;
+ defm vssra : RVVSignedShiftBuiltinSetRoundingMode;
}
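A usage sketch of the updated single-width signatures (the function and
variable names are illustrative; `__RISCV_VXRM_RDN` is assumed from the
standard vxrm encoding exposed by `riscv_vector.h`):

```c
#include <riscv_vector.h>

// vsmul and vssra now take the vxrm rounding mode immediately before vl.
vint16m1_t fixed_point_ops(vint16m1_t a, vint16m1_t b, size_t vl) {
  vint16m1_t p = __riscv_vsmul_vv_i16m1(a, b, __RISCV_VXRM_RNU, vl);
  return __riscv_vssra_vx_i16m1(p, 1, __RISCV_VXRM_RDN, vl);
}
```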
-// 13.4. Vector Single-Width Scaling Shift Instructions
-defm vssrl : RVVUnsignedShiftBuiltinSet;
-defm vssra : RVVSignedShiftBuiltinSet;
+let ManualCodegen = [{
+ {
+ // LLVM intrinsic
+    // Unmasked: (passthru, op0, op1, vxrm, vl)
+    // Masked:   (passthru, op0, op1, mask, vxrm, vl, policy)
-// 13.5. Vector Narrowing Fixed-Point Clip Instructions
-defm vnclipu : RVVUnsignedNShiftBuiltinSet;
-defm vnclip : RVVSignedNShiftBuiltinSet;
+ SmallVector<llvm::Value*, 7> Operands;
+ bool HasMaskedOff = !(
+ (IsMasked && (PolicyAttrs & RVV_VTA) && (PolicyAttrs & RVV_VMA)) ||
+ (!IsMasked && PolicyAttrs & RVV_VTA));
+ unsigned Offset = IsMasked ?
+ (HasMaskedOff ? 2 : 1) : (HasMaskedOff ? 1 : 0);
+
+ if (!HasMaskedOff)
+ Operands.push_back(llvm::PoisonValue::get(ResultType));
+ else
+ Operands.push_back(Ops[IsMasked ? 1 : 0]);
+
+ Operands.push_back(Ops[Offset]); // op0
+ Operands.push_back(Ops[Offset + 1]); // op1
+
+ if (IsMasked)
+ Operands.push_back(Ops[0]); // mask
+
+ Operands.push_back(Ops[Offset + 2]); // vxrm
+ Operands.push_back(Ops[Offset + 3]); // vl
+
+ if (IsMasked)
+ Operands.push_back(ConstantInt::get(Ops.back()->getType(), PolicyAttrs));
+
+ IntrinsicTypes = {ResultType, Ops[Offset]->getType(), Ops[Offset + 1]->getType(),
+ Ops.back()->getType()};
+ llvm::Function *F = CGM.getIntrinsic(ID, IntrinsicTypes);
+ return Builder.CreateCall(F, Operands, "");
+ }
+}] in {
+ // 13.5. Vector Narrowing Fixed-Point Clip Instructions
+ defm vnclipu : RVVUnsignedNShiftBuiltinSetRoundingMode;
+ defm vnclip : RVVSignedNShiftBuiltinSetRoundingMode;
+}
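A sketch of the operand order the codegen above expects for the masked
form (names are illustrative): the mask comes first, the rounding mode sits
between the shift operand and `vl`, and the frontend appends the policy
operand itself (3, i.e. TAMA, for the plain `_m` form):

```c
#include <riscv_vector.h>

// Masked narrowing clip: mask, operands, vxrm, vl.
vint8m1_t narrow_clip_m(vbool8_t mask, vint16m2_t wide, vuint8m1_t shift,
                        size_t vl) {
  return __riscv_vnclip_wv_i8m1_m(mask, wide, shift, __RISCV_VXRM_RNU, vl);
}
```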
// 14. Vector Floating-Point Instructions
// 14.2. Vector Single-Width Floating-Point Add/Subtract Instructions
case RISCVVector::BI__builtin_rvv_vasubu_vx_ta:
case RISCVVector::BI__builtin_rvv_vasub_vv_ta:
case RISCVVector::BI__builtin_rvv_vasub_vx_ta:
+ case RISCVVector::BI__builtin_rvv_vsmul_vv_ta:
+ case RISCVVector::BI__builtin_rvv_vsmul_vx_ta:
+ case RISCVVector::BI__builtin_rvv_vssra_vv_ta:
+ case RISCVVector::BI__builtin_rvv_vssra_vx_ta:
+ case RISCVVector::BI__builtin_rvv_vssrl_vv_ta:
+ case RISCVVector::BI__builtin_rvv_vssrl_vx_ta:
+ case RISCVVector::BI__builtin_rvv_vnclip_wv_ta:
+ case RISCVVector::BI__builtin_rvv_vnclip_wx_ta:
+ case RISCVVector::BI__builtin_rvv_vnclipu_wv_ta:
+ case RISCVVector::BI__builtin_rvv_vnclipu_wx_ta:
return SemaBuiltinConstantArgRange(TheCall, 2, 0, 3);
case RISCVVector::BI__builtin_rvv_vaaddu_vv_tu:
case RISCVVector::BI__builtin_rvv_vaaddu_vx_tu:
case RISCVVector::BI__builtin_rvv_vasubu_vx_tu:
case RISCVVector::BI__builtin_rvv_vasub_vv_tu:
case RISCVVector::BI__builtin_rvv_vasub_vx_tu:
+ case RISCVVector::BI__builtin_rvv_vsmul_vv_tu:
+ case RISCVVector::BI__builtin_rvv_vsmul_vx_tu:
+ case RISCVVector::BI__builtin_rvv_vssra_vv_tu:
+ case RISCVVector::BI__builtin_rvv_vssra_vx_tu:
+ case RISCVVector::BI__builtin_rvv_vssrl_vv_tu:
+ case RISCVVector::BI__builtin_rvv_vssrl_vx_tu:
+ case RISCVVector::BI__builtin_rvv_vnclip_wv_tu:
+ case RISCVVector::BI__builtin_rvv_vnclip_wx_tu:
+ case RISCVVector::BI__builtin_rvv_vnclipu_wv_tu:
+ case RISCVVector::BI__builtin_rvv_vnclipu_wx_tu:
case RISCVVector::BI__builtin_rvv_vaaddu_vv_tama:
case RISCVVector::BI__builtin_rvv_vaaddu_vx_tama:
case RISCVVector::BI__builtin_rvv_vaadd_vv_tama:
case RISCVVector::BI__builtin_rvv_vasubu_vx_tama:
case RISCVVector::BI__builtin_rvv_vasub_vv_tama:
case RISCVVector::BI__builtin_rvv_vasub_vx_tama:
+ case RISCVVector::BI__builtin_rvv_vsmul_vv_tama:
+ case RISCVVector::BI__builtin_rvv_vsmul_vx_tama:
+ case RISCVVector::BI__builtin_rvv_vssra_vv_tama:
+ case RISCVVector::BI__builtin_rvv_vssra_vx_tama:
+ case RISCVVector::BI__builtin_rvv_vssrl_vv_tama:
+ case RISCVVector::BI__builtin_rvv_vssrl_vx_tama:
+ case RISCVVector::BI__builtin_rvv_vnclip_wv_tama:
+ case RISCVVector::BI__builtin_rvv_vnclip_wx_tama:
+ case RISCVVector::BI__builtin_rvv_vnclipu_wv_tama:
+ case RISCVVector::BI__builtin_rvv_vnclipu_wx_tama:
return SemaBuiltinConstantArgRange(TheCall, 3, 0, 3);
case RISCVVector::BI__builtin_rvv_vaaddu_vv_tum:
case RISCVVector::BI__builtin_rvv_vaaddu_vv_tumu:
case RISCVVector::BI__builtin_rvv_vasub_vx_tum:
case RISCVVector::BI__builtin_rvv_vasub_vx_tumu:
case RISCVVector::BI__builtin_rvv_vasub_vx_mu:
+ case RISCVVector::BI__builtin_rvv_vsmul_vv_mu:
+ case RISCVVector::BI__builtin_rvv_vsmul_vx_mu:
+ case RISCVVector::BI__builtin_rvv_vssra_vv_mu:
+ case RISCVVector::BI__builtin_rvv_vssra_vx_mu:
+ case RISCVVector::BI__builtin_rvv_vssrl_vv_mu:
+ case RISCVVector::BI__builtin_rvv_vssrl_vx_mu:
+ case RISCVVector::BI__builtin_rvv_vnclip_wv_mu:
+ case RISCVVector::BI__builtin_rvv_vnclip_wx_mu:
+ case RISCVVector::BI__builtin_rvv_vnclipu_wv_mu:
+ case RISCVVector::BI__builtin_rvv_vnclipu_wx_mu:
+ case RISCVVector::BI__builtin_rvv_vsmul_vv_tum:
+ case RISCVVector::BI__builtin_rvv_vsmul_vx_tum:
+ case RISCVVector::BI__builtin_rvv_vssra_vv_tum:
+ case RISCVVector::BI__builtin_rvv_vssra_vx_tum:
+ case RISCVVector::BI__builtin_rvv_vssrl_vv_tum:
+ case RISCVVector::BI__builtin_rvv_vssrl_vx_tum:
+ case RISCVVector::BI__builtin_rvv_vnclip_wv_tum:
+ case RISCVVector::BI__builtin_rvv_vnclip_wx_tum:
+ case RISCVVector::BI__builtin_rvv_vnclipu_wv_tum:
+ case RISCVVector::BI__builtin_rvv_vnclipu_wx_tum:
+ case RISCVVector::BI__builtin_rvv_vsmul_vv_tumu:
+ case RISCVVector::BI__builtin_rvv_vsmul_vx_tumu:
+ case RISCVVector::BI__builtin_rvv_vssra_vv_tumu:
+ case RISCVVector::BI__builtin_rvv_vssra_vx_tumu:
+ case RISCVVector::BI__builtin_rvv_vssrl_vv_tumu:
+ case RISCVVector::BI__builtin_rvv_vssrl_vx_tumu:
+ case RISCVVector::BI__builtin_rvv_vnclip_wv_tumu:
+ case RISCVVector::BI__builtin_rvv_vnclip_wx_tumu:
+ case RISCVVector::BI__builtin_rvv_vnclipu_wv_tumu:
+ case RISCVVector::BI__builtin_rvv_vnclipu_wx_tumu:
return SemaBuiltinConstantArgRange(TheCall, 4, 0, 3);
case RISCV::BI__builtin_riscv_ntl_load:
case RISCV::BI__builtin_riscv_ntl_store:
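The checks above require the vxrm argument to be an integer constant
expression in [0, 3]; the named modes from `riscv_vector.h`
(`__RISCV_VXRM_RNU`/`RNE`/`RDN`/`ROD`) are assumed to cover exactly that
range. A minimal sketch with illustrative names:

```c
#include <riscv_vector.h>

// A run-time vxrm value would be rejected by the constant-range check above.
vuint8m1_t clip_u(vuint16m2_t wide, size_t vl) {
  return __riscv_vnclipu_wx_u8m1(wide, 4, __RISCV_VXRM_RNE, vl);
}
```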
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vnclip_wv_i8mf8
// CHECK-RV64-SAME: (<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i8> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vnclip.nxv1i8.nxv1i16.nxv1i8.i64(<vscale x 1 x i8> poison, <vscale x 1 x i16> [[OP1]], <vscale x 1 x i8> [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vnclip.nxv1i8.nxv1i16.nxv1i8.i64(<vscale x 1 x i8> poison, <vscale x 1 x i16> [[OP1]], <vscale x 1 x i8> [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vnclip_wv_i8mf8(vint16mf4_t op1, vuint8mf8_t shift, size_t vl) {
- return __riscv_vnclip_wv_i8mf8(op1, shift, vl);
+ return __riscv_vnclip_wv_i8mf8(op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vnclip_wx_i8mf8
// CHECK-RV64-SAME: (<vscale x 1 x i16> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vnclip.nxv1i8.nxv1i16.i64.i64(<vscale x 1 x i8> poison, <vscale x 1 x i16> [[OP1]], i64 [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vnclip.nxv1i8.nxv1i16.i64.i64(<vscale x 1 x i8> poison, <vscale x 1 x i16> [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vnclip_wx_i8mf8(vint16mf4_t op1, size_t shift, size_t vl) {
- return __riscv_vnclip_wx_i8mf8(op1, shift, vl);
+ return __riscv_vnclip_wx_i8mf8(op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vnclip_wv_i8mf4
// CHECK-RV64-SAME: (<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i8> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vnclip.nxv2i8.nxv2i16.nxv2i8.i64(<vscale x 2 x i8> poison, <vscale x 2 x i16> [[OP1]], <vscale x 2 x i8> [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vnclip.nxv2i8.nxv2i16.nxv2i8.i64(<vscale x 2 x i8> poison, <vscale x 2 x i16> [[OP1]], <vscale x 2 x i8> [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vnclip_wv_i8mf4(vint16mf2_t op1, vuint8mf4_t shift, size_t vl) {
- return __riscv_vnclip_wv_i8mf4(op1, shift, vl);
+ return __riscv_vnclip_wv_i8mf4(op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vnclip_wx_i8mf4
// CHECK-RV64-SAME: (<vscale x 2 x i16> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vnclip.nxv2i8.nxv2i16.i64.i64(<vscale x 2 x i8> poison, <vscale x 2 x i16> [[OP1]], i64 [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vnclip.nxv2i8.nxv2i16.i64.i64(<vscale x 2 x i8> poison, <vscale x 2 x i16> [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vnclip_wx_i8mf4(vint16mf2_t op1, size_t shift, size_t vl) {
- return __riscv_vnclip_wx_i8mf4(op1, shift, vl);
+ return __riscv_vnclip_wx_i8mf4(op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vnclip_wv_i8mf2
// CHECK-RV64-SAME: (<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i8> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vnclip.nxv4i8.nxv4i16.nxv4i8.i64(<vscale x 4 x i8> poison, <vscale x 4 x i16> [[OP1]], <vscale x 4 x i8> [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vnclip.nxv4i8.nxv4i16.nxv4i8.i64(<vscale x 4 x i8> poison, <vscale x 4 x i16> [[OP1]], <vscale x 4 x i8> [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vnclip_wv_i8mf2(vint16m1_t op1, vuint8mf2_t shift, size_t vl) {
- return __riscv_vnclip_wv_i8mf2(op1, shift, vl);
+ return __riscv_vnclip_wv_i8mf2(op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vnclip_wx_i8mf2
// CHECK-RV64-SAME: (<vscale x 4 x i16> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vnclip.nxv4i8.nxv4i16.i64.i64(<vscale x 4 x i8> poison, <vscale x 4 x i16> [[OP1]], i64 [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vnclip.nxv4i8.nxv4i16.i64.i64(<vscale x 4 x i8> poison, <vscale x 4 x i16> [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vnclip_wx_i8mf2(vint16m1_t op1, size_t shift, size_t vl) {
- return __riscv_vnclip_wx_i8mf2(op1, shift, vl);
+ return __riscv_vnclip_wx_i8mf2(op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vnclip_wv_i8m1
// CHECK-RV64-SAME: (<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i8> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vnclip.nxv8i8.nxv8i16.nxv8i8.i64(<vscale x 8 x i8> poison, <vscale x 8 x i16> [[OP1]], <vscale x 8 x i8> [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vnclip.nxv8i8.nxv8i16.nxv8i8.i64(<vscale x 8 x i8> poison, <vscale x 8 x i16> [[OP1]], <vscale x 8 x i8> [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vnclip_wv_i8m1(vint16m2_t op1, vuint8m1_t shift, size_t vl) {
- return __riscv_vnclip_wv_i8m1(op1, shift, vl);
+ return __riscv_vnclip_wv_i8m1(op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vnclip_wx_i8m1
// CHECK-RV64-SAME: (<vscale x 8 x i16> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vnclip.nxv8i8.nxv8i16.i64.i64(<vscale x 8 x i8> poison, <vscale x 8 x i16> [[OP1]], i64 [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vnclip.nxv8i8.nxv8i16.i64.i64(<vscale x 8 x i8> poison, <vscale x 8 x i16> [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vnclip_wx_i8m1(vint16m2_t op1, size_t shift, size_t vl) {
- return __riscv_vnclip_wx_i8m1(op1, shift, vl);
+ return __riscv_vnclip_wx_i8m1(op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vnclip_wv_i8m2
// CHECK-RV64-SAME: (<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i8> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vnclip.nxv16i8.nxv16i16.nxv16i8.i64(<vscale x 16 x i8> poison, <vscale x 16 x i16> [[OP1]], <vscale x 16 x i8> [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vnclip.nxv16i8.nxv16i16.nxv16i8.i64(<vscale x 16 x i8> poison, <vscale x 16 x i16> [[OP1]], <vscale x 16 x i8> [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vnclip_wv_i8m2(vint16m4_t op1, vuint8m2_t shift, size_t vl) {
- return __riscv_vnclip_wv_i8m2(op1, shift, vl);
+ return __riscv_vnclip_wv_i8m2(op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vnclip_wx_i8m2
// CHECK-RV64-SAME: (<vscale x 16 x i16> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vnclip.nxv16i8.nxv16i16.i64.i64(<vscale x 16 x i8> poison, <vscale x 16 x i16> [[OP1]], i64 [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vnclip.nxv16i8.nxv16i16.i64.i64(<vscale x 16 x i8> poison, <vscale x 16 x i16> [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vnclip_wx_i8m2(vint16m4_t op1, size_t shift, size_t vl) {
- return __riscv_vnclip_wx_i8m2(op1, shift, vl);
+ return __riscv_vnclip_wx_i8m2(op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vnclip_wv_i8m4
// CHECK-RV64-SAME: (<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i8> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vnclip.nxv32i8.nxv32i16.nxv32i8.i64(<vscale x 32 x i8> poison, <vscale x 32 x i16> [[OP1]], <vscale x 32 x i8> [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vnclip.nxv32i8.nxv32i16.nxv32i8.i64(<vscale x 32 x i8> poison, <vscale x 32 x i16> [[OP1]], <vscale x 32 x i8> [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vnclip_wv_i8m4(vint16m8_t op1, vuint8m4_t shift, size_t vl) {
- return __riscv_vnclip_wv_i8m4(op1, shift, vl);
+ return __riscv_vnclip_wv_i8m4(op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vnclip_wx_i8m4
// CHECK-RV64-SAME: (<vscale x 32 x i16> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vnclip.nxv32i8.nxv32i16.i64.i64(<vscale x 32 x i8> poison, <vscale x 32 x i16> [[OP1]], i64 [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vnclip.nxv32i8.nxv32i16.i64.i64(<vscale x 32 x i8> poison, <vscale x 32 x i16> [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vnclip_wx_i8m4(vint16m8_t op1, size_t shift, size_t vl) {
- return __riscv_vnclip_wx_i8m4(op1, shift, vl);
+ return __riscv_vnclip_wx_i8m4(op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i16> @test_vnclip_wv_i16mf4
// CHECK-RV64-SAME: (<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i16> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vnclip.nxv1i16.nxv1i32.nxv1i16.i64(<vscale x 1 x i16> poison, <vscale x 1 x i32> [[OP1]], <vscale x 1 x i16> [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vnclip.nxv1i16.nxv1i32.nxv1i16.i64(<vscale x 1 x i16> poison, <vscale x 1 x i32> [[OP1]], <vscale x 1 x i16> [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vnclip_wv_i16mf4(vint32mf2_t op1, vuint16mf4_t shift, size_t vl) {
- return __riscv_vnclip_wv_i16mf4(op1, shift, vl);
+ return __riscv_vnclip_wv_i16mf4(op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i16> @test_vnclip_wx_i16mf4
// CHECK-RV64-SAME: (<vscale x 1 x i32> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vnclip.nxv1i16.nxv1i32.i64.i64(<vscale x 1 x i16> poison, <vscale x 1 x i32> [[OP1]], i64 [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vnclip.nxv1i16.nxv1i32.i64.i64(<vscale x 1 x i16> poison, <vscale x 1 x i32> [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vnclip_wx_i16mf4(vint32mf2_t op1, size_t shift, size_t vl) {
- return __riscv_vnclip_wx_i16mf4(op1, shift, vl);
+ return __riscv_vnclip_wx_i16mf4(op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i16> @test_vnclip_wv_i16mf2
// CHECK-RV64-SAME: (<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i16> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vnclip.nxv2i16.nxv2i32.nxv2i16.i64(<vscale x 2 x i16> poison, <vscale x 2 x i32> [[OP1]], <vscale x 2 x i16> [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vnclip.nxv2i16.nxv2i32.nxv2i16.i64(<vscale x 2 x i16> poison, <vscale x 2 x i32> [[OP1]], <vscale x 2 x i16> [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vnclip_wv_i16mf2(vint32m1_t op1, vuint16mf2_t shift, size_t vl) {
- return __riscv_vnclip_wv_i16mf2(op1, shift, vl);
+ return __riscv_vnclip_wv_i16mf2(op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i16> @test_vnclip_wx_i16mf2
// CHECK-RV64-SAME: (<vscale x 2 x i32> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vnclip.nxv2i16.nxv2i32.i64.i64(<vscale x 2 x i16> poison, <vscale x 2 x i32> [[OP1]], i64 [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vnclip.nxv2i16.nxv2i32.i64.i64(<vscale x 2 x i16> poison, <vscale x 2 x i32> [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vnclip_wx_i16mf2(vint32m1_t op1, size_t shift, size_t vl) {
- return __riscv_vnclip_wx_i16mf2(op1, shift, vl);
+ return __riscv_vnclip_wx_i16mf2(op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vnclip_wv_i16m1
// CHECK-RV64-SAME: (<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i16> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vnclip.nxv4i16.nxv4i32.nxv4i16.i64(<vscale x 4 x i16> poison, <vscale x 4 x i32> [[OP1]], <vscale x 4 x i16> [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vnclip.nxv4i16.nxv4i32.nxv4i16.i64(<vscale x 4 x i16> poison, <vscale x 4 x i32> [[OP1]], <vscale x 4 x i16> [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vnclip_wv_i16m1(vint32m2_t op1, vuint16m1_t shift, size_t vl) {
- return __riscv_vnclip_wv_i16m1(op1, shift, vl);
+ return __riscv_vnclip_wv_i16m1(op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vnclip_wx_i16m1
// CHECK-RV64-SAME: (<vscale x 4 x i32> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vnclip.nxv4i16.nxv4i32.i64.i64(<vscale x 4 x i16> poison, <vscale x 4 x i32> [[OP1]], i64 [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vnclip.nxv4i16.nxv4i32.i64.i64(<vscale x 4 x i16> poison, <vscale x 4 x i32> [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vnclip_wx_i16m1(vint32m2_t op1, size_t shift, size_t vl) {
- return __riscv_vnclip_wx_i16m1(op1, shift, vl);
+ return __riscv_vnclip_wx_i16m1(op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i16> @test_vnclip_wv_i16m2
// CHECK-RV64-SAME: (<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i16> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vnclip.nxv8i16.nxv8i32.nxv8i16.i64(<vscale x 8 x i16> poison, <vscale x 8 x i32> [[OP1]], <vscale x 8 x i16> [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vnclip.nxv8i16.nxv8i32.nxv8i16.i64(<vscale x 8 x i16> poison, <vscale x 8 x i32> [[OP1]], <vscale x 8 x i16> [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vnclip_wv_i16m2(vint32m4_t op1, vuint16m2_t shift, size_t vl) {
- return __riscv_vnclip_wv_i16m2(op1, shift, vl);
+ return __riscv_vnclip_wv_i16m2(op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i16> @test_vnclip_wx_i16m2
// CHECK-RV64-SAME: (<vscale x 8 x i32> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vnclip.nxv8i16.nxv8i32.i64.i64(<vscale x 8 x i16> poison, <vscale x 8 x i32> [[OP1]], i64 [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vnclip.nxv8i16.nxv8i32.i64.i64(<vscale x 8 x i16> poison, <vscale x 8 x i32> [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vnclip_wx_i16m2(vint32m4_t op1, size_t shift, size_t vl) {
- return __riscv_vnclip_wx_i16m2(op1, shift, vl);
+ return __riscv_vnclip_wx_i16m2(op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i16> @test_vnclip_wv_i16m4
// CHECK-RV64-SAME: (<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i16> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vnclip.nxv16i16.nxv16i32.nxv16i16.i64(<vscale x 16 x i16> poison, <vscale x 16 x i32> [[OP1]], <vscale x 16 x i16> [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vnclip.nxv16i16.nxv16i32.nxv16i16.i64(<vscale x 16 x i16> poison, <vscale x 16 x i32> [[OP1]], <vscale x 16 x i16> [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vnclip_wv_i16m4(vint32m8_t op1, vuint16m4_t shift, size_t vl) {
- return __riscv_vnclip_wv_i16m4(op1, shift, vl);
+ return __riscv_vnclip_wv_i16m4(op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i16> @test_vnclip_wx_i16m4
// CHECK-RV64-SAME: (<vscale x 16 x i32> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vnclip.nxv16i16.nxv16i32.i64.i64(<vscale x 16 x i16> poison, <vscale x 16 x i32> [[OP1]], i64 [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vnclip.nxv16i16.nxv16i32.i64.i64(<vscale x 16 x i16> poison, <vscale x 16 x i32> [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vnclip_wx_i16m4(vint32m8_t op1, size_t shift, size_t vl) {
- return __riscv_vnclip_wx_i16m4(op1, shift, vl);
+ return __riscv_vnclip_wx_i16m4(op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i32> @test_vnclip_wv_i32mf2
// CHECK-RV64-SAME: (<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i32> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnclip.nxv1i32.nxv1i64.nxv1i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i64> [[OP1]], <vscale x 1 x i32> [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnclip.nxv1i32.nxv1i64.nxv1i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i64> [[OP1]], <vscale x 1 x i32> [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vnclip_wv_i32mf2(vint64m1_t op1, vuint32mf2_t shift, size_t vl) {
- return __riscv_vnclip_wv_i32mf2(op1, shift, vl);
+ return __riscv_vnclip_wv_i32mf2(op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i32> @test_vnclip_wx_i32mf2
// CHECK-RV64-SAME: (<vscale x 1 x i64> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnclip.nxv1i32.nxv1i64.i64.i64(<vscale x 1 x i32> poison, <vscale x 1 x i64> [[OP1]], i64 [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnclip.nxv1i32.nxv1i64.i64.i64(<vscale x 1 x i32> poison, <vscale x 1 x i64> [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vnclip_wx_i32mf2(vint64m1_t op1, size_t shift, size_t vl) {
- return __riscv_vnclip_wx_i32mf2(op1, shift, vl);
+ return __riscv_vnclip_wx_i32mf2(op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i32> @test_vnclip_wv_i32m1
// CHECK-RV64-SAME: (<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i32> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vnclip.nxv2i32.nxv2i64.nxv2i32.i64(<vscale x 2 x i32> poison, <vscale x 2 x i64> [[OP1]], <vscale x 2 x i32> [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vnclip.nxv2i32.nxv2i64.nxv2i32.i64(<vscale x 2 x i32> poison, <vscale x 2 x i64> [[OP1]], <vscale x 2 x i32> [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vnclip_wv_i32m1(vint64m2_t op1, vuint32m1_t shift, size_t vl) {
- return __riscv_vnclip_wv_i32m1(op1, shift, vl);
+ return __riscv_vnclip_wv_i32m1(op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i32> @test_vnclip_wx_i32m1
// CHECK-RV64-SAME: (<vscale x 2 x i64> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vnclip.nxv2i32.nxv2i64.i64.i64(<vscale x 2 x i32> poison, <vscale x 2 x i64> [[OP1]], i64 [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vnclip.nxv2i32.nxv2i64.i64.i64(<vscale x 2 x i32> poison, <vscale x 2 x i64> [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vnclip_wx_i32m1(vint64m2_t op1, size_t shift, size_t vl) {
- return __riscv_vnclip_wx_i32m1(op1, shift, vl);
+ return __riscv_vnclip_wx_i32m1(op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i32> @test_vnclip_wv_i32m2
// CHECK-RV64-SAME: (<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i32> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vnclip.nxv4i32.nxv4i64.nxv4i32.i64(<vscale x 4 x i32> poison, <vscale x 4 x i64> [[OP1]], <vscale x 4 x i32> [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vnclip.nxv4i32.nxv4i64.nxv4i32.i64(<vscale x 4 x i32> poison, <vscale x 4 x i64> [[OP1]], <vscale x 4 x i32> [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vnclip_wv_i32m2(vint64m4_t op1, vuint32m2_t shift, size_t vl) {
- return __riscv_vnclip_wv_i32m2(op1, shift, vl);
+ return __riscv_vnclip_wv_i32m2(op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i32> @test_vnclip_wx_i32m2
// CHECK-RV64-SAME: (<vscale x 4 x i64> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vnclip.nxv4i32.nxv4i64.i64.i64(<vscale x 4 x i32> poison, <vscale x 4 x i64> [[OP1]], i64 [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vnclip.nxv4i32.nxv4i64.i64.i64(<vscale x 4 x i32> poison, <vscale x 4 x i64> [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vnclip_wx_i32m2(vint64m4_t op1, size_t shift, size_t vl) {
- return __riscv_vnclip_wx_i32m2(op1, shift, vl);
+ return __riscv_vnclip_wx_i32m2(op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i32> @test_vnclip_wv_i32m4
// CHECK-RV64-SAME: (<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i32> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vnclip.nxv8i32.nxv8i64.nxv8i32.i64(<vscale x 8 x i32> poison, <vscale x 8 x i64> [[OP1]], <vscale x 8 x i32> [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vnclip.nxv8i32.nxv8i64.nxv8i32.i64(<vscale x 8 x i32> poison, <vscale x 8 x i64> [[OP1]], <vscale x 8 x i32> [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vnclip_wv_i32m4(vint64m8_t op1, vuint32m4_t shift, size_t vl) {
- return __riscv_vnclip_wv_i32m4(op1, shift, vl);
+ return __riscv_vnclip_wv_i32m4(op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i32> @test_vnclip_wx_i32m4
// CHECK-RV64-SAME: (<vscale x 8 x i64> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vnclip.nxv8i32.nxv8i64.i64.i64(<vscale x 8 x i32> poison, <vscale x 8 x i64> [[OP1]], i64 [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vnclip.nxv8i32.nxv8i64.i64.i64(<vscale x 8 x i32> poison, <vscale x 8 x i64> [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vnclip_wx_i32m4(vint64m8_t op1, size_t shift, size_t vl) {
- return __riscv_vnclip_wx_i32m4(op1, shift, vl);
+ return __riscv_vnclip_wx_i32m4(op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vnclip_wv_i8mf8_m
// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i8> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vnclip.mask.nxv1i8.nxv1i16.nxv1i8.i64(<vscale x 1 x i8> poison, <vscale x 1 x i16> [[OP1]], <vscale x 1 x i8> [[SHIFT]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vnclip.mask.nxv1i8.nxv1i16.nxv1i8.i64(<vscale x 1 x i8> poison, <vscale x 1 x i16> [[OP1]], <vscale x 1 x i8> [[SHIFT]], <vscale x 1 x i1> [[MASK]], i64 0, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vnclip_wv_i8mf8_m(vbool64_t mask, vint16mf4_t op1, vuint8mf8_t shift, size_t vl) {
- return __riscv_vnclip_wv_i8mf8_m(mask, op1, shift, vl);
+ return __riscv_vnclip_wv_i8mf8_m(mask, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vnclip_wx_i8mf8_m
// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vnclip.mask.nxv1i8.nxv1i16.i64.i64(<vscale x 1 x i8> poison, <vscale x 1 x i16> [[OP1]], i64 [[SHIFT]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vnclip.mask.nxv1i8.nxv1i16.i64.i64(<vscale x 1 x i8> poison, <vscale x 1 x i16> [[OP1]], i64 [[SHIFT]], <vscale x 1 x i1> [[MASK]], i64 0, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vnclip_wx_i8mf8_m(vbool64_t mask, vint16mf4_t op1, size_t shift, size_t vl) {
- return __riscv_vnclip_wx_i8mf8_m(mask, op1, shift, vl);
+ return __riscv_vnclip_wx_i8mf8_m(mask, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vnclip_wv_i8mf4_m
// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i8> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vnclip.mask.nxv2i8.nxv2i16.nxv2i8.i64(<vscale x 2 x i8> poison, <vscale x 2 x i16> [[OP1]], <vscale x 2 x i8> [[SHIFT]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vnclip.mask.nxv2i8.nxv2i16.nxv2i8.i64(<vscale x 2 x i8> poison, <vscale x 2 x i16> [[OP1]], <vscale x 2 x i8> [[SHIFT]], <vscale x 2 x i1> [[MASK]], i64 0, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vnclip_wv_i8mf4_m(vbool32_t mask, vint16mf2_t op1, vuint8mf4_t shift, size_t vl) {
- return __riscv_vnclip_wv_i8mf4_m(mask, op1, shift, vl);
+ return __riscv_vnclip_wv_i8mf4_m(mask, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vnclip_wx_i8mf4_m
// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vnclip.mask.nxv2i8.nxv2i16.i64.i64(<vscale x 2 x i8> poison, <vscale x 2 x i16> [[OP1]], i64 [[SHIFT]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vnclip.mask.nxv2i8.nxv2i16.i64.i64(<vscale x 2 x i8> poison, <vscale x 2 x i16> [[OP1]], i64 [[SHIFT]], <vscale x 2 x i1> [[MASK]], i64 0, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vnclip_wx_i8mf4_m(vbool32_t mask, vint16mf2_t op1, size_t shift, size_t vl) {
- return __riscv_vnclip_wx_i8mf4_m(mask, op1, shift, vl);
+ return __riscv_vnclip_wx_i8mf4_m(mask, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vnclip_wv_i8mf2_m
// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i8> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vnclip.mask.nxv4i8.nxv4i16.nxv4i8.i64(<vscale x 4 x i8> poison, <vscale x 4 x i16> [[OP1]], <vscale x 4 x i8> [[SHIFT]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vnclip.mask.nxv4i8.nxv4i16.nxv4i8.i64(<vscale x 4 x i8> poison, <vscale x 4 x i16> [[OP1]], <vscale x 4 x i8> [[SHIFT]], <vscale x 4 x i1> [[MASK]], i64 0, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vnclip_wv_i8mf2_m(vbool16_t mask, vint16m1_t op1, vuint8mf2_t shift, size_t vl) {
- return __riscv_vnclip_wv_i8mf2_m(mask, op1, shift, vl);
+ return __riscv_vnclip_wv_i8mf2_m(mask, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vnclip_wx_i8mf2_m
// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vnclip.mask.nxv4i8.nxv4i16.i64.i64(<vscale x 4 x i8> poison, <vscale x 4 x i16> [[OP1]], i64 [[SHIFT]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vnclip.mask.nxv4i8.nxv4i16.i64.i64(<vscale x 4 x i8> poison, <vscale x 4 x i16> [[OP1]], i64 [[SHIFT]], <vscale x 4 x i1> [[MASK]], i64 0, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vnclip_wx_i8mf2_m(vbool16_t mask, vint16m1_t op1, size_t shift, size_t vl) {
- return __riscv_vnclip_wx_i8mf2_m(mask, op1, shift, vl);
+ return __riscv_vnclip_wx_i8mf2_m(mask, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vnclip_wv_i8m1_m
// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i8> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vnclip.mask.nxv8i8.nxv8i16.nxv8i8.i64(<vscale x 8 x i8> poison, <vscale x 8 x i16> [[OP1]], <vscale x 8 x i8> [[SHIFT]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vnclip.mask.nxv8i8.nxv8i16.nxv8i8.i64(<vscale x 8 x i8> poison, <vscale x 8 x i16> [[OP1]], <vscale x 8 x i8> [[SHIFT]], <vscale x 8 x i1> [[MASK]], i64 0, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vnclip_wv_i8m1_m(vbool8_t mask, vint16m2_t op1, vuint8m1_t shift, size_t vl) {
- return __riscv_vnclip_wv_i8m1_m(mask, op1, shift, vl);
+ return __riscv_vnclip_wv_i8m1_m(mask, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vnclip_wx_i8m1_m
// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vnclip.mask.nxv8i8.nxv8i16.i64.i64(<vscale x 8 x i8> poison, <vscale x 8 x i16> [[OP1]], i64 [[SHIFT]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vnclip.mask.nxv8i8.nxv8i16.i64.i64(<vscale x 8 x i8> poison, <vscale x 8 x i16> [[OP1]], i64 [[SHIFT]], <vscale x 8 x i1> [[MASK]], i64 0, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vnclip_wx_i8m1_m(vbool8_t mask, vint16m2_t op1, size_t shift, size_t vl) {
- return __riscv_vnclip_wx_i8m1_m(mask, op1, shift, vl);
+ return __riscv_vnclip_wx_i8m1_m(mask, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vnclip_wv_i8m2_m
// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i8> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vnclip.mask.nxv16i8.nxv16i16.nxv16i8.i64(<vscale x 16 x i8> poison, <vscale x 16 x i16> [[OP1]], <vscale x 16 x i8> [[SHIFT]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vnclip.mask.nxv16i8.nxv16i16.nxv16i8.i64(<vscale x 16 x i8> poison, <vscale x 16 x i16> [[OP1]], <vscale x 16 x i8> [[SHIFT]], <vscale x 16 x i1> [[MASK]], i64 0, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vnclip_wv_i8m2_m(vbool4_t mask, vint16m4_t op1, vuint8m2_t shift, size_t vl) {
- return __riscv_vnclip_wv_i8m2_m(mask, op1, shift, vl);
+ return __riscv_vnclip_wv_i8m2_m(mask, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vnclip_wx_i8m2_m
// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vnclip.mask.nxv16i8.nxv16i16.i64.i64(<vscale x 16 x i8> poison, <vscale x 16 x i16> [[OP1]], i64 [[SHIFT]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vnclip.mask.nxv16i8.nxv16i16.i64.i64(<vscale x 16 x i8> poison, <vscale x 16 x i16> [[OP1]], i64 [[SHIFT]], <vscale x 16 x i1> [[MASK]], i64 0, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vnclip_wx_i8m2_m(vbool4_t mask, vint16m4_t op1, size_t shift, size_t vl) {
- return __riscv_vnclip_wx_i8m2_m(mask, op1, shift, vl);
+ return __riscv_vnclip_wx_i8m2_m(mask, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vnclip_wv_i8m4_m
// CHECK-RV64-SAME: (<vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i8> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vnclip.mask.nxv32i8.nxv32i16.nxv32i8.i64(<vscale x 32 x i8> poison, <vscale x 32 x i16> [[OP1]], <vscale x 32 x i8> [[SHIFT]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vnclip.mask.nxv32i8.nxv32i16.nxv32i8.i64(<vscale x 32 x i8> poison, <vscale x 32 x i16> [[OP1]], <vscale x 32 x i8> [[SHIFT]], <vscale x 32 x i1> [[MASK]], i64 0, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vnclip_wv_i8m4_m(vbool2_t mask, vint16m8_t op1, vuint8m4_t shift, size_t vl) {
- return __riscv_vnclip_wv_i8m4_m(mask, op1, shift, vl);
+ return __riscv_vnclip_wv_i8m4_m(mask, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vnclip_wx_i8m4_m
// CHECK-RV64-SAME: (<vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vnclip.mask.nxv32i8.nxv32i16.i64.i64(<vscale x 32 x i8> poison, <vscale x 32 x i16> [[OP1]], i64 [[SHIFT]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vnclip.mask.nxv32i8.nxv32i16.i64.i64(<vscale x 32 x i8> poison, <vscale x 32 x i16> [[OP1]], i64 [[SHIFT]], <vscale x 32 x i1> [[MASK]], i64 0, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vnclip_wx_i8m4_m(vbool2_t mask, vint16m8_t op1, size_t shift, size_t vl) {
- return __riscv_vnclip_wx_i8m4_m(mask, op1, shift, vl);
+ return __riscv_vnclip_wx_i8m4_m(mask, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i16> @test_vnclip_wv_i16mf4_m
// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i16> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vnclip.mask.nxv1i16.nxv1i32.nxv1i16.i64(<vscale x 1 x i16> poison, <vscale x 1 x i32> [[OP1]], <vscale x 1 x i16> [[SHIFT]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vnclip.mask.nxv1i16.nxv1i32.nxv1i16.i64(<vscale x 1 x i16> poison, <vscale x 1 x i32> [[OP1]], <vscale x 1 x i16> [[SHIFT]], <vscale x 1 x i1> [[MASK]], i64 0, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vnclip_wv_i16mf4_m(vbool64_t mask, vint32mf2_t op1, vuint16mf4_t shift, size_t vl) {
- return __riscv_vnclip_wv_i16mf4_m(mask, op1, shift, vl);
+ return __riscv_vnclip_wv_i16mf4_m(mask, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i16> @test_vnclip_wx_i16mf4_m
// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vnclip.mask.nxv1i16.nxv1i32.i64.i64(<vscale x 1 x i16> poison, <vscale x 1 x i32> [[OP1]], i64 [[SHIFT]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vnclip.mask.nxv1i16.nxv1i32.i64.i64(<vscale x 1 x i16> poison, <vscale x 1 x i32> [[OP1]], i64 [[SHIFT]], <vscale x 1 x i1> [[MASK]], i64 0, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vnclip_wx_i16mf4_m(vbool64_t mask, vint32mf2_t op1, size_t shift, size_t vl) {
- return __riscv_vnclip_wx_i16mf4_m(mask, op1, shift, vl);
+ return __riscv_vnclip_wx_i16mf4_m(mask, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i16> @test_vnclip_wv_i16mf2_m
// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i16> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vnclip.mask.nxv2i16.nxv2i32.nxv2i16.i64(<vscale x 2 x i16> poison, <vscale x 2 x i32> [[OP1]], <vscale x 2 x i16> [[SHIFT]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vnclip.mask.nxv2i16.nxv2i32.nxv2i16.i64(<vscale x 2 x i16> poison, <vscale x 2 x i32> [[OP1]], <vscale x 2 x i16> [[SHIFT]], <vscale x 2 x i1> [[MASK]], i64 0, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vnclip_wv_i16mf2_m(vbool32_t mask, vint32m1_t op1, vuint16mf2_t shift, size_t vl) {
- return __riscv_vnclip_wv_i16mf2_m(mask, op1, shift, vl);
+ return __riscv_vnclip_wv_i16mf2_m(mask, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i16> @test_vnclip_wx_i16mf2_m
// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vnclip.mask.nxv2i16.nxv2i32.i64.i64(<vscale x 2 x i16> poison, <vscale x 2 x i32> [[OP1]], i64 [[SHIFT]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vnclip.mask.nxv2i16.nxv2i32.i64.i64(<vscale x 2 x i16> poison, <vscale x 2 x i32> [[OP1]], i64 [[SHIFT]], <vscale x 2 x i1> [[MASK]], i64 0, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vnclip_wx_i16mf2_m(vbool32_t mask, vint32m1_t op1, size_t shift, size_t vl) {
- return __riscv_vnclip_wx_i16mf2_m(mask, op1, shift, vl);
+ return __riscv_vnclip_wx_i16mf2_m(mask, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vnclip_wv_i16m1_m
// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i16> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vnclip.mask.nxv4i16.nxv4i32.nxv4i16.i64(<vscale x 4 x i16> poison, <vscale x 4 x i32> [[OP1]], <vscale x 4 x i16> [[SHIFT]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vnclip.mask.nxv4i16.nxv4i32.nxv4i16.i64(<vscale x 4 x i16> poison, <vscale x 4 x i32> [[OP1]], <vscale x 4 x i16> [[SHIFT]], <vscale x 4 x i1> [[MASK]], i64 0, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vnclip_wv_i16m1_m(vbool16_t mask, vint32m2_t op1, vuint16m1_t shift, size_t vl) {
- return __riscv_vnclip_wv_i16m1_m(mask, op1, shift, vl);
+ return __riscv_vnclip_wv_i16m1_m(mask, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vnclip_wx_i16m1_m
// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vnclip.mask.nxv4i16.nxv4i32.i64.i64(<vscale x 4 x i16> poison, <vscale x 4 x i32> [[OP1]], i64 [[SHIFT]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vnclip.mask.nxv4i16.nxv4i32.i64.i64(<vscale x 4 x i16> poison, <vscale x 4 x i32> [[OP1]], i64 [[SHIFT]], <vscale x 4 x i1> [[MASK]], i64 0, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vnclip_wx_i16m1_m(vbool16_t mask, vint32m2_t op1, size_t shift, size_t vl) {
- return __riscv_vnclip_wx_i16m1_m(mask, op1, shift, vl);
+ return __riscv_vnclip_wx_i16m1_m(mask, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i16> @test_vnclip_wv_i16m2_m
// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i16> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vnclip.mask.nxv8i16.nxv8i32.nxv8i16.i64(<vscale x 8 x i16> poison, <vscale x 8 x i32> [[OP1]], <vscale x 8 x i16> [[SHIFT]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vnclip.mask.nxv8i16.nxv8i32.nxv8i16.i64(<vscale x 8 x i16> poison, <vscale x 8 x i32> [[OP1]], <vscale x 8 x i16> [[SHIFT]], <vscale x 8 x i1> [[MASK]], i64 0, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vnclip_wv_i16m2_m(vbool8_t mask, vint32m4_t op1, vuint16m2_t shift, size_t vl) {
- return __riscv_vnclip_wv_i16m2_m(mask, op1, shift, vl);
+ return __riscv_vnclip_wv_i16m2_m(mask, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i16> @test_vnclip_wx_i16m2_m
// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vnclip.mask.nxv8i16.nxv8i32.i64.i64(<vscale x 8 x i16> poison, <vscale x 8 x i32> [[OP1]], i64 [[SHIFT]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vnclip.mask.nxv8i16.nxv8i32.i64.i64(<vscale x 8 x i16> poison, <vscale x 8 x i32> [[OP1]], i64 [[SHIFT]], <vscale x 8 x i1> [[MASK]], i64 0, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vnclip_wx_i16m2_m(vbool8_t mask, vint32m4_t op1, size_t shift, size_t vl) {
- return __riscv_vnclip_wx_i16m2_m(mask, op1, shift, vl);
+ return __riscv_vnclip_wx_i16m2_m(mask, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i16> @test_vnclip_wv_i16m4_m
// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i16> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vnclip.mask.nxv16i16.nxv16i32.nxv16i16.i64(<vscale x 16 x i16> poison, <vscale x 16 x i32> [[OP1]], <vscale x 16 x i16> [[SHIFT]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vnclip.mask.nxv16i16.nxv16i32.nxv16i16.i64(<vscale x 16 x i16> poison, <vscale x 16 x i32> [[OP1]], <vscale x 16 x i16> [[SHIFT]], <vscale x 16 x i1> [[MASK]], i64 0, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vnclip_wv_i16m4_m(vbool4_t mask, vint32m8_t op1, vuint16m4_t shift, size_t vl) {
- return __riscv_vnclip_wv_i16m4_m(mask, op1, shift, vl);
+ return __riscv_vnclip_wv_i16m4_m(mask, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i16> @test_vnclip_wx_i16m4_m
// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vnclip.mask.nxv16i16.nxv16i32.i64.i64(<vscale x 16 x i16> poison, <vscale x 16 x i32> [[OP1]], i64 [[SHIFT]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vnclip.mask.nxv16i16.nxv16i32.i64.i64(<vscale x 16 x i16> poison, <vscale x 16 x i32> [[OP1]], i64 [[SHIFT]], <vscale x 16 x i1> [[MASK]], i64 0, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vnclip_wx_i16m4_m(vbool4_t mask, vint32m8_t op1, size_t shift, size_t vl) {
- return __riscv_vnclip_wx_i16m4_m(mask, op1, shift, vl);
+ return __riscv_vnclip_wx_i16m4_m(mask, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i32> @test_vnclip_wv_i32mf2_m
// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i32> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnclip.mask.nxv1i32.nxv1i64.nxv1i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i64> [[OP1]], <vscale x 1 x i32> [[SHIFT]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnclip.mask.nxv1i32.nxv1i64.nxv1i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i64> [[OP1]], <vscale x 1 x i32> [[SHIFT]], <vscale x 1 x i1> [[MASK]], i64 0, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vnclip_wv_i32mf2_m(vbool64_t mask, vint64m1_t op1, vuint32mf2_t shift, size_t vl) {
- return __riscv_vnclip_wv_i32mf2_m(mask, op1, shift, vl);
+ return __riscv_vnclip_wv_i32mf2_m(mask, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i32> @test_vnclip_wx_i32mf2_m
// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnclip.mask.nxv1i32.nxv1i64.i64.i64(<vscale x 1 x i32> poison, <vscale x 1 x i64> [[OP1]], i64 [[SHIFT]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnclip.mask.nxv1i32.nxv1i64.i64.i64(<vscale x 1 x i32> poison, <vscale x 1 x i64> [[OP1]], i64 [[SHIFT]], <vscale x 1 x i1> [[MASK]], i64 0, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vnclip_wx_i32mf2_m(vbool64_t mask, vint64m1_t op1, size_t shift, size_t vl) {
- return __riscv_vnclip_wx_i32mf2_m(mask, op1, shift, vl);
+ return __riscv_vnclip_wx_i32mf2_m(mask, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i32> @test_vnclip_wv_i32m1_m
// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i32> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vnclip.mask.nxv2i32.nxv2i64.nxv2i32.i64(<vscale x 2 x i32> poison, <vscale x 2 x i64> [[OP1]], <vscale x 2 x i32> [[SHIFT]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vnclip.mask.nxv2i32.nxv2i64.nxv2i32.i64(<vscale x 2 x i32> poison, <vscale x 2 x i64> [[OP1]], <vscale x 2 x i32> [[SHIFT]], <vscale x 2 x i1> [[MASK]], i64 0, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vnclip_wv_i32m1_m(vbool32_t mask, vint64m2_t op1, vuint32m1_t shift, size_t vl) {
- return __riscv_vnclip_wv_i32m1_m(mask, op1, shift, vl);
+ return __riscv_vnclip_wv_i32m1_m(mask, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i32> @test_vnclip_wx_i32m1_m
// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vnclip.mask.nxv2i32.nxv2i64.i64.i64(<vscale x 2 x i32> poison, <vscale x 2 x i64> [[OP1]], i64 [[SHIFT]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vnclip.mask.nxv2i32.nxv2i64.i64.i64(<vscale x 2 x i32> poison, <vscale x 2 x i64> [[OP1]], i64 [[SHIFT]], <vscale x 2 x i1> [[MASK]], i64 0, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vnclip_wx_i32m1_m(vbool32_t mask, vint64m2_t op1, size_t shift, size_t vl) {
- return __riscv_vnclip_wx_i32m1_m(mask, op1, shift, vl);
+ return __riscv_vnclip_wx_i32m1_m(mask, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i32> @test_vnclip_wv_i32m2_m
// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i32> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vnclip.mask.nxv4i32.nxv4i64.nxv4i32.i64(<vscale x 4 x i32> poison, <vscale x 4 x i64> [[OP1]], <vscale x 4 x i32> [[SHIFT]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vnclip.mask.nxv4i32.nxv4i64.nxv4i32.i64(<vscale x 4 x i32> poison, <vscale x 4 x i64> [[OP1]], <vscale x 4 x i32> [[SHIFT]], <vscale x 4 x i1> [[MASK]], i64 0, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vnclip_wv_i32m2_m(vbool16_t mask, vint64m4_t op1, vuint32m2_t shift, size_t vl) {
- return __riscv_vnclip_wv_i32m2_m(mask, op1, shift, vl);
+ return __riscv_vnclip_wv_i32m2_m(mask, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i32> @test_vnclip_wx_i32m2_m
// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vnclip.mask.nxv4i32.nxv4i64.i64.i64(<vscale x 4 x i32> poison, <vscale x 4 x i64> [[OP1]], i64 [[SHIFT]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vnclip.mask.nxv4i32.nxv4i64.i64.i64(<vscale x 4 x i32> poison, <vscale x 4 x i64> [[OP1]], i64 [[SHIFT]], <vscale x 4 x i1> [[MASK]], i64 0, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vnclip_wx_i32m2_m(vbool16_t mask, vint64m4_t op1, size_t shift, size_t vl) {
- return __riscv_vnclip_wx_i32m2_m(mask, op1, shift, vl);
+ return __riscv_vnclip_wx_i32m2_m(mask, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i32> @test_vnclip_wv_i32m4_m
// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i32> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vnclip.mask.nxv8i32.nxv8i64.nxv8i32.i64(<vscale x 8 x i32> poison, <vscale x 8 x i64> [[OP1]], <vscale x 8 x i32> [[SHIFT]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vnclip.mask.nxv8i32.nxv8i64.nxv8i32.i64(<vscale x 8 x i32> poison, <vscale x 8 x i64> [[OP1]], <vscale x 8 x i32> [[SHIFT]], <vscale x 8 x i1> [[MASK]], i64 0, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vnclip_wv_i32m4_m(vbool8_t mask, vint64m8_t op1, vuint32m4_t shift, size_t vl) {
- return __riscv_vnclip_wv_i32m4_m(mask, op1, shift, vl);
+ return __riscv_vnclip_wv_i32m4_m(mask, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i32> @test_vnclip_wx_i32m4_m
// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vnclip.mask.nxv8i32.nxv8i64.i64.i64(<vscale x 8 x i32> poison, <vscale x 8 x i64> [[OP1]], i64 [[SHIFT]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vnclip.mask.nxv8i32.nxv8i64.i64.i64(<vscale x 8 x i32> poison, <vscale x 8 x i64> [[OP1]], i64 [[SHIFT]], <vscale x 8 x i1> [[MASK]], i64 0, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vnclip_wx_i32m4_m(vbool8_t mask, vint64m8_t op1, size_t shift, size_t vl) {
- return __riscv_vnclip_wx_i32m4_m(mask, op1, shift, vl);
+ return __riscv_vnclip_wx_i32m4_m(mask, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vnclipu_wv_u8mf8
// CHECK-RV64-SAME: (<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i8> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vnclipu.nxv1i8.nxv1i16.nxv1i8.i64(<vscale x 1 x i8> poison, <vscale x 1 x i16> [[OP1]], <vscale x 1 x i8> [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vnclipu.nxv1i8.nxv1i16.nxv1i8.i64(<vscale x 1 x i8> poison, <vscale x 1 x i16> [[OP1]], <vscale x 1 x i8> [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vnclipu_wv_u8mf8(vuint16mf4_t op1, vuint8mf8_t shift, size_t vl) {
- return __riscv_vnclipu_wv_u8mf8(op1, shift, vl);
+ return __riscv_vnclipu_wv_u8mf8(op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vnclipu_wx_u8mf8
// CHECK-RV64-SAME: (<vscale x 1 x i16> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vnclipu.nxv1i8.nxv1i16.i64.i64(<vscale x 1 x i8> poison, <vscale x 1 x i16> [[OP1]], i64 [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vnclipu.nxv1i8.nxv1i16.i64.i64(<vscale x 1 x i8> poison, <vscale x 1 x i16> [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vnclipu_wx_u8mf8(vuint16mf4_t op1, size_t shift, size_t vl) {
- return __riscv_vnclipu_wx_u8mf8(op1, shift, vl);
+ return __riscv_vnclipu_wx_u8mf8(op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vnclipu_wv_u8mf4
// CHECK-RV64-SAME: (<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i8> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vnclipu.nxv2i8.nxv2i16.nxv2i8.i64(<vscale x 2 x i8> poison, <vscale x 2 x i16> [[OP1]], <vscale x 2 x i8> [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vnclipu.nxv2i8.nxv2i16.nxv2i8.i64(<vscale x 2 x i8> poison, <vscale x 2 x i16> [[OP1]], <vscale x 2 x i8> [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vnclipu_wv_u8mf4(vuint16mf2_t op1, vuint8mf4_t shift, size_t vl) {
- return __riscv_vnclipu_wv_u8mf4(op1, shift, vl);
+ return __riscv_vnclipu_wv_u8mf4(op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vnclipu_wx_u8mf4
// CHECK-RV64-SAME: (<vscale x 2 x i16> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vnclipu.nxv2i8.nxv2i16.i64.i64(<vscale x 2 x i8> poison, <vscale x 2 x i16> [[OP1]], i64 [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vnclipu.nxv2i8.nxv2i16.i64.i64(<vscale x 2 x i8> poison, <vscale x 2 x i16> [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vnclipu_wx_u8mf4(vuint16mf2_t op1, size_t shift, size_t vl) {
- return __riscv_vnclipu_wx_u8mf4(op1, shift, vl);
+ return __riscv_vnclipu_wx_u8mf4(op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vnclipu_wv_u8mf2
// CHECK-RV64-SAME: (<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i8> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vnclipu.nxv4i8.nxv4i16.nxv4i8.i64(<vscale x 4 x i8> poison, <vscale x 4 x i16> [[OP1]], <vscale x 4 x i8> [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vnclipu.nxv4i8.nxv4i16.nxv4i8.i64(<vscale x 4 x i8> poison, <vscale x 4 x i16> [[OP1]], <vscale x 4 x i8> [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vnclipu_wv_u8mf2(vuint16m1_t op1, vuint8mf2_t shift, size_t vl) {
- return __riscv_vnclipu_wv_u8mf2(op1, shift, vl);
+ return __riscv_vnclipu_wv_u8mf2(op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vnclipu_wx_u8mf2
// CHECK-RV64-SAME: (<vscale x 4 x i16> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vnclipu.nxv4i8.nxv4i16.i64.i64(<vscale x 4 x i8> poison, <vscale x 4 x i16> [[OP1]], i64 [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vnclipu.nxv4i8.nxv4i16.i64.i64(<vscale x 4 x i8> poison, <vscale x 4 x i16> [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vnclipu_wx_u8mf2(vuint16m1_t op1, size_t shift, size_t vl) {
- return __riscv_vnclipu_wx_u8mf2(op1, shift, vl);
+ return __riscv_vnclipu_wx_u8mf2(op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vnclipu_wv_u8m1
// CHECK-RV64-SAME: (<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i8> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vnclipu.nxv8i8.nxv8i16.nxv8i8.i64(<vscale x 8 x i8> poison, <vscale x 8 x i16> [[OP1]], <vscale x 8 x i8> [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vnclipu.nxv8i8.nxv8i16.nxv8i8.i64(<vscale x 8 x i8> poison, <vscale x 8 x i16> [[OP1]], <vscale x 8 x i8> [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vnclipu_wv_u8m1(vuint16m2_t op1, vuint8m1_t shift, size_t vl) {
- return __riscv_vnclipu_wv_u8m1(op1, shift, vl);
+ return __riscv_vnclipu_wv_u8m1(op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vnclipu_wx_u8m1
// CHECK-RV64-SAME: (<vscale x 8 x i16> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vnclipu.nxv8i8.nxv8i16.i64.i64(<vscale x 8 x i8> poison, <vscale x 8 x i16> [[OP1]], i64 [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vnclipu.nxv8i8.nxv8i16.i64.i64(<vscale x 8 x i8> poison, <vscale x 8 x i16> [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vnclipu_wx_u8m1(vuint16m2_t op1, size_t shift, size_t vl) {
- return __riscv_vnclipu_wx_u8m1(op1, shift, vl);
+ return __riscv_vnclipu_wx_u8m1(op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vnclipu_wv_u8m2
// CHECK-RV64-SAME: (<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i8> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vnclipu.nxv16i8.nxv16i16.nxv16i8.i64(<vscale x 16 x i8> poison, <vscale x 16 x i16> [[OP1]], <vscale x 16 x i8> [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vnclipu.nxv16i8.nxv16i16.nxv16i8.i64(<vscale x 16 x i8> poison, <vscale x 16 x i16> [[OP1]], <vscale x 16 x i8> [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vnclipu_wv_u8m2(vuint16m4_t op1, vuint8m2_t shift, size_t vl) {
- return __riscv_vnclipu_wv_u8m2(op1, shift, vl);
+ return __riscv_vnclipu_wv_u8m2(op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vnclipu_wx_u8m2
// CHECK-RV64-SAME: (<vscale x 16 x i16> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vnclipu.nxv16i8.nxv16i16.i64.i64(<vscale x 16 x i8> poison, <vscale x 16 x i16> [[OP1]], i64 [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vnclipu.nxv16i8.nxv16i16.i64.i64(<vscale x 16 x i8> poison, <vscale x 16 x i16> [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vnclipu_wx_u8m2(vuint16m4_t op1, size_t shift, size_t vl) {
- return __riscv_vnclipu_wx_u8m2(op1, shift, vl);
+ return __riscv_vnclipu_wx_u8m2(op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vnclipu_wv_u8m4
// CHECK-RV64-SAME: (<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i8> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vnclipu.nxv32i8.nxv32i16.nxv32i8.i64(<vscale x 32 x i8> poison, <vscale x 32 x i16> [[OP1]], <vscale x 32 x i8> [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vnclipu.nxv32i8.nxv32i16.nxv32i8.i64(<vscale x 32 x i8> poison, <vscale x 32 x i16> [[OP1]], <vscale x 32 x i8> [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vnclipu_wv_u8m4(vuint16m8_t op1, vuint8m4_t shift, size_t vl) {
- return __riscv_vnclipu_wv_u8m4(op1, shift, vl);
+ return __riscv_vnclipu_wv_u8m4(op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vnclipu_wx_u8m4
// CHECK-RV64-SAME: (<vscale x 32 x i16> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vnclipu.nxv32i8.nxv32i16.i64.i64(<vscale x 32 x i8> poison, <vscale x 32 x i16> [[OP1]], i64 [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vnclipu.nxv32i8.nxv32i16.i64.i64(<vscale x 32 x i8> poison, <vscale x 32 x i16> [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vnclipu_wx_u8m4(vuint16m8_t op1, size_t shift, size_t vl) {
- return __riscv_vnclipu_wx_u8m4(op1, shift, vl);
+ return __riscv_vnclipu_wx_u8m4(op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i16> @test_vnclipu_wv_u16mf4
// CHECK-RV64-SAME: (<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i16> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vnclipu.nxv1i16.nxv1i32.nxv1i16.i64(<vscale x 1 x i16> poison, <vscale x 1 x i32> [[OP1]], <vscale x 1 x i16> [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vnclipu.nxv1i16.nxv1i32.nxv1i16.i64(<vscale x 1 x i16> poison, <vscale x 1 x i32> [[OP1]], <vscale x 1 x i16> [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vnclipu_wv_u16mf4(vuint32mf2_t op1, vuint16mf4_t shift, size_t vl) {
- return __riscv_vnclipu_wv_u16mf4(op1, shift, vl);
+ return __riscv_vnclipu_wv_u16mf4(op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i16> @test_vnclipu_wx_u16mf4
// CHECK-RV64-SAME: (<vscale x 1 x i32> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vnclipu.nxv1i16.nxv1i32.i64.i64(<vscale x 1 x i16> poison, <vscale x 1 x i32> [[OP1]], i64 [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vnclipu.nxv1i16.nxv1i32.i64.i64(<vscale x 1 x i16> poison, <vscale x 1 x i32> [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vnclipu_wx_u16mf4(vuint32mf2_t op1, size_t shift, size_t vl) {
- return __riscv_vnclipu_wx_u16mf4(op1, shift, vl);
+ return __riscv_vnclipu_wx_u16mf4(op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i16> @test_vnclipu_wv_u16mf2
// CHECK-RV64-SAME: (<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i16> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vnclipu.nxv2i16.nxv2i32.nxv2i16.i64(<vscale x 2 x i16> poison, <vscale x 2 x i32> [[OP1]], <vscale x 2 x i16> [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vnclipu.nxv2i16.nxv2i32.nxv2i16.i64(<vscale x 2 x i16> poison, <vscale x 2 x i32> [[OP1]], <vscale x 2 x i16> [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vnclipu_wv_u16mf2(vuint32m1_t op1, vuint16mf2_t shift, size_t vl) {
- return __riscv_vnclipu_wv_u16mf2(op1, shift, vl);
+ return __riscv_vnclipu_wv_u16mf2(op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i16> @test_vnclipu_wx_u16mf2
// CHECK-RV64-SAME: (<vscale x 2 x i32> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vnclipu.nxv2i16.nxv2i32.i64.i64(<vscale x 2 x i16> poison, <vscale x 2 x i32> [[OP1]], i64 [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vnclipu.nxv2i16.nxv2i32.i64.i64(<vscale x 2 x i16> poison, <vscale x 2 x i32> [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vnclipu_wx_u16mf2(vuint32m1_t op1, size_t shift, size_t vl) {
- return __riscv_vnclipu_wx_u16mf2(op1, shift, vl);
+ return __riscv_vnclipu_wx_u16mf2(op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vnclipu_wv_u16m1
// CHECK-RV64-SAME: (<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i16> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vnclipu.nxv4i16.nxv4i32.nxv4i16.i64(<vscale x 4 x i16> poison, <vscale x 4 x i32> [[OP1]], <vscale x 4 x i16> [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vnclipu.nxv4i16.nxv4i32.nxv4i16.i64(<vscale x 4 x i16> poison, <vscale x 4 x i32> [[OP1]], <vscale x 4 x i16> [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vnclipu_wv_u16m1(vuint32m2_t op1, vuint16m1_t shift, size_t vl) {
- return __riscv_vnclipu_wv_u16m1(op1, shift, vl);
+ return __riscv_vnclipu_wv_u16m1(op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vnclipu_wx_u16m1
// CHECK-RV64-SAME: (<vscale x 4 x i32> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vnclipu.nxv4i16.nxv4i32.i64.i64(<vscale x 4 x i16> poison, <vscale x 4 x i32> [[OP1]], i64 [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vnclipu.nxv4i16.nxv4i32.i64.i64(<vscale x 4 x i16> poison, <vscale x 4 x i32> [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vnclipu_wx_u16m1(vuint32m2_t op1, size_t shift, size_t vl) {
- return __riscv_vnclipu_wx_u16m1(op1, shift, vl);
+ return __riscv_vnclipu_wx_u16m1(op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i16> @test_vnclipu_wv_u16m2
// CHECK-RV64-SAME: (<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i16> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vnclipu.nxv8i16.nxv8i32.nxv8i16.i64(<vscale x 8 x i16> poison, <vscale x 8 x i32> [[OP1]], <vscale x 8 x i16> [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vnclipu.nxv8i16.nxv8i32.nxv8i16.i64(<vscale x 8 x i16> poison, <vscale x 8 x i32> [[OP1]], <vscale x 8 x i16> [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vnclipu_wv_u16m2(vuint32m4_t op1, vuint16m2_t shift, size_t vl) {
- return __riscv_vnclipu_wv_u16m2(op1, shift, vl);
+ return __riscv_vnclipu_wv_u16m2(op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i16> @test_vnclipu_wx_u16m2
// CHECK-RV64-SAME: (<vscale x 8 x i32> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vnclipu.nxv8i16.nxv8i32.i64.i64(<vscale x 8 x i16> poison, <vscale x 8 x i32> [[OP1]], i64 [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vnclipu.nxv8i16.nxv8i32.i64.i64(<vscale x 8 x i16> poison, <vscale x 8 x i32> [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vnclipu_wx_u16m2(vuint32m4_t op1, size_t shift, size_t vl) {
- return __riscv_vnclipu_wx_u16m2(op1, shift, vl);
+ return __riscv_vnclipu_wx_u16m2(op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i16> @test_vnclipu_wv_u16m4
// CHECK-RV64-SAME: (<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i16> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vnclipu.nxv16i16.nxv16i32.nxv16i16.i64(<vscale x 16 x i16> poison, <vscale x 16 x i32> [[OP1]], <vscale x 16 x i16> [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vnclipu.nxv16i16.nxv16i32.nxv16i16.i64(<vscale x 16 x i16> poison, <vscale x 16 x i32> [[OP1]], <vscale x 16 x i16> [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vnclipu_wv_u16m4(vuint32m8_t op1, vuint16m4_t shift, size_t vl) {
- return __riscv_vnclipu_wv_u16m4(op1, shift, vl);
+ return __riscv_vnclipu_wv_u16m4(op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i16> @test_vnclipu_wx_u16m4
// CHECK-RV64-SAME: (<vscale x 16 x i32> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vnclipu.nxv16i16.nxv16i32.i64.i64(<vscale x 16 x i16> poison, <vscale x 16 x i32> [[OP1]], i64 [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vnclipu.nxv16i16.nxv16i32.i64.i64(<vscale x 16 x i16> poison, <vscale x 16 x i32> [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vnclipu_wx_u16m4(vuint32m8_t op1, size_t shift, size_t vl) {
- return __riscv_vnclipu_wx_u16m4(op1, shift, vl);
+ return __riscv_vnclipu_wx_u16m4(op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i32> @test_vnclipu_wv_u32mf2
// CHECK-RV64-SAME: (<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i32> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnclipu.nxv1i32.nxv1i64.nxv1i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i64> [[OP1]], <vscale x 1 x i32> [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnclipu.nxv1i32.nxv1i64.nxv1i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i64> [[OP1]], <vscale x 1 x i32> [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vnclipu_wv_u32mf2(vuint64m1_t op1, vuint32mf2_t shift, size_t vl) {
- return __riscv_vnclipu_wv_u32mf2(op1, shift, vl);
+ return __riscv_vnclipu_wv_u32mf2(op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i32> @test_vnclipu_wx_u32mf2
// CHECK-RV64-SAME: (<vscale x 1 x i64> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnclipu.nxv1i32.nxv1i64.i64.i64(<vscale x 1 x i32> poison, <vscale x 1 x i64> [[OP1]], i64 [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnclipu.nxv1i32.nxv1i64.i64.i64(<vscale x 1 x i32> poison, <vscale x 1 x i64> [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vnclipu_wx_u32mf2(vuint64m1_t op1, size_t shift, size_t vl) {
- return __riscv_vnclipu_wx_u32mf2(op1, shift, vl);
+ return __riscv_vnclipu_wx_u32mf2(op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i32> @test_vnclipu_wv_u32m1
// CHECK-RV64-SAME: (<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i32> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vnclipu.nxv2i32.nxv2i64.nxv2i32.i64(<vscale x 2 x i32> poison, <vscale x 2 x i64> [[OP1]], <vscale x 2 x i32> [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vnclipu.nxv2i32.nxv2i64.nxv2i32.i64(<vscale x 2 x i32> poison, <vscale x 2 x i64> [[OP1]], <vscale x 2 x i32> [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vnclipu_wv_u32m1(vuint64m2_t op1, vuint32m1_t shift, size_t vl) {
- return __riscv_vnclipu_wv_u32m1(op1, shift, vl);
+ return __riscv_vnclipu_wv_u32m1(op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i32> @test_vnclipu_wx_u32m1
// CHECK-RV64-SAME: (<vscale x 2 x i64> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vnclipu.nxv2i32.nxv2i64.i64.i64(<vscale x 2 x i32> poison, <vscale x 2 x i64> [[OP1]], i64 [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vnclipu.nxv2i32.nxv2i64.i64.i64(<vscale x 2 x i32> poison, <vscale x 2 x i64> [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vnclipu_wx_u32m1(vuint64m2_t op1, size_t shift, size_t vl) {
- return __riscv_vnclipu_wx_u32m1(op1, shift, vl);
+ return __riscv_vnclipu_wx_u32m1(op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i32> @test_vnclipu_wv_u32m2
// CHECK-RV64-SAME: (<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i32> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vnclipu.nxv4i32.nxv4i64.nxv4i32.i64(<vscale x 4 x i32> poison, <vscale x 4 x i64> [[OP1]], <vscale x 4 x i32> [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vnclipu.nxv4i32.nxv4i64.nxv4i32.i64(<vscale x 4 x i32> poison, <vscale x 4 x i64> [[OP1]], <vscale x 4 x i32> [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vnclipu_wv_u32m2(vuint64m4_t op1, vuint32m2_t shift, size_t vl) {
- return __riscv_vnclipu_wv_u32m2(op1, shift, vl);
+ return __riscv_vnclipu_wv_u32m2(op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i32> @test_vnclipu_wx_u32m2
// CHECK-RV64-SAME: (<vscale x 4 x i64> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vnclipu.nxv4i32.nxv4i64.i64.i64(<vscale x 4 x i32> poison, <vscale x 4 x i64> [[OP1]], i64 [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vnclipu.nxv4i32.nxv4i64.i64.i64(<vscale x 4 x i32> poison, <vscale x 4 x i64> [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vnclipu_wx_u32m2(vuint64m4_t op1, size_t shift, size_t vl) {
- return __riscv_vnclipu_wx_u32m2(op1, shift, vl);
+ return __riscv_vnclipu_wx_u32m2(op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i32> @test_vnclipu_wv_u32m4
// CHECK-RV64-SAME: (<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i32> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vnclipu.nxv8i32.nxv8i64.nxv8i32.i64(<vscale x 8 x i32> poison, <vscale x 8 x i64> [[OP1]], <vscale x 8 x i32> [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vnclipu.nxv8i32.nxv8i64.nxv8i32.i64(<vscale x 8 x i32> poison, <vscale x 8 x i64> [[OP1]], <vscale x 8 x i32> [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vnclipu_wv_u32m4(vuint64m8_t op1, vuint32m4_t shift, size_t vl) {
- return __riscv_vnclipu_wv_u32m4(op1, shift, vl);
+ return __riscv_vnclipu_wv_u32m4(op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i32> @test_vnclipu_wx_u32m4
// CHECK-RV64-SAME: (<vscale x 8 x i64> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vnclipu.nxv8i32.nxv8i64.i64.i64(<vscale x 8 x i32> poison, <vscale x 8 x i64> [[OP1]], i64 [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vnclipu.nxv8i32.nxv8i64.i64.i64(<vscale x 8 x i32> poison, <vscale x 8 x i64> [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vnclipu_wx_u32m4(vuint64m8_t op1, size_t shift, size_t vl) {
- return __riscv_vnclipu_wx_u32m4(op1, shift, vl);
+ return __riscv_vnclipu_wx_u32m4(op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vnclipu_wv_u8mf8_m
// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i8> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vnclipu.mask.nxv1i8.nxv1i16.nxv1i8.i64(<vscale x 1 x i8> poison, <vscale x 1 x i16> [[OP1]], <vscale x 1 x i8> [[SHIFT]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vnclipu.mask.nxv1i8.nxv1i16.nxv1i8.i64(<vscale x 1 x i8> poison, <vscale x 1 x i16> [[OP1]], <vscale x 1 x i8> [[SHIFT]], <vscale x 1 x i1> [[MASK]], i64 0, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vnclipu_wv_u8mf8_m(vbool64_t mask, vuint16mf4_t op1, vuint8mf8_t shift, size_t vl) {
- return __riscv_vnclipu_wv_u8mf8_m(mask, op1, shift, vl);
+ return __riscv_vnclipu_wv_u8mf8_m(mask, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vnclipu_wx_u8mf8_m
// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vnclipu.mask.nxv1i8.nxv1i16.i64.i64(<vscale x 1 x i8> poison, <vscale x 1 x i16> [[OP1]], i64 [[SHIFT]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vnclipu.mask.nxv1i8.nxv1i16.i64.i64(<vscale x 1 x i8> poison, <vscale x 1 x i16> [[OP1]], i64 [[SHIFT]], <vscale x 1 x i1> [[MASK]], i64 0, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vnclipu_wx_u8mf8_m(vbool64_t mask, vuint16mf4_t op1, size_t shift, size_t vl) {
- return __riscv_vnclipu_wx_u8mf8_m(mask, op1, shift, vl);
+ return __riscv_vnclipu_wx_u8mf8_m(mask, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vnclipu_wv_u8mf4_m
// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i8> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vnclipu.mask.nxv2i8.nxv2i16.nxv2i8.i64(<vscale x 2 x i8> poison, <vscale x 2 x i16> [[OP1]], <vscale x 2 x i8> [[SHIFT]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vnclipu.mask.nxv2i8.nxv2i16.nxv2i8.i64(<vscale x 2 x i8> poison, <vscale x 2 x i16> [[OP1]], <vscale x 2 x i8> [[SHIFT]], <vscale x 2 x i1> [[MASK]], i64 0, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vnclipu_wv_u8mf4_m(vbool32_t mask, vuint16mf2_t op1, vuint8mf4_t shift, size_t vl) {
- return __riscv_vnclipu_wv_u8mf4_m(mask, op1, shift, vl);
+ return __riscv_vnclipu_wv_u8mf4_m(mask, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vnclipu_wx_u8mf4_m
// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vnclipu.mask.nxv2i8.nxv2i16.i64.i64(<vscale x 2 x i8> poison, <vscale x 2 x i16> [[OP1]], i64 [[SHIFT]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vnclipu.mask.nxv2i8.nxv2i16.i64.i64(<vscale x 2 x i8> poison, <vscale x 2 x i16> [[OP1]], i64 [[SHIFT]], <vscale x 2 x i1> [[MASK]], i64 0, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vnclipu_wx_u8mf4_m(vbool32_t mask, vuint16mf2_t op1, size_t shift, size_t vl) {
- return __riscv_vnclipu_wx_u8mf4_m(mask, op1, shift, vl);
+ return __riscv_vnclipu_wx_u8mf4_m(mask, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vnclipu_wv_u8mf2_m
// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i8> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vnclipu.mask.nxv4i8.nxv4i16.nxv4i8.i64(<vscale x 4 x i8> poison, <vscale x 4 x i16> [[OP1]], <vscale x 4 x i8> [[SHIFT]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vnclipu.mask.nxv4i8.nxv4i16.nxv4i8.i64(<vscale x 4 x i8> poison, <vscale x 4 x i16> [[OP1]], <vscale x 4 x i8> [[SHIFT]], <vscale x 4 x i1> [[MASK]], i64 0, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vnclipu_wv_u8mf2_m(vbool16_t mask, vuint16m1_t op1, vuint8mf2_t shift, size_t vl) {
- return __riscv_vnclipu_wv_u8mf2_m(mask, op1, shift, vl);
+ return __riscv_vnclipu_wv_u8mf2_m(mask, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vnclipu_wx_u8mf2_m
// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vnclipu.mask.nxv4i8.nxv4i16.i64.i64(<vscale x 4 x i8> poison, <vscale x 4 x i16> [[OP1]], i64 [[SHIFT]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vnclipu.mask.nxv4i8.nxv4i16.i64.i64(<vscale x 4 x i8> poison, <vscale x 4 x i16> [[OP1]], i64 [[SHIFT]], <vscale x 4 x i1> [[MASK]], i64 0, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vnclipu_wx_u8mf2_m(vbool16_t mask, vuint16m1_t op1, size_t shift, size_t vl) {
- return __riscv_vnclipu_wx_u8mf2_m(mask, op1, shift, vl);
+ return __riscv_vnclipu_wx_u8mf2_m(mask, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vnclipu_wv_u8m1_m
// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i8> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vnclipu.mask.nxv8i8.nxv8i16.nxv8i8.i64(<vscale x 8 x i8> poison, <vscale x 8 x i16> [[OP1]], <vscale x 8 x i8> [[SHIFT]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vnclipu.mask.nxv8i8.nxv8i16.nxv8i8.i64(<vscale x 8 x i8> poison, <vscale x 8 x i16> [[OP1]], <vscale x 8 x i8> [[SHIFT]], <vscale x 8 x i1> [[MASK]], i64 0, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vnclipu_wv_u8m1_m(vbool8_t mask, vuint16m2_t op1, vuint8m1_t shift, size_t vl) {
- return __riscv_vnclipu_wv_u8m1_m(mask, op1, shift, vl);
+ return __riscv_vnclipu_wv_u8m1_m(mask, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vnclipu_wx_u8m1_m
// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vnclipu.mask.nxv8i8.nxv8i16.i64.i64(<vscale x 8 x i8> poison, <vscale x 8 x i16> [[OP1]], i64 [[SHIFT]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vnclipu.mask.nxv8i8.nxv8i16.i64.i64(<vscale x 8 x i8> poison, <vscale x 8 x i16> [[OP1]], i64 [[SHIFT]], <vscale x 8 x i1> [[MASK]], i64 0, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vnclipu_wx_u8m1_m(vbool8_t mask, vuint16m2_t op1, size_t shift, size_t vl) {
- return __riscv_vnclipu_wx_u8m1_m(mask, op1, shift, vl);
+ return __riscv_vnclipu_wx_u8m1_m(mask, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vnclipu_wv_u8m2_m
// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i8> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vnclipu.mask.nxv16i8.nxv16i16.nxv16i8.i64(<vscale x 16 x i8> poison, <vscale x 16 x i16> [[OP1]], <vscale x 16 x i8> [[SHIFT]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vnclipu.mask.nxv16i8.nxv16i16.nxv16i8.i64(<vscale x 16 x i8> poison, <vscale x 16 x i16> [[OP1]], <vscale x 16 x i8> [[SHIFT]], <vscale x 16 x i1> [[MASK]], i64 0, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vnclipu_wv_u8m2_m(vbool4_t mask, vuint16m4_t op1, vuint8m2_t shift, size_t vl) {
- return __riscv_vnclipu_wv_u8m2_m(mask, op1, shift, vl);
+ return __riscv_vnclipu_wv_u8m2_m(mask, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vnclipu_wx_u8m2_m
// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vnclipu.mask.nxv16i8.nxv16i16.i64.i64(<vscale x 16 x i8> poison, <vscale x 16 x i16> [[OP1]], i64 [[SHIFT]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vnclipu.mask.nxv16i8.nxv16i16.i64.i64(<vscale x 16 x i8> poison, <vscale x 16 x i16> [[OP1]], i64 [[SHIFT]], <vscale x 16 x i1> [[MASK]], i64 0, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vnclipu_wx_u8m2_m(vbool4_t mask, vuint16m4_t op1, size_t shift, size_t vl) {
- return __riscv_vnclipu_wx_u8m2_m(mask, op1, shift, vl);
+ return __riscv_vnclipu_wx_u8m2_m(mask, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vnclipu_wv_u8m4_m
// CHECK-RV64-SAME: (<vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i8> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vnclipu.mask.nxv32i8.nxv32i16.nxv32i8.i64(<vscale x 32 x i8> poison, <vscale x 32 x i16> [[OP1]], <vscale x 32 x i8> [[SHIFT]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vnclipu.mask.nxv32i8.nxv32i16.nxv32i8.i64(<vscale x 32 x i8> poison, <vscale x 32 x i16> [[OP1]], <vscale x 32 x i8> [[SHIFT]], <vscale x 32 x i1> [[MASK]], i64 0, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vnclipu_wv_u8m4_m(vbool2_t mask, vuint16m8_t op1, vuint8m4_t shift, size_t vl) {
- return __riscv_vnclipu_wv_u8m4_m(mask, op1, shift, vl);
+ return __riscv_vnclipu_wv_u8m4_m(mask, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vnclipu_wx_u8m4_m
// CHECK-RV64-SAME: (<vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vnclipu.mask.nxv32i8.nxv32i16.i64.i64(<vscale x 32 x i8> poison, <vscale x 32 x i16> [[OP1]], i64 [[SHIFT]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vnclipu.mask.nxv32i8.nxv32i16.i64.i64(<vscale x 32 x i8> poison, <vscale x 32 x i16> [[OP1]], i64 [[SHIFT]], <vscale x 32 x i1> [[MASK]], i64 0, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vnclipu_wx_u8m4_m(vbool2_t mask, vuint16m8_t op1, size_t shift, size_t vl) {
- return __riscv_vnclipu_wx_u8m4_m(mask, op1, shift, vl);
+ return __riscv_vnclipu_wx_u8m4_m(mask, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i16> @test_vnclipu_wv_u16mf4_m
// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i16> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vnclipu.mask.nxv1i16.nxv1i32.nxv1i16.i64(<vscale x 1 x i16> poison, <vscale x 1 x i32> [[OP1]], <vscale x 1 x i16> [[SHIFT]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vnclipu.mask.nxv1i16.nxv1i32.nxv1i16.i64(<vscale x 1 x i16> poison, <vscale x 1 x i32> [[OP1]], <vscale x 1 x i16> [[SHIFT]], <vscale x 1 x i1> [[MASK]], i64 0, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vnclipu_wv_u16mf4_m(vbool64_t mask, vuint32mf2_t op1, vuint16mf4_t shift, size_t vl) {
- return __riscv_vnclipu_wv_u16mf4_m(mask, op1, shift, vl);
+ return __riscv_vnclipu_wv_u16mf4_m(mask, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i16> @test_vnclipu_wx_u16mf4_m
// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vnclipu.mask.nxv1i16.nxv1i32.i64.i64(<vscale x 1 x i16> poison, <vscale x 1 x i32> [[OP1]], i64 [[SHIFT]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vnclipu.mask.nxv1i16.nxv1i32.i64.i64(<vscale x 1 x i16> poison, <vscale x 1 x i32> [[OP1]], i64 [[SHIFT]], <vscale x 1 x i1> [[MASK]], i64 0, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vnclipu_wx_u16mf4_m(vbool64_t mask, vuint32mf2_t op1, size_t shift, size_t vl) {
- return __riscv_vnclipu_wx_u16mf4_m(mask, op1, shift, vl);
+ return __riscv_vnclipu_wx_u16mf4_m(mask, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i16> @test_vnclipu_wv_u16mf2_m
// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i16> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vnclipu.mask.nxv2i16.nxv2i32.nxv2i16.i64(<vscale x 2 x i16> poison, <vscale x 2 x i32> [[OP1]], <vscale x 2 x i16> [[SHIFT]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vnclipu.mask.nxv2i16.nxv2i32.nxv2i16.i64(<vscale x 2 x i16> poison, <vscale x 2 x i32> [[OP1]], <vscale x 2 x i16> [[SHIFT]], <vscale x 2 x i1> [[MASK]], i64 0, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vnclipu_wv_u16mf2_m(vbool32_t mask, vuint32m1_t op1, vuint16mf2_t shift, size_t vl) {
- return __riscv_vnclipu_wv_u16mf2_m(mask, op1, shift, vl);
+ return __riscv_vnclipu_wv_u16mf2_m(mask, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i16> @test_vnclipu_wx_u16mf2_m
// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vnclipu.mask.nxv2i16.nxv2i32.i64.i64(<vscale x 2 x i16> poison, <vscale x 2 x i32> [[OP1]], i64 [[SHIFT]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vnclipu.mask.nxv2i16.nxv2i32.i64.i64(<vscale x 2 x i16> poison, <vscale x 2 x i32> [[OP1]], i64 [[SHIFT]], <vscale x 2 x i1> [[MASK]], i64 0, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vnclipu_wx_u16mf2_m(vbool32_t mask, vuint32m1_t op1, size_t shift, size_t vl) {
- return __riscv_vnclipu_wx_u16mf2_m(mask, op1, shift, vl);
+ return __riscv_vnclipu_wx_u16mf2_m(mask, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vnclipu_wv_u16m1_m
// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i16> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vnclipu.mask.nxv4i16.nxv4i32.nxv4i16.i64(<vscale x 4 x i16> poison, <vscale x 4 x i32> [[OP1]], <vscale x 4 x i16> [[SHIFT]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vnclipu.mask.nxv4i16.nxv4i32.nxv4i16.i64(<vscale x 4 x i16> poison, <vscale x 4 x i32> [[OP1]], <vscale x 4 x i16> [[SHIFT]], <vscale x 4 x i1> [[MASK]], i64 0, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vnclipu_wv_u16m1_m(vbool16_t mask, vuint32m2_t op1, vuint16m1_t shift, size_t vl) {
- return __riscv_vnclipu_wv_u16m1_m(mask, op1, shift, vl);
+ return __riscv_vnclipu_wv_u16m1_m(mask, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vnclipu_wx_u16m1_m
// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vnclipu.mask.nxv4i16.nxv4i32.i64.i64(<vscale x 4 x i16> poison, <vscale x 4 x i32> [[OP1]], i64 [[SHIFT]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vnclipu.mask.nxv4i16.nxv4i32.i64.i64(<vscale x 4 x i16> poison, <vscale x 4 x i32> [[OP1]], i64 [[SHIFT]], <vscale x 4 x i1> [[MASK]], i64 0, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vnclipu_wx_u16m1_m(vbool16_t mask, vuint32m2_t op1, size_t shift, size_t vl) {
- return __riscv_vnclipu_wx_u16m1_m(mask, op1, shift, vl);
+ return __riscv_vnclipu_wx_u16m1_m(mask, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i16> @test_vnclipu_wv_u16m2_m
// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i16> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vnclipu.mask.nxv8i16.nxv8i32.nxv8i16.i64(<vscale x 8 x i16> poison, <vscale x 8 x i32> [[OP1]], <vscale x 8 x i16> [[SHIFT]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vnclipu.mask.nxv8i16.nxv8i32.nxv8i16.i64(<vscale x 8 x i16> poison, <vscale x 8 x i32> [[OP1]], <vscale x 8 x i16> [[SHIFT]], <vscale x 8 x i1> [[MASK]], i64 0, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vnclipu_wv_u16m2_m(vbool8_t mask, vuint32m4_t op1, vuint16m2_t shift, size_t vl) {
- return __riscv_vnclipu_wv_u16m2_m(mask, op1, shift, vl);
+ return __riscv_vnclipu_wv_u16m2_m(mask, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i16> @test_vnclipu_wx_u16m2_m
// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vnclipu.mask.nxv8i16.nxv8i32.i64.i64(<vscale x 8 x i16> poison, <vscale x 8 x i32> [[OP1]], i64 [[SHIFT]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vnclipu.mask.nxv8i16.nxv8i32.i64.i64(<vscale x 8 x i16> poison, <vscale x 8 x i32> [[OP1]], i64 [[SHIFT]], <vscale x 8 x i1> [[MASK]], i64 0, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vnclipu_wx_u16m2_m(vbool8_t mask, vuint32m4_t op1, size_t shift, size_t vl) {
- return __riscv_vnclipu_wx_u16m2_m(mask, op1, shift, vl);
+ return __riscv_vnclipu_wx_u16m2_m(mask, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i16> @test_vnclipu_wv_u16m4_m
// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i16> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vnclipu.mask.nxv16i16.nxv16i32.nxv16i16.i64(<vscale x 16 x i16> poison, <vscale x 16 x i32> [[OP1]], <vscale x 16 x i16> [[SHIFT]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vnclipu.mask.nxv16i16.nxv16i32.nxv16i16.i64(<vscale x 16 x i16> poison, <vscale x 16 x i32> [[OP1]], <vscale x 16 x i16> [[SHIFT]], <vscale x 16 x i1> [[MASK]], i64 0, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vnclipu_wv_u16m4_m(vbool4_t mask, vuint32m8_t op1, vuint16m4_t shift, size_t vl) {
- return __riscv_vnclipu_wv_u16m4_m(mask, op1, shift, vl);
+ return __riscv_vnclipu_wv_u16m4_m(mask, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i16> @test_vnclipu_wx_u16m4_m
// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vnclipu.mask.nxv16i16.nxv16i32.i64.i64(<vscale x 16 x i16> poison, <vscale x 16 x i32> [[OP1]], i64 [[SHIFT]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vnclipu.mask.nxv16i16.nxv16i32.i64.i64(<vscale x 16 x i16> poison, <vscale x 16 x i32> [[OP1]], i64 [[SHIFT]], <vscale x 16 x i1> [[MASK]], i64 0, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vnclipu_wx_u16m4_m(vbool4_t mask, vuint32m8_t op1, size_t shift, size_t vl) {
- return __riscv_vnclipu_wx_u16m4_m(mask, op1, shift, vl);
+ return __riscv_vnclipu_wx_u16m4_m(mask, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i32> @test_vnclipu_wv_u32mf2_m
// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i32> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnclipu.mask.nxv1i32.nxv1i64.nxv1i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i64> [[OP1]], <vscale x 1 x i32> [[SHIFT]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnclipu.mask.nxv1i32.nxv1i64.nxv1i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i64> [[OP1]], <vscale x 1 x i32> [[SHIFT]], <vscale x 1 x i1> [[MASK]], i64 0, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vnclipu_wv_u32mf2_m(vbool64_t mask, vuint64m1_t op1, vuint32mf2_t shift, size_t vl) {
- return __riscv_vnclipu_wv_u32mf2_m(mask, op1, shift, vl);
+ return __riscv_vnclipu_wv_u32mf2_m(mask, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i32> @test_vnclipu_wx_u32mf2_m
// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnclipu.mask.nxv1i32.nxv1i64.i64.i64(<vscale x 1 x i32> poison, <vscale x 1 x i64> [[OP1]], i64 [[SHIFT]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnclipu.mask.nxv1i32.nxv1i64.i64.i64(<vscale x 1 x i32> poison, <vscale x 1 x i64> [[OP1]], i64 [[SHIFT]], <vscale x 1 x i1> [[MASK]], i64 0, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vnclipu_wx_u32mf2_m(vbool64_t mask, vuint64m1_t op1, size_t shift, size_t vl) {
- return __riscv_vnclipu_wx_u32mf2_m(mask, op1, shift, vl);
+ return __riscv_vnclipu_wx_u32mf2_m(mask, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i32> @test_vnclipu_wv_u32m1_m
// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i32> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vnclipu.mask.nxv2i32.nxv2i64.nxv2i32.i64(<vscale x 2 x i32> poison, <vscale x 2 x i64> [[OP1]], <vscale x 2 x i32> [[SHIFT]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vnclipu.mask.nxv2i32.nxv2i64.nxv2i32.i64(<vscale x 2 x i32> poison, <vscale x 2 x i64> [[OP1]], <vscale x 2 x i32> [[SHIFT]], <vscale x 2 x i1> [[MASK]], i64 0, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vnclipu_wv_u32m1_m(vbool32_t mask, vuint64m2_t op1, vuint32m1_t shift, size_t vl) {
- return __riscv_vnclipu_wv_u32m1_m(mask, op1, shift, vl);
+ return __riscv_vnclipu_wv_u32m1_m(mask, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i32> @test_vnclipu_wx_u32m1_m
// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vnclipu.mask.nxv2i32.nxv2i64.i64.i64(<vscale x 2 x i32> poison, <vscale x 2 x i64> [[OP1]], i64 [[SHIFT]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vnclipu.mask.nxv2i32.nxv2i64.i64.i64(<vscale x 2 x i32> poison, <vscale x 2 x i64> [[OP1]], i64 [[SHIFT]], <vscale x 2 x i1> [[MASK]], i64 0, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vnclipu_wx_u32m1_m(vbool32_t mask, vuint64m2_t op1, size_t shift, size_t vl) {
- return __riscv_vnclipu_wx_u32m1_m(mask, op1, shift, vl);
+ return __riscv_vnclipu_wx_u32m1_m(mask, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i32> @test_vnclipu_wv_u32m2_m
// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i32> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vnclipu.mask.nxv4i32.nxv4i64.nxv4i32.i64(<vscale x 4 x i32> poison, <vscale x 4 x i64> [[OP1]], <vscale x 4 x i32> [[SHIFT]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vnclipu.mask.nxv4i32.nxv4i64.nxv4i32.i64(<vscale x 4 x i32> poison, <vscale x 4 x i64> [[OP1]], <vscale x 4 x i32> [[SHIFT]], <vscale x 4 x i1> [[MASK]], i64 0, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vnclipu_wv_u32m2_m(vbool16_t mask, vuint64m4_t op1, vuint32m2_t shift, size_t vl) {
- return __riscv_vnclipu_wv_u32m2_m(mask, op1, shift, vl);
+ return __riscv_vnclipu_wv_u32m2_m(mask, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i32> @test_vnclipu_wx_u32m2_m
// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vnclipu.mask.nxv4i32.nxv4i64.i64.i64(<vscale x 4 x i32> poison, <vscale x 4 x i64> [[OP1]], i64 [[SHIFT]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vnclipu.mask.nxv4i32.nxv4i64.i64.i64(<vscale x 4 x i32> poison, <vscale x 4 x i64> [[OP1]], i64 [[SHIFT]], <vscale x 4 x i1> [[MASK]], i64 0, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vnclipu_wx_u32m2_m(vbool16_t mask, vuint64m4_t op1, size_t shift, size_t vl) {
- return __riscv_vnclipu_wx_u32m2_m(mask, op1, shift, vl);
+ return __riscv_vnclipu_wx_u32m2_m(mask, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i32> @test_vnclipu_wv_u32m4_m
// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i32> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vnclipu.mask.nxv8i32.nxv8i64.nxv8i32.i64(<vscale x 8 x i32> poison, <vscale x 8 x i64> [[OP1]], <vscale x 8 x i32> [[SHIFT]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vnclipu.mask.nxv8i32.nxv8i64.nxv8i32.i64(<vscale x 8 x i32> poison, <vscale x 8 x i64> [[OP1]], <vscale x 8 x i32> [[SHIFT]], <vscale x 8 x i1> [[MASK]], i64 0, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vnclipu_wv_u32m4_m(vbool8_t mask, vuint64m8_t op1, vuint32m4_t shift, size_t vl) {
- return __riscv_vnclipu_wv_u32m4_m(mask, op1, shift, vl);
+ return __riscv_vnclipu_wv_u32m4_m(mask, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i32> @test_vnclipu_wx_u32m4_m
// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vnclipu.mask.nxv8i32.nxv8i64.i64.i64(<vscale x 8 x i32> poison, <vscale x 8 x i64> [[OP1]], i64 [[SHIFT]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vnclipu.mask.nxv8i32.nxv8i64.i64.i64(<vscale x 8 x i32> poison, <vscale x 8 x i64> [[OP1]], i64 [[SHIFT]], <vscale x 8 x i1> [[MASK]], i64 0, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vnclipu_wx_u32m4_m(vbool8_t mask, vuint64m8_t op1, size_t shift, size_t vl) {
- return __riscv_vnclipu_wx_u32m4_m(mask, op1, shift, vl);
+ return __riscv_vnclipu_wx_u32m4_m(mask, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vsmul_vv_i8mf8
// CHECK-RV64-SAME: (<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsmul.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> poison, <vscale x 1 x i8> [[OP1]], <vscale x 1 x i8> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsmul.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> poison, <vscale x 1 x i8> [[OP1]], <vscale x 1 x i8> [[OP2]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vsmul_vv_i8mf8(vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
- return __riscv_vsmul_vv_i8mf8(op1, op2, vl);
+ return __riscv_vsmul_vv_i8mf8(op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vsmul_vx_i8mf8
// CHECK-RV64-SAME: (<vscale x 1 x i8> [[OP1:%.*]], i8 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsmul.nxv1i8.i8.i64(<vscale x 1 x i8> poison, <vscale x 1 x i8> [[OP1]], i8 [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsmul.nxv1i8.i8.i64(<vscale x 1 x i8> poison, <vscale x 1 x i8> [[OP1]], i8 [[OP2]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vsmul_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) {
- return __riscv_vsmul_vx_i8mf8(op1, op2, vl);
+ return __riscv_vsmul_vx_i8mf8(op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vsmul_vv_i8mf4
// CHECK-RV64-SAME: (<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsmul.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> poison, <vscale x 2 x i8> [[OP1]], <vscale x 2 x i8> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsmul.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> poison, <vscale x 2 x i8> [[OP1]], <vscale x 2 x i8> [[OP2]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vsmul_vv_i8mf4(vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
- return __riscv_vsmul_vv_i8mf4(op1, op2, vl);
+ return __riscv_vsmul_vv_i8mf4(op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vsmul_vx_i8mf4
// CHECK-RV64-SAME: (<vscale x 2 x i8> [[OP1:%.*]], i8 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsmul.nxv2i8.i8.i64(<vscale x 2 x i8> poison, <vscale x 2 x i8> [[OP1]], i8 [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsmul.nxv2i8.i8.i64(<vscale x 2 x i8> poison, <vscale x 2 x i8> [[OP1]], i8 [[OP2]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vsmul_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) {
- return __riscv_vsmul_vx_i8mf4(op1, op2, vl);
+ return __riscv_vsmul_vx_i8mf4(op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vsmul_vv_i8mf2
// CHECK-RV64-SAME: (<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsmul.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> poison, <vscale x 4 x i8> [[OP1]], <vscale x 4 x i8> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsmul.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> poison, <vscale x 4 x i8> [[OP1]], <vscale x 4 x i8> [[OP2]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vsmul_vv_i8mf2(vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
- return __riscv_vsmul_vv_i8mf2(op1, op2, vl);
+ return __riscv_vsmul_vv_i8mf2(op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vsmul_vx_i8mf2
// CHECK-RV64-SAME: (<vscale x 4 x i8> [[OP1:%.*]], i8 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsmul.nxv4i8.i8.i64(<vscale x 4 x i8> poison, <vscale x 4 x i8> [[OP1]], i8 [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsmul.nxv4i8.i8.i64(<vscale x 4 x i8> poison, <vscale x 4 x i8> [[OP1]], i8 [[OP2]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vsmul_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) {
- return __riscv_vsmul_vx_i8mf2(op1, op2, vl);
+ return __riscv_vsmul_vx_i8mf2(op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vsmul_vv_i8m1
// CHECK-RV64-SAME: (<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vsmul.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> poison, <vscale x 8 x i8> [[OP1]], <vscale x 8 x i8> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vsmul.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> poison, <vscale x 8 x i8> [[OP1]], <vscale x 8 x i8> [[OP2]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vsmul_vv_i8m1(vint8m1_t op1, vint8m1_t op2, size_t vl) {
- return __riscv_vsmul_vv_i8m1(op1, op2, vl);
+ return __riscv_vsmul_vv_i8m1(op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vsmul_vx_i8m1
// CHECK-RV64-SAME: (<vscale x 8 x i8> [[OP1:%.*]], i8 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vsmul.nxv8i8.i8.i64(<vscale x 8 x i8> poison, <vscale x 8 x i8> [[OP1]], i8 [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vsmul.nxv8i8.i8.i64(<vscale x 8 x i8> poison, <vscale x 8 x i8> [[OP1]], i8 [[OP2]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vsmul_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) {
- return __riscv_vsmul_vx_i8m1(op1, op2, vl);
+ return __riscv_vsmul_vx_i8m1(op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vsmul_vv_i8m2
// CHECK-RV64-SAME: (<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vsmul.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> poison, <vscale x 16 x i8> [[OP1]], <vscale x 16 x i8> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vsmul.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> poison, <vscale x 16 x i8> [[OP1]], <vscale x 16 x i8> [[OP2]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vsmul_vv_i8m2(vint8m2_t op1, vint8m2_t op2, size_t vl) {
- return __riscv_vsmul_vv_i8m2(op1, op2, vl);
+ return __riscv_vsmul_vv_i8m2(op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vsmul_vx_i8m2
// CHECK-RV64-SAME: (<vscale x 16 x i8> [[OP1:%.*]], i8 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vsmul.nxv16i8.i8.i64(<vscale x 16 x i8> poison, <vscale x 16 x i8> [[OP1]], i8 [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vsmul.nxv16i8.i8.i64(<vscale x 16 x i8> poison, <vscale x 16 x i8> [[OP1]], i8 [[OP2]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vsmul_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) {
- return __riscv_vsmul_vx_i8m2(op1, op2, vl);
+ return __riscv_vsmul_vx_i8m2(op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vsmul_vv_i8m4
// CHECK-RV64-SAME: (<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsmul.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> poison, <vscale x 32 x i8> [[OP1]], <vscale x 32 x i8> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsmul.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> poison, <vscale x 32 x i8> [[OP1]], <vscale x 32 x i8> [[OP2]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vsmul_vv_i8m4(vint8m4_t op1, vint8m4_t op2, size_t vl) {
- return __riscv_vsmul_vv_i8m4(op1, op2, vl);
+ return __riscv_vsmul_vv_i8m4(op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vsmul_vx_i8m4
// CHECK-RV64-SAME: (<vscale x 32 x i8> [[OP1:%.*]], i8 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsmul.nxv32i8.i8.i64(<vscale x 32 x i8> poison, <vscale x 32 x i8> [[OP1]], i8 [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsmul.nxv32i8.i8.i64(<vscale x 32 x i8> poison, <vscale x 32 x i8> [[OP1]], i8 [[OP2]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vsmul_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) {
- return __riscv_vsmul_vx_i8m4(op1, op2, vl);
+ return __riscv_vsmul_vx_i8m4(op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 64 x i8> @test_vsmul_vv_i8m8
// CHECK-RV64-SAME: (<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsmul.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> poison, <vscale x 64 x i8> [[OP1]], <vscale x 64 x i8> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsmul.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> poison, <vscale x 64 x i8> [[OP1]], <vscale x 64 x i8> [[OP2]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vsmul_vv_i8m8(vint8m8_t op1, vint8m8_t op2, size_t vl) {
- return __riscv_vsmul_vv_i8m8(op1, op2, vl);
+ return __riscv_vsmul_vv_i8m8(op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 64 x i8> @test_vsmul_vx_i8m8
// CHECK-RV64-SAME: (<vscale x 64 x i8> [[OP1:%.*]], i8 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsmul.nxv64i8.i8.i64(<vscale x 64 x i8> poison, <vscale x 64 x i8> [[OP1]], i8 [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsmul.nxv64i8.i8.i64(<vscale x 64 x i8> poison, <vscale x 64 x i8> [[OP1]], i8 [[OP2]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vsmul_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) {
- return __riscv_vsmul_vx_i8m8(op1, op2, vl);
+ return __riscv_vsmul_vx_i8m8(op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i16> @test_vsmul_vv_i16mf4
// CHECK-RV64-SAME: (<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsmul.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> poison, <vscale x 1 x i16> [[OP1]], <vscale x 1 x i16> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsmul.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> poison, <vscale x 1 x i16> [[OP1]], <vscale x 1 x i16> [[OP2]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vsmul_vv_i16mf4(vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
- return __riscv_vsmul_vv_i16mf4(op1, op2, vl);
+ return __riscv_vsmul_vv_i16mf4(op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i16> @test_vsmul_vx_i16mf4
// CHECK-RV64-SAME: (<vscale x 1 x i16> [[OP1:%.*]], i16 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsmul.nxv1i16.i16.i64(<vscale x 1 x i16> poison, <vscale x 1 x i16> [[OP1]], i16 [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsmul.nxv1i16.i16.i64(<vscale x 1 x i16> poison, <vscale x 1 x i16> [[OP1]], i16 [[OP2]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vsmul_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) {
- return __riscv_vsmul_vx_i16mf4(op1, op2, vl);
+ return __riscv_vsmul_vx_i16mf4(op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i16> @test_vsmul_vv_i16mf2
// CHECK-RV64-SAME: (<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsmul.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> poison, <vscale x 2 x i16> [[OP1]], <vscale x 2 x i16> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsmul.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> poison, <vscale x 2 x i16> [[OP1]], <vscale x 2 x i16> [[OP2]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vsmul_vv_i16mf2(vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
- return __riscv_vsmul_vv_i16mf2(op1, op2, vl);
+ return __riscv_vsmul_vv_i16mf2(op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i16> @test_vsmul_vx_i16mf2
// CHECK-RV64-SAME: (<vscale x 2 x i16> [[OP1:%.*]], i16 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsmul.nxv2i16.i16.i64(<vscale x 2 x i16> poison, <vscale x 2 x i16> [[OP1]], i16 [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsmul.nxv2i16.i16.i64(<vscale x 2 x i16> poison, <vscale x 2 x i16> [[OP1]], i16 [[OP2]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vsmul_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) {
- return __riscv_vsmul_vx_i16mf2(op1, op2, vl);
+ return __riscv_vsmul_vx_i16mf2(op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vsmul_vv_i16m1
// CHECK-RV64-SAME: (<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsmul.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> poison, <vscale x 4 x i16> [[OP1]], <vscale x 4 x i16> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsmul.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> poison, <vscale x 4 x i16> [[OP1]], <vscale x 4 x i16> [[OP2]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vsmul_vv_i16m1(vint16m1_t op1, vint16m1_t op2, size_t vl) {
- return __riscv_vsmul_vv_i16m1(op1, op2, vl);
+ return __riscv_vsmul_vv_i16m1(op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vsmul_vx_i16m1
// CHECK-RV64-SAME: (<vscale x 4 x i16> [[OP1:%.*]], i16 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsmul.nxv4i16.i16.i64(<vscale x 4 x i16> poison, <vscale x 4 x i16> [[OP1]], i16 [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsmul.nxv4i16.i16.i64(<vscale x 4 x i16> poison, <vscale x 4 x i16> [[OP1]], i16 [[OP2]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vsmul_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) {
- return __riscv_vsmul_vx_i16m1(op1, op2, vl);
+ return __riscv_vsmul_vx_i16m1(op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i16> @test_vsmul_vv_i16m2
// CHECK-RV64-SAME: (<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsmul.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> poison, <vscale x 8 x i16> [[OP1]], <vscale x 8 x i16> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsmul.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> poison, <vscale x 8 x i16> [[OP1]], <vscale x 8 x i16> [[OP2]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vsmul_vv_i16m2(vint16m2_t op1, vint16m2_t op2, size_t vl) {
- return __riscv_vsmul_vv_i16m2(op1, op2, vl);
+ return __riscv_vsmul_vv_i16m2(op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i16> @test_vsmul_vx_i16m2
// CHECK-RV64-SAME: (<vscale x 8 x i16> [[OP1:%.*]], i16 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsmul.nxv8i16.i16.i64(<vscale x 8 x i16> poison, <vscale x 8 x i16> [[OP1]], i16 [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsmul.nxv8i16.i16.i64(<vscale x 8 x i16> poison, <vscale x 8 x i16> [[OP1]], i16 [[OP2]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vsmul_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) {
- return __riscv_vsmul_vx_i16m2(op1, op2, vl);
+ return __riscv_vsmul_vx_i16m2(op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i16> @test_vsmul_vv_i16m4
// CHECK-RV64-SAME: (<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsmul.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> poison, <vscale x 16 x i16> [[OP1]], <vscale x 16 x i16> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsmul.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> poison, <vscale x 16 x i16> [[OP1]], <vscale x 16 x i16> [[OP2]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vsmul_vv_i16m4(vint16m4_t op1, vint16m4_t op2, size_t vl) {
- return __riscv_vsmul_vv_i16m4(op1, op2, vl);
+ return __riscv_vsmul_vv_i16m4(op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i16> @test_vsmul_vx_i16m4
// CHECK-RV64-SAME: (<vscale x 16 x i16> [[OP1:%.*]], i16 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsmul.nxv16i16.i16.i64(<vscale x 16 x i16> poison, <vscale x 16 x i16> [[OP1]], i16 [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsmul.nxv16i16.i16.i64(<vscale x 16 x i16> poison, <vscale x 16 x i16> [[OP1]], i16 [[OP2]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vsmul_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) {
- return __riscv_vsmul_vx_i16m4(op1, op2, vl);
+ return __riscv_vsmul_vx_i16m4(op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i16> @test_vsmul_vv_i16m8
// CHECK-RV64-SAME: (<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsmul.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> poison, <vscale x 32 x i16> [[OP1]], <vscale x 32 x i16> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsmul.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> poison, <vscale x 32 x i16> [[OP1]], <vscale x 32 x i16> [[OP2]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vsmul_vv_i16m8(vint16m8_t op1, vint16m8_t op2, size_t vl) {
- return __riscv_vsmul_vv_i16m8(op1, op2, vl);
+ return __riscv_vsmul_vv_i16m8(op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i16> @test_vsmul_vx_i16m8
// CHECK-RV64-SAME: (<vscale x 32 x i16> [[OP1:%.*]], i16 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsmul.nxv32i16.i16.i64(<vscale x 32 x i16> poison, <vscale x 32 x i16> [[OP1]], i16 [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsmul.nxv32i16.i16.i64(<vscale x 32 x i16> poison, <vscale x 32 x i16> [[OP1]], i16 [[OP2]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vsmul_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) {
- return __riscv_vsmul_vx_i16m8(op1, op2, vl);
+ return __riscv_vsmul_vx_i16m8(op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i32> @test_vsmul_vv_i32mf2
// CHECK-RV64-SAME: (<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsmul.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1]], <vscale x 1 x i32> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsmul.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1]], <vscale x 1 x i32> [[OP2]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vsmul_vv_i32mf2(vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
- return __riscv_vsmul_vv_i32mf2(op1, op2, vl);
+ return __riscv_vsmul_vv_i32mf2(op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i32> @test_vsmul_vx_i32mf2
// CHECK-RV64-SAME: (<vscale x 1 x i32> [[OP1:%.*]], i32 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsmul.nxv1i32.i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1]], i32 [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsmul.nxv1i32.i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1]], i32 [[OP2]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vsmul_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) {
- return __riscv_vsmul_vx_i32mf2(op1, op2, vl);
+ return __riscv_vsmul_vx_i32mf2(op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i32> @test_vsmul_vv_i32m1
// CHECK-RV64-SAME: (<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsmul.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> poison, <vscale x 2 x i32> [[OP1]], <vscale x 2 x i32> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsmul.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> poison, <vscale x 2 x i32> [[OP1]], <vscale x 2 x i32> [[OP2]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vsmul_vv_i32m1(vint32m1_t op1, vint32m1_t op2, size_t vl) {
- return __riscv_vsmul_vv_i32m1(op1, op2, vl);
+ return __riscv_vsmul_vv_i32m1(op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i32> @test_vsmul_vx_i32m1
// CHECK-RV64-SAME: (<vscale x 2 x i32> [[OP1:%.*]], i32 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsmul.nxv2i32.i32.i64(<vscale x 2 x i32> poison, <vscale x 2 x i32> [[OP1]], i32 [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsmul.nxv2i32.i32.i64(<vscale x 2 x i32> poison, <vscale x 2 x i32> [[OP1]], i32 [[OP2]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vsmul_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) {
- return __riscv_vsmul_vx_i32m1(op1, op2, vl);
+ return __riscv_vsmul_vx_i32m1(op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i32> @test_vsmul_vv_i32m2
// CHECK-RV64-SAME: (<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsmul.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> poison, <vscale x 4 x i32> [[OP1]], <vscale x 4 x i32> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsmul.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> poison, <vscale x 4 x i32> [[OP1]], <vscale x 4 x i32> [[OP2]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vsmul_vv_i32m2(vint32m2_t op1, vint32m2_t op2, size_t vl) {
- return __riscv_vsmul_vv_i32m2(op1, op2, vl);
+ return __riscv_vsmul_vv_i32m2(op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i32> @test_vsmul_vx_i32m2
// CHECK-RV64-SAME: (<vscale x 4 x i32> [[OP1:%.*]], i32 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsmul.nxv4i32.i32.i64(<vscale x 4 x i32> poison, <vscale x 4 x i32> [[OP1]], i32 [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsmul.nxv4i32.i32.i64(<vscale x 4 x i32> poison, <vscale x 4 x i32> [[OP1]], i32 [[OP2]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vsmul_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) {
- return __riscv_vsmul_vx_i32m2(op1, op2, vl);
+ return __riscv_vsmul_vx_i32m2(op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i32> @test_vsmul_vv_i32m4
// CHECK-RV64-SAME: (<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsmul.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> poison, <vscale x 8 x i32> [[OP1]], <vscale x 8 x i32> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsmul.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> poison, <vscale x 8 x i32> [[OP1]], <vscale x 8 x i32> [[OP2]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vsmul_vv_i32m4(vint32m4_t op1, vint32m4_t op2, size_t vl) {
- return __riscv_vsmul_vv_i32m4(op1, op2, vl);
+ return __riscv_vsmul_vv_i32m4(op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i32> @test_vsmul_vx_i32m4
// CHECK-RV64-SAME: (<vscale x 8 x i32> [[OP1:%.*]], i32 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsmul.nxv8i32.i32.i64(<vscale x 8 x i32> poison, <vscale x 8 x i32> [[OP1]], i32 [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsmul.nxv8i32.i32.i64(<vscale x 8 x i32> poison, <vscale x 8 x i32> [[OP1]], i32 [[OP2]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vsmul_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) {
- return __riscv_vsmul_vx_i32m4(op1, op2, vl);
+ return __riscv_vsmul_vx_i32m4(op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i32> @test_vsmul_vv_i32m8
// CHECK-RV64-SAME: (<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsmul.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> poison, <vscale x 16 x i32> [[OP1]], <vscale x 16 x i32> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsmul.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> poison, <vscale x 16 x i32> [[OP1]], <vscale x 16 x i32> [[OP2]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vsmul_vv_i32m8(vint32m8_t op1, vint32m8_t op2, size_t vl) {
- return __riscv_vsmul_vv_i32m8(op1, op2, vl);
+ return __riscv_vsmul_vv_i32m8(op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i32> @test_vsmul_vx_i32m8
// CHECK-RV64-SAME: (<vscale x 16 x i32> [[OP1:%.*]], i32 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsmul.nxv16i32.i32.i64(<vscale x 16 x i32> poison, <vscale x 16 x i32> [[OP1]], i32 [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsmul.nxv16i32.i32.i64(<vscale x 16 x i32> poison, <vscale x 16 x i32> [[OP1]], i32 [[OP2]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vsmul_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) {
- return __riscv_vsmul_vx_i32m8(op1, op2, vl);
+ return __riscv_vsmul_vx_i32m8(op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i64> @test_vsmul_vv_i64m1
// CHECK-RV64-SAME: (<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsmul.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> poison, <vscale x 1 x i64> [[OP1]], <vscale x 1 x i64> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsmul.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> poison, <vscale x 1 x i64> [[OP1]], <vscale x 1 x i64> [[OP2]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vsmul_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) {
- return __riscv_vsmul_vv_i64m1(op1, op2, vl);
+ return __riscv_vsmul_vv_i64m1(op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i64> @test_vsmul_vx_i64m1
// CHECK-RV64-SAME: (<vscale x 1 x i64> [[OP1:%.*]], i64 noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsmul.nxv1i64.i64.i64(<vscale x 1 x i64> poison, <vscale x 1 x i64> [[OP1]], i64 [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsmul.nxv1i64.i64.i64(<vscale x 1 x i64> poison, <vscale x 1 x i64> [[OP1]], i64 [[OP2]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vsmul_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) {
- return __riscv_vsmul_vx_i64m1(op1, op2, vl);
+ return __riscv_vsmul_vx_i64m1(op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i64> @test_vsmul_vv_i64m2
// CHECK-RV64-SAME: (<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsmul.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> poison, <vscale x 2 x i64> [[OP1]], <vscale x 2 x i64> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsmul.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> poison, <vscale x 2 x i64> [[OP1]], <vscale x 2 x i64> [[OP2]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vsmul_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) {
- return __riscv_vsmul_vv_i64m2(op1, op2, vl);
+ return __riscv_vsmul_vv_i64m2(op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i64> @test_vsmul_vx_i64m2
// CHECK-RV64-SAME: (<vscale x 2 x i64> [[OP1:%.*]], i64 noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsmul.nxv2i64.i64.i64(<vscale x 2 x i64> poison, <vscale x 2 x i64> [[OP1]], i64 [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsmul.nxv2i64.i64.i64(<vscale x 2 x i64> poison, <vscale x 2 x i64> [[OP1]], i64 [[OP2]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vsmul_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) {
- return __riscv_vsmul_vx_i64m2(op1, op2, vl);
+ return __riscv_vsmul_vx_i64m2(op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i64> @test_vsmul_vv_i64m4
// CHECK-RV64-SAME: (<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsmul.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> poison, <vscale x 4 x i64> [[OP1]], <vscale x 4 x i64> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsmul.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> poison, <vscale x 4 x i64> [[OP1]], <vscale x 4 x i64> [[OP2]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vsmul_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) {
- return __riscv_vsmul_vv_i64m4(op1, op2, vl);
+ return __riscv_vsmul_vv_i64m4(op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i64> @test_vsmul_vx_i64m4
// CHECK-RV64-SAME: (<vscale x 4 x i64> [[OP1:%.*]], i64 noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsmul.nxv4i64.i64.i64(<vscale x 4 x i64> poison, <vscale x 4 x i64> [[OP1]], i64 [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsmul.nxv4i64.i64.i64(<vscale x 4 x i64> poison, <vscale x 4 x i64> [[OP1]], i64 [[OP2]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vsmul_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) {
- return __riscv_vsmul_vx_i64m4(op1, op2, vl);
+ return __riscv_vsmul_vx_i64m4(op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i64> @test_vsmul_vv_i64m8
// CHECK-RV64-SAME: (<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsmul.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> poison, <vscale x 8 x i64> [[OP1]], <vscale x 8 x i64> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsmul.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> poison, <vscale x 8 x i64> [[OP1]], <vscale x 8 x i64> [[OP2]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vsmul_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) {
- return __riscv_vsmul_vv_i64m8(op1, op2, vl);
+ return __riscv_vsmul_vv_i64m8(op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i64> @test_vsmul_vx_i64m8
// CHECK-RV64-SAME: (<vscale x 8 x i64> [[OP1:%.*]], i64 noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsmul.nxv8i64.i64.i64(<vscale x 8 x i64> poison, <vscale x 8 x i64> [[OP1]], i64 [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsmul.nxv8i64.i64.i64(<vscale x 8 x i64> poison, <vscale x 8 x i64> [[OP1]], i64 [[OP2]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vsmul_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) {
- return __riscv_vsmul_vx_i64m8(op1, op2, vl);
+ return __riscv_vsmul_vx_i64m8(op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vsmul_vv_i8mf8_m
// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsmul.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> poison, <vscale x 1 x i8> [[OP1]], <vscale x 1 x i8> [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsmul.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> poison, <vscale x 1 x i8> [[OP1]], <vscale x 1 x i8> [[OP2]], <vscale x 1 x i1> [[MASK]], i64 0, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vsmul_vv_i8mf8_m(vbool64_t mask, vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
- return __riscv_vsmul_vv_i8mf8_m(mask, op1, op2, vl);
+ return __riscv_vsmul_vv_i8mf8_m(mask, op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vsmul_vx_i8mf8_m
// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i8 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsmul.mask.nxv1i8.i8.i64(<vscale x 1 x i8> poison, <vscale x 1 x i8> [[OP1]], i8 [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsmul.mask.nxv1i8.i8.i64(<vscale x 1 x i8> poison, <vscale x 1 x i8> [[OP1]], i8 [[OP2]], <vscale x 1 x i1> [[MASK]], i64 0, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vsmul_vx_i8mf8_m(vbool64_t mask, vint8mf8_t op1, int8_t op2, size_t vl) {
- return __riscv_vsmul_vx_i8mf8_m(mask, op1, op2, vl);
+ return __riscv_vsmul_vx_i8mf8_m(mask, op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vsmul_vv_i8mf4_m
// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsmul.mask.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> poison, <vscale x 2 x i8> [[OP1]], <vscale x 2 x i8> [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsmul.mask.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> poison, <vscale x 2 x i8> [[OP1]], <vscale x 2 x i8> [[OP2]], <vscale x 2 x i1> [[MASK]], i64 0, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vsmul_vv_i8mf4_m(vbool32_t mask, vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
- return __riscv_vsmul_vv_i8mf4_m(mask, op1, op2, vl);
+ return __riscv_vsmul_vv_i8mf4_m(mask, op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vsmul_vx_i8mf4_m
// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i8> [[OP1:%.*]], i8 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsmul.mask.nxv2i8.i8.i64(<vscale x 2 x i8> poison, <vscale x 2 x i8> [[OP1]], i8 [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsmul.mask.nxv2i8.i8.i64(<vscale x 2 x i8> poison, <vscale x 2 x i8> [[OP1]], i8 [[OP2]], <vscale x 2 x i1> [[MASK]], i64 0, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vsmul_vx_i8mf4_m(vbool32_t mask, vint8mf4_t op1, int8_t op2, size_t vl) {
- return __riscv_vsmul_vx_i8mf4_m(mask, op1, op2, vl);
+ return __riscv_vsmul_vx_i8mf4_m(mask, op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vsmul_vv_i8mf2_m
// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsmul.mask.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> poison, <vscale x 4 x i8> [[OP1]], <vscale x 4 x i8> [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsmul.mask.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> poison, <vscale x 4 x i8> [[OP1]], <vscale x 4 x i8> [[OP2]], <vscale x 4 x i1> [[MASK]], i64 0, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vsmul_vv_i8mf2_m(vbool16_t mask, vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
- return __riscv_vsmul_vv_i8mf2_m(mask, op1, op2, vl);
+ return __riscv_vsmul_vv_i8mf2_m(mask, op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vsmul_vx_i8mf2_m
// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i8> [[OP1:%.*]], i8 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsmul.mask.nxv4i8.i8.i64(<vscale x 4 x i8> poison, <vscale x 4 x i8> [[OP1]], i8 [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsmul.mask.nxv4i8.i8.i64(<vscale x 4 x i8> poison, <vscale x 4 x i8> [[OP1]], i8 [[OP2]], <vscale x 4 x i1> [[MASK]], i64 0, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vsmul_vx_i8mf2_m(vbool16_t mask, vint8mf2_t op1, int8_t op2, size_t vl) {
- return __riscv_vsmul_vx_i8mf2_m(mask, op1, op2, vl);
+ return __riscv_vsmul_vx_i8mf2_m(mask, op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vsmul_vv_i8m1_m
// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vsmul.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> poison, <vscale x 8 x i8> [[OP1]], <vscale x 8 x i8> [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vsmul.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> poison, <vscale x 8 x i8> [[OP1]], <vscale x 8 x i8> [[OP2]], <vscale x 8 x i1> [[MASK]], i64 0, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vsmul_vv_i8m1_m(vbool8_t mask, vint8m1_t op1, vint8m1_t op2, size_t vl) {
- return __riscv_vsmul_vv_i8m1_m(mask, op1, op2, vl);
+ return __riscv_vsmul_vv_i8m1_m(mask, op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vsmul_vx_i8m1_m
// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i8> [[OP1:%.*]], i8 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vsmul.mask.nxv8i8.i8.i64(<vscale x 8 x i8> poison, <vscale x 8 x i8> [[OP1]], i8 [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vsmul.mask.nxv8i8.i8.i64(<vscale x 8 x i8> poison, <vscale x 8 x i8> [[OP1]], i8 [[OP2]], <vscale x 8 x i1> [[MASK]], i64 0, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vsmul_vx_i8m1_m(vbool8_t mask, vint8m1_t op1, int8_t op2, size_t vl) {
- return __riscv_vsmul_vx_i8m1_m(mask, op1, op2, vl);
+ return __riscv_vsmul_vx_i8m1_m(mask, op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vsmul_vv_i8m2_m
// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vsmul.mask.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> poison, <vscale x 16 x i8> [[OP1]], <vscale x 16 x i8> [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vsmul.mask.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> poison, <vscale x 16 x i8> [[OP1]], <vscale x 16 x i8> [[OP2]], <vscale x 16 x i1> [[MASK]], i64 0, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vsmul_vv_i8m2_m(vbool4_t mask, vint8m2_t op1, vint8m2_t op2, size_t vl) {
- return __riscv_vsmul_vv_i8m2_m(mask, op1, op2, vl);
+ return __riscv_vsmul_vv_i8m2_m(mask, op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vsmul_vx_i8m2_m
// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x i8> [[OP1:%.*]], i8 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vsmul.mask.nxv16i8.i8.i64(<vscale x 16 x i8> poison, <vscale x 16 x i8> [[OP1]], i8 [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vsmul.mask.nxv16i8.i8.i64(<vscale x 16 x i8> poison, <vscale x 16 x i8> [[OP1]], i8 [[OP2]], <vscale x 16 x i1> [[MASK]], i64 0, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vsmul_vx_i8m2_m(vbool4_t mask, vint8m2_t op1, int8_t op2, size_t vl) {
- return __riscv_vsmul_vx_i8m2_m(mask, op1, op2, vl);
+ return __riscv_vsmul_vx_i8m2_m(mask, op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vsmul_vv_i8m4_m
// CHECK-RV64-SAME: (<vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsmul.mask.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> poison, <vscale x 32 x i8> [[OP1]], <vscale x 32 x i8> [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsmul.mask.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> poison, <vscale x 32 x i8> [[OP1]], <vscale x 32 x i8> [[OP2]], <vscale x 32 x i1> [[MASK]], i64 0, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vsmul_vv_i8m4_m(vbool2_t mask, vint8m4_t op1, vint8m4_t op2, size_t vl) {
- return __riscv_vsmul_vv_i8m4_m(mask, op1, op2, vl);
+ return __riscv_vsmul_vv_i8m4_m(mask, op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vsmul_vx_i8m4_m
// CHECK-RV64-SAME: (<vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x i8> [[OP1:%.*]], i8 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsmul.mask.nxv32i8.i8.i64(<vscale x 32 x i8> poison, <vscale x 32 x i8> [[OP1]], i8 [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsmul.mask.nxv32i8.i8.i64(<vscale x 32 x i8> poison, <vscale x 32 x i8> [[OP1]], i8 [[OP2]], <vscale x 32 x i1> [[MASK]], i64 0, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vsmul_vx_i8m4_m(vbool2_t mask, vint8m4_t op1, int8_t op2, size_t vl) {
- return __riscv_vsmul_vx_i8m4_m(mask, op1, op2, vl);
+ return __riscv_vsmul_vx_i8m4_m(mask, op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 64 x i8> @test_vsmul_vv_i8m8_m
// CHECK-RV64-SAME: (<vscale x 64 x i1> [[MASK:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsmul.mask.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> poison, <vscale x 64 x i8> [[OP1]], <vscale x 64 x i8> [[OP2]], <vscale x 64 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsmul.mask.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> poison, <vscale x 64 x i8> [[OP1]], <vscale x 64 x i8> [[OP2]], <vscale x 64 x i1> [[MASK]], i64 0, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vsmul_vv_i8m8_m(vbool1_t mask, vint8m8_t op1, vint8m8_t op2, size_t vl) {
- return __riscv_vsmul_vv_i8m8_m(mask, op1, op2, vl);
+ return __riscv_vsmul_vv_i8m8_m(mask, op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 64 x i8> @test_vsmul_vx_i8m8_m
// CHECK-RV64-SAME: (<vscale x 64 x i1> [[MASK:%.*]], <vscale x 64 x i8> [[OP1:%.*]], i8 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsmul.mask.nxv64i8.i8.i64(<vscale x 64 x i8> poison, <vscale x 64 x i8> [[OP1]], i8 [[OP2]], <vscale x 64 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsmul.mask.nxv64i8.i8.i64(<vscale x 64 x i8> poison, <vscale x 64 x i8> [[OP1]], i8 [[OP2]], <vscale x 64 x i1> [[MASK]], i64 0, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vsmul_vx_i8m8_m(vbool1_t mask, vint8m8_t op1, int8_t op2, size_t vl) {
- return __riscv_vsmul_vx_i8m8_m(mask, op1, op2, vl);
+ return __riscv_vsmul_vx_i8m8_m(mask, op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i16> @test_vsmul_vv_i16mf4_m
// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsmul.mask.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> poison, <vscale x 1 x i16> [[OP1]], <vscale x 1 x i16> [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsmul.mask.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> poison, <vscale x 1 x i16> [[OP1]], <vscale x 1 x i16> [[OP2]], <vscale x 1 x i1> [[MASK]], i64 0, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vsmul_vv_i16mf4_m(vbool64_t mask, vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
- return __riscv_vsmul_vv_i16mf4_m(mask, op1, op2, vl);
+ return __riscv_vsmul_vv_i16mf4_m(mask, op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i16> @test_vsmul_vx_i16mf4_m
// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i16 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsmul.mask.nxv1i16.i16.i64(<vscale x 1 x i16> poison, <vscale x 1 x i16> [[OP1]], i16 [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsmul.mask.nxv1i16.i16.i64(<vscale x 1 x i16> poison, <vscale x 1 x i16> [[OP1]], i16 [[OP2]], <vscale x 1 x i1> [[MASK]], i64 0, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vsmul_vx_i16mf4_m(vbool64_t mask, vint16mf4_t op1, int16_t op2, size_t vl) {
- return __riscv_vsmul_vx_i16mf4_m(mask, op1, op2, vl);
+ return __riscv_vsmul_vx_i16mf4_m(mask, op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i16> @test_vsmul_vv_i16mf2_m
// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsmul.mask.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> poison, <vscale x 2 x i16> [[OP1]], <vscale x 2 x i16> [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsmul.mask.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> poison, <vscale x 2 x i16> [[OP1]], <vscale x 2 x i16> [[OP2]], <vscale x 2 x i1> [[MASK]], i64 0, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vsmul_vv_i16mf2_m(vbool32_t mask, vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
- return __riscv_vsmul_vv_i16mf2_m(mask, op1, op2, vl);
+ return __riscv_vsmul_vv_i16mf2_m(mask, op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i16> @test_vsmul_vx_i16mf2_m
// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i16 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsmul.mask.nxv2i16.i16.i64(<vscale x 2 x i16> poison, <vscale x 2 x i16> [[OP1]], i16 [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsmul.mask.nxv2i16.i16.i64(<vscale x 2 x i16> poison, <vscale x 2 x i16> [[OP1]], i16 [[OP2]], <vscale x 2 x i1> [[MASK]], i64 0, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vsmul_vx_i16mf2_m(vbool32_t mask, vint16mf2_t op1, int16_t op2, size_t vl) {
- return __riscv_vsmul_vx_i16mf2_m(mask, op1, op2, vl);
+ return __riscv_vsmul_vx_i16mf2_m(mask, op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vsmul_vv_i16m1_m
// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsmul.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> poison, <vscale x 4 x i16> [[OP1]], <vscale x 4 x i16> [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsmul.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> poison, <vscale x 4 x i16> [[OP1]], <vscale x 4 x i16> [[OP2]], <vscale x 4 x i1> [[MASK]], i64 0, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vsmul_vv_i16m1_m(vbool16_t mask, vint16m1_t op1, vint16m1_t op2, size_t vl) {
- return __riscv_vsmul_vv_i16m1_m(mask, op1, op2, vl);
+ return __riscv_vsmul_vv_i16m1_m(mask, op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vsmul_vx_i16m1_m
// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i16 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsmul.mask.nxv4i16.i16.i64(<vscale x 4 x i16> poison, <vscale x 4 x i16> [[OP1]], i16 [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsmul.mask.nxv4i16.i16.i64(<vscale x 4 x i16> poison, <vscale x 4 x i16> [[OP1]], i16 [[OP2]], <vscale x 4 x i1> [[MASK]], i64 0, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vsmul_vx_i16m1_m(vbool16_t mask, vint16m1_t op1, int16_t op2, size_t vl) {
- return __riscv_vsmul_vx_i16m1_m(mask, op1, op2, vl);
+ return __riscv_vsmul_vx_i16m1_m(mask, op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i16> @test_vsmul_vv_i16m2_m
// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsmul.mask.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> poison, <vscale x 8 x i16> [[OP1]], <vscale x 8 x i16> [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsmul.mask.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> poison, <vscale x 8 x i16> [[OP1]], <vscale x 8 x i16> [[OP2]], <vscale x 8 x i1> [[MASK]], i64 0, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vsmul_vv_i16m2_m(vbool8_t mask, vint16m2_t op1, vint16m2_t op2, size_t vl) {
- return __riscv_vsmul_vv_i16m2_m(mask, op1, op2, vl);
+ return __riscv_vsmul_vv_i16m2_m(mask, op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i16> @test_vsmul_vx_i16m2_m
// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i16 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsmul.mask.nxv8i16.i16.i64(<vscale x 8 x i16> poison, <vscale x 8 x i16> [[OP1]], i16 [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsmul.mask.nxv8i16.i16.i64(<vscale x 8 x i16> poison, <vscale x 8 x i16> [[OP1]], i16 [[OP2]], <vscale x 8 x i1> [[MASK]], i64 0, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vsmul_vx_i16m2_m(vbool8_t mask, vint16m2_t op1, int16_t op2, size_t vl) {
- return __riscv_vsmul_vx_i16m2_m(mask, op1, op2, vl);
+ return __riscv_vsmul_vx_i16m2_m(mask, op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i16> @test_vsmul_vv_i16m4_m
// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsmul.mask.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> poison, <vscale x 16 x i16> [[OP1]], <vscale x 16 x i16> [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsmul.mask.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> poison, <vscale x 16 x i16> [[OP1]], <vscale x 16 x i16> [[OP2]], <vscale x 16 x i1> [[MASK]], i64 0, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vsmul_vv_i16m4_m(vbool4_t mask, vint16m4_t op1, vint16m4_t op2, size_t vl) {
- return __riscv_vsmul_vv_i16m4_m(mask, op1, op2, vl);
+ return __riscv_vsmul_vv_i16m4_m(mask, op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i16> @test_vsmul_vx_i16m4_m
// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i16 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsmul.mask.nxv16i16.i16.i64(<vscale x 16 x i16> poison, <vscale x 16 x i16> [[OP1]], i16 [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsmul.mask.nxv16i16.i16.i64(<vscale x 16 x i16> poison, <vscale x 16 x i16> [[OP1]], i16 [[OP2]], <vscale x 16 x i1> [[MASK]], i64 0, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vsmul_vx_i16m4_m(vbool4_t mask, vint16m4_t op1, int16_t op2, size_t vl) {
- return __riscv_vsmul_vx_i16m4_m(mask, op1, op2, vl);
+ return __riscv_vsmul_vx_i16m4_m(mask, op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i16> @test_vsmul_vv_i16m8_m
// CHECK-RV64-SAME: (<vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsmul.mask.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> poison, <vscale x 32 x i16> [[OP1]], <vscale x 32 x i16> [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsmul.mask.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> poison, <vscale x 32 x i16> [[OP1]], <vscale x 32 x i16> [[OP2]], <vscale x 32 x i1> [[MASK]], i64 0, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vsmul_vv_i16m8_m(vbool2_t mask, vint16m8_t op1, vint16m8_t op2, size_t vl) {
- return __riscv_vsmul_vv_i16m8_m(mask, op1, op2, vl);
+ return __riscv_vsmul_vv_i16m8_m(mask, op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i16> @test_vsmul_vx_i16m8_m
// CHECK-RV64-SAME: (<vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i16 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsmul.mask.nxv32i16.i16.i64(<vscale x 32 x i16> poison, <vscale x 32 x i16> [[OP1]], i16 [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsmul.mask.nxv32i16.i16.i64(<vscale x 32 x i16> poison, <vscale x 32 x i16> [[OP1]], i16 [[OP2]], <vscale x 32 x i1> [[MASK]], i64 0, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vsmul_vx_i16m8_m(vbool2_t mask, vint16m8_t op1, int16_t op2, size_t vl) {
- return __riscv_vsmul_vx_i16m8_m(mask, op1, op2, vl);
+ return __riscv_vsmul_vx_i16m8_m(mask, op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i32> @test_vsmul_vv_i32mf2_m
// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsmul.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1]], <vscale x 1 x i32> [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsmul.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1]], <vscale x 1 x i32> [[OP2]], <vscale x 1 x i1> [[MASK]], i64 0, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vsmul_vv_i32mf2_m(vbool64_t mask, vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
- return __riscv_vsmul_vv_i32mf2_m(mask, op1, op2, vl);
+ return __riscv_vsmul_vv_i32mf2_m(mask, op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i32> @test_vsmul_vx_i32mf2_m
// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsmul.mask.nxv1i32.i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1]], i32 [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsmul.mask.nxv1i32.i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1]], i32 [[OP2]], <vscale x 1 x i1> [[MASK]], i64 0, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vsmul_vx_i32mf2_m(vbool64_t mask, vint32mf2_t op1, int32_t op2, size_t vl) {
- return __riscv_vsmul_vx_i32mf2_m(mask, op1, op2, vl);
+ return __riscv_vsmul_vx_i32mf2_m(mask, op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i32> @test_vsmul_vv_i32m1_m
// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsmul.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> poison, <vscale x 2 x i32> [[OP1]], <vscale x 2 x i32> [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsmul.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> poison, <vscale x 2 x i32> [[OP1]], <vscale x 2 x i32> [[OP2]], <vscale x 2 x i1> [[MASK]], i64 0, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vsmul_vv_i32m1_m(vbool32_t mask, vint32m1_t op1, vint32m1_t op2, size_t vl) {
- return __riscv_vsmul_vv_i32m1_m(mask, op1, op2, vl);
+ return __riscv_vsmul_vv_i32m1_m(mask, op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i32> @test_vsmul_vx_i32m1_m
// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i32 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsmul.mask.nxv2i32.i32.i64(<vscale x 2 x i32> poison, <vscale x 2 x i32> [[OP1]], i32 [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsmul.mask.nxv2i32.i32.i64(<vscale x 2 x i32> poison, <vscale x 2 x i32> [[OP1]], i32 [[OP2]], <vscale x 2 x i1> [[MASK]], i64 0, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vsmul_vx_i32m1_m(vbool32_t mask, vint32m1_t op1, int32_t op2, size_t vl) {
- return __riscv_vsmul_vx_i32m1_m(mask, op1, op2, vl);
+ return __riscv_vsmul_vx_i32m1_m(mask, op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i32> @test_vsmul_vv_i32m2_m
// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsmul.mask.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> poison, <vscale x 4 x i32> [[OP1]], <vscale x 4 x i32> [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsmul.mask.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> poison, <vscale x 4 x i32> [[OP1]], <vscale x 4 x i32> [[OP2]], <vscale x 4 x i1> [[MASK]], i64 0, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vsmul_vv_i32m2_m(vbool16_t mask, vint32m2_t op1, vint32m2_t op2, size_t vl) {
- return __riscv_vsmul_vv_i32m2_m(mask, op1, op2, vl);
+ return __riscv_vsmul_vv_i32m2_m(mask, op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i32> @test_vsmul_vx_i32m2_m
// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i32 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsmul.mask.nxv4i32.i32.i64(<vscale x 4 x i32> poison, <vscale x 4 x i32> [[OP1]], i32 [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsmul.mask.nxv4i32.i32.i64(<vscale x 4 x i32> poison, <vscale x 4 x i32> [[OP1]], i32 [[OP2]], <vscale x 4 x i1> [[MASK]], i64 0, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vsmul_vx_i32m2_m(vbool16_t mask, vint32m2_t op1, int32_t op2, size_t vl) {
- return __riscv_vsmul_vx_i32m2_m(mask, op1, op2, vl);
+ return __riscv_vsmul_vx_i32m2_m(mask, op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i32> @test_vsmul_vv_i32m4_m
// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsmul.mask.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> poison, <vscale x 8 x i32> [[OP1]], <vscale x 8 x i32> [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsmul.mask.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> poison, <vscale x 8 x i32> [[OP1]], <vscale x 8 x i32> [[OP2]], <vscale x 8 x i1> [[MASK]], i64 0, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vsmul_vv_i32m4_m(vbool8_t mask, vint32m4_t op1, vint32m4_t op2, size_t vl) {
- return __riscv_vsmul_vv_i32m4_m(mask, op1, op2, vl);
+ return __riscv_vsmul_vv_i32m4_m(mask, op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i32> @test_vsmul_vx_i32m4_m
// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i32 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsmul.mask.nxv8i32.i32.i64(<vscale x 8 x i32> poison, <vscale x 8 x i32> [[OP1]], i32 [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsmul.mask.nxv8i32.i32.i64(<vscale x 8 x i32> poison, <vscale x 8 x i32> [[OP1]], i32 [[OP2]], <vscale x 8 x i1> [[MASK]], i64 0, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vsmul_vx_i32m4_m(vbool8_t mask, vint32m4_t op1, int32_t op2, size_t vl) {
- return __riscv_vsmul_vx_i32m4_m(mask, op1, op2, vl);
+ return __riscv_vsmul_vx_i32m4_m(mask, op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i32> @test_vsmul_vv_i32m8_m
// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsmul.mask.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> poison, <vscale x 16 x i32> [[OP1]], <vscale x 16 x i32> [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsmul.mask.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> poison, <vscale x 16 x i32> [[OP1]], <vscale x 16 x i32> [[OP2]], <vscale x 16 x i1> [[MASK]], i64 0, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vsmul_vv_i32m8_m(vbool4_t mask, vint32m8_t op1, vint32m8_t op2, size_t vl) {
- return __riscv_vsmul_vv_i32m8_m(mask, op1, op2, vl);
+ return __riscv_vsmul_vv_i32m8_m(mask, op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i32> @test_vsmul_vx_i32m8_m
// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i32 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsmul.mask.nxv16i32.i32.i64(<vscale x 16 x i32> poison, <vscale x 16 x i32> [[OP1]], i32 [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsmul.mask.nxv16i32.i32.i64(<vscale x 16 x i32> poison, <vscale x 16 x i32> [[OP1]], i32 [[OP2]], <vscale x 16 x i1> [[MASK]], i64 0, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vsmul_vx_i32m8_m(vbool4_t mask, vint32m8_t op1, int32_t op2, size_t vl) {
- return __riscv_vsmul_vx_i32m8_m(mask, op1, op2, vl);
+ return __riscv_vsmul_vx_i32m8_m(mask, op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i64> @test_vsmul_vv_i64m1_m
// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsmul.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> poison, <vscale x 1 x i64> [[OP1]], <vscale x 1 x i64> [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsmul.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> poison, <vscale x 1 x i64> [[OP1]], <vscale x 1 x i64> [[OP2]], <vscale x 1 x i1> [[MASK]], i64 0, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vsmul_vv_i64m1_m(vbool64_t mask, vint64m1_t op1, vint64m1_t op2, size_t vl) {
- return __riscv_vsmul_vv_i64m1_m(mask, op1, op2, vl);
+ return __riscv_vsmul_vv_i64m1_m(mask, op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i64> @test_vsmul_vx_i64m1_m
// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i64 noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsmul.mask.nxv1i64.i64.i64(<vscale x 1 x i64> poison, <vscale x 1 x i64> [[OP1]], i64 [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsmul.mask.nxv1i64.i64.i64(<vscale x 1 x i64> poison, <vscale x 1 x i64> [[OP1]], i64 [[OP2]], <vscale x 1 x i1> [[MASK]], i64 0, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vsmul_vx_i64m1_m(vbool64_t mask, vint64m1_t op1, int64_t op2, size_t vl) {
- return __riscv_vsmul_vx_i64m1_m(mask, op1, op2, vl);
+ return __riscv_vsmul_vx_i64m1_m(mask, op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i64> @test_vsmul_vv_i64m2_m
// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsmul.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> poison, <vscale x 2 x i64> [[OP1]], <vscale x 2 x i64> [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsmul.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> poison, <vscale x 2 x i64> [[OP1]], <vscale x 2 x i64> [[OP2]], <vscale x 2 x i1> [[MASK]], i64 0, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vsmul_vv_i64m2_m(vbool32_t mask, vint64m2_t op1, vint64m2_t op2, size_t vl) {
- return __riscv_vsmul_vv_i64m2_m(mask, op1, op2, vl);
+ return __riscv_vsmul_vv_i64m2_m(mask, op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i64> @test_vsmul_vx_i64m2_m
// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i64 noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsmul.mask.nxv2i64.i64.i64(<vscale x 2 x i64> poison, <vscale x 2 x i64> [[OP1]], i64 [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsmul.mask.nxv2i64.i64.i64(<vscale x 2 x i64> poison, <vscale x 2 x i64> [[OP1]], i64 [[OP2]], <vscale x 2 x i1> [[MASK]], i64 0, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vsmul_vx_i64m2_m(vbool32_t mask, vint64m2_t op1, int64_t op2, size_t vl) {
- return __riscv_vsmul_vx_i64m2_m(mask, op1, op2, vl);
+ return __riscv_vsmul_vx_i64m2_m(mask, op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i64> @test_vsmul_vv_i64m4_m
// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsmul.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> poison, <vscale x 4 x i64> [[OP1]], <vscale x 4 x i64> [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsmul.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> poison, <vscale x 4 x i64> [[OP1]], <vscale x 4 x i64> [[OP2]], <vscale x 4 x i1> [[MASK]], i64 0, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vsmul_vv_i64m4_m(vbool16_t mask, vint64m4_t op1, vint64m4_t op2, size_t vl) {
- return __riscv_vsmul_vv_i64m4_m(mask, op1, op2, vl);
+ return __riscv_vsmul_vv_i64m4_m(mask, op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i64> @test_vsmul_vx_i64m4_m
// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i64 noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsmul.mask.nxv4i64.i64.i64(<vscale x 4 x i64> poison, <vscale x 4 x i64> [[OP1]], i64 [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsmul.mask.nxv4i64.i64.i64(<vscale x 4 x i64> poison, <vscale x 4 x i64> [[OP1]], i64 [[OP2]], <vscale x 4 x i1> [[MASK]], i64 0, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vsmul_vx_i64m4_m(vbool16_t mask, vint64m4_t op1, int64_t op2, size_t vl) {
- return __riscv_vsmul_vx_i64m4_m(mask, op1, op2, vl);
+ return __riscv_vsmul_vx_i64m4_m(mask, op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i64> @test_vsmul_vv_i64m8_m
// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsmul.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> poison, <vscale x 8 x i64> [[OP1]], <vscale x 8 x i64> [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsmul.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> poison, <vscale x 8 x i64> [[OP1]], <vscale x 8 x i64> [[OP2]], <vscale x 8 x i1> [[MASK]], i64 0, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vsmul_vv_i64m8_m(vbool8_t mask, vint64m8_t op1, vint64m8_t op2, size_t vl) {
- return __riscv_vsmul_vv_i64m8_m(mask, op1, op2, vl);
+ return __riscv_vsmul_vv_i64m8_m(mask, op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i64> @test_vsmul_vx_i64m8_m
// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsmul.mask.nxv8i64.i64.i64(<vscale x 8 x i64> poison, <vscale x 8 x i64> [[OP1]], i64 [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsmul.mask.nxv8i64.i64.i64(<vscale x 8 x i64> poison, <vscale x 8 x i64> [[OP1]], i64 [[OP2]], <vscale x 8 x i1> [[MASK]], i64 0, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vsmul_vx_i64m8_m(vbool8_t mask, vint64m8_t op1, int64_t op2, size_t vl) {
- return __riscv_vsmul_vx_i64m8_m(mask, op1, op2, vl);
+ return __riscv_vsmul_vx_i64m8_m(mask, op1, op2, __RISCV_VXRM_RNU, vl);
}
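// NOTE (illustrative sketch, not part of the patch): every masked vsmul hunk
// above applies the same mechanical change -- the C intrinsic gains an explicit
// vxrm argument before vl, and the generated LLVM intrinsic call gains a
// matching i64 rounding-mode operand ahead of the vl operand. A minimal usage
// sketch, assuming the standard <riscv_vector.h> header and reusing only the
// intrinsic and __RISCV_VXRM_RNU names that appear in the tests above; the
// wrapper function itself is hypothetical.
//
//   #include <riscv_vector.h>
//
//   static vint32m1_t smul_rnu(vbool32_t mask, vint32m1_t op1, vint32m1_t op2,
//                              size_t vl) {
//     // Round-to-nearest-up is passed explicitly at the call site instead of
//     // being taken from the ambient vxrm CSR state.
//     return __riscv_vsmul_vv_i32m1_m(mask, op1, op2, __RISCV_VXRM_RNU, vl);
//   }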
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vssra_vv_i8mf8
// CHECK-RV64-SAME: (<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vssra.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> poison, <vscale x 1 x i8> [[OP1]], <vscale x 1 x i8> [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vssra.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> poison, <vscale x 1 x i8> [[OP1]], <vscale x 1 x i8> [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vssra_vv_i8mf8(vint8mf8_t op1, vuint8mf8_t shift, size_t vl) {
- return __riscv_vssra_vv_i8mf8(op1, shift, vl);
+ return __riscv_vssra_vv_i8mf8(op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vssra_vx_i8mf8
// CHECK-RV64-SAME: (<vscale x 1 x i8> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vssra.nxv1i8.i64.i64(<vscale x 1 x i8> poison, <vscale x 1 x i8> [[OP1]], i64 [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vssra.nxv1i8.i64.i64(<vscale x 1 x i8> poison, <vscale x 1 x i8> [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vssra_vx_i8mf8(vint8mf8_t op1, size_t shift, size_t vl) {
- return __riscv_vssra_vx_i8mf8(op1, shift, vl);
+ return __riscv_vssra_vx_i8mf8(op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vssra_vv_i8mf4
// CHECK-RV64-SAME: (<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vssra.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> poison, <vscale x 2 x i8> [[OP1]], <vscale x 2 x i8> [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vssra.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> poison, <vscale x 2 x i8> [[OP1]], <vscale x 2 x i8> [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vssra_vv_i8mf4(vint8mf4_t op1, vuint8mf4_t shift, size_t vl) {
- return __riscv_vssra_vv_i8mf4(op1, shift, vl);
+ return __riscv_vssra_vv_i8mf4(op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vssra_vx_i8mf4
// CHECK-RV64-SAME: (<vscale x 2 x i8> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vssra.nxv2i8.i64.i64(<vscale x 2 x i8> poison, <vscale x 2 x i8> [[OP1]], i64 [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vssra.nxv2i8.i64.i64(<vscale x 2 x i8> poison, <vscale x 2 x i8> [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vssra_vx_i8mf4(vint8mf4_t op1, size_t shift, size_t vl) {
- return __riscv_vssra_vx_i8mf4(op1, shift, vl);
+ return __riscv_vssra_vx_i8mf4(op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vssra_vv_i8mf2
// CHECK-RV64-SAME: (<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vssra.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> poison, <vscale x 4 x i8> [[OP1]], <vscale x 4 x i8> [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vssra.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> poison, <vscale x 4 x i8> [[OP1]], <vscale x 4 x i8> [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vssra_vv_i8mf2(vint8mf2_t op1, vuint8mf2_t shift, size_t vl) {
- return __riscv_vssra_vv_i8mf2(op1, shift, vl);
+ return __riscv_vssra_vv_i8mf2(op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vssra_vx_i8mf2
// CHECK-RV64-SAME: (<vscale x 4 x i8> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vssra.nxv4i8.i64.i64(<vscale x 4 x i8> poison, <vscale x 4 x i8> [[OP1]], i64 [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vssra.nxv4i8.i64.i64(<vscale x 4 x i8> poison, <vscale x 4 x i8> [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vssra_vx_i8mf2(vint8mf2_t op1, size_t shift, size_t vl) {
- return __riscv_vssra_vx_i8mf2(op1, shift, vl);
+ return __riscv_vssra_vx_i8mf2(op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vssra_vv_i8m1
// CHECK-RV64-SAME: (<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vssra.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> poison, <vscale x 8 x i8> [[OP1]], <vscale x 8 x i8> [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vssra.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> poison, <vscale x 8 x i8> [[OP1]], <vscale x 8 x i8> [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vssra_vv_i8m1(vint8m1_t op1, vuint8m1_t shift, size_t vl) {
- return __riscv_vssra_vv_i8m1(op1, shift, vl);
+ return __riscv_vssra_vv_i8m1(op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vssra_vx_i8m1
// CHECK-RV64-SAME: (<vscale x 8 x i8> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vssra.nxv8i8.i64.i64(<vscale x 8 x i8> poison, <vscale x 8 x i8> [[OP1]], i64 [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vssra.nxv8i8.i64.i64(<vscale x 8 x i8> poison, <vscale x 8 x i8> [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vssra_vx_i8m1(vint8m1_t op1, size_t shift, size_t vl) {
- return __riscv_vssra_vx_i8m1(op1, shift, vl);
+ return __riscv_vssra_vx_i8m1(op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vssra_vv_i8m2
// CHECK-RV64-SAME: (<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vssra.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> poison, <vscale x 16 x i8> [[OP1]], <vscale x 16 x i8> [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vssra.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> poison, <vscale x 16 x i8> [[OP1]], <vscale x 16 x i8> [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vssra_vv_i8m2(vint8m2_t op1, vuint8m2_t shift, size_t vl) {
- return __riscv_vssra_vv_i8m2(op1, shift, vl);
+ return __riscv_vssra_vv_i8m2(op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vssra_vx_i8m2
// CHECK-RV64-SAME: (<vscale x 16 x i8> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vssra.nxv16i8.i64.i64(<vscale x 16 x i8> poison, <vscale x 16 x i8> [[OP1]], i64 [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vssra.nxv16i8.i64.i64(<vscale x 16 x i8> poison, <vscale x 16 x i8> [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vssra_vx_i8m2(vint8m2_t op1, size_t shift, size_t vl) {
- return __riscv_vssra_vx_i8m2(op1, shift, vl);
+ return __riscv_vssra_vx_i8m2(op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vssra_vv_i8m4
// CHECK-RV64-SAME: (<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vssra.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> poison, <vscale x 32 x i8> [[OP1]], <vscale x 32 x i8> [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vssra.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> poison, <vscale x 32 x i8> [[OP1]], <vscale x 32 x i8> [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vssra_vv_i8m4(vint8m4_t op1, vuint8m4_t shift, size_t vl) {
- return __riscv_vssra_vv_i8m4(op1, shift, vl);
+ return __riscv_vssra_vv_i8m4(op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vssra_vx_i8m4
// CHECK-RV64-SAME: (<vscale x 32 x i8> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vssra.nxv32i8.i64.i64(<vscale x 32 x i8> poison, <vscale x 32 x i8> [[OP1]], i64 [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vssra.nxv32i8.i64.i64(<vscale x 32 x i8> poison, <vscale x 32 x i8> [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vssra_vx_i8m4(vint8m4_t op1, size_t shift, size_t vl) {
- return __riscv_vssra_vx_i8m4(op1, shift, vl);
+ return __riscv_vssra_vx_i8m4(op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 64 x i8> @test_vssra_vv_i8m8
// CHECK-RV64-SAME: (<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vssra.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> poison, <vscale x 64 x i8> [[OP1]], <vscale x 64 x i8> [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vssra.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> poison, <vscale x 64 x i8> [[OP1]], <vscale x 64 x i8> [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vssra_vv_i8m8(vint8m8_t op1, vuint8m8_t shift, size_t vl) {
- return __riscv_vssra_vv_i8m8(op1, shift, vl);
+ return __riscv_vssra_vv_i8m8(op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 64 x i8> @test_vssra_vx_i8m8
// CHECK-RV64-SAME: (<vscale x 64 x i8> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vssra.nxv64i8.i64.i64(<vscale x 64 x i8> poison, <vscale x 64 x i8> [[OP1]], i64 [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vssra.nxv64i8.i64.i64(<vscale x 64 x i8> poison, <vscale x 64 x i8> [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vssra_vx_i8m8(vint8m8_t op1, size_t shift, size_t vl) {
- return __riscv_vssra_vx_i8m8(op1, shift, vl);
+ return __riscv_vssra_vx_i8m8(op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i16> @test_vssra_vv_i16mf4
// CHECK-RV64-SAME: (<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vssra.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> poison, <vscale x 1 x i16> [[OP1]], <vscale x 1 x i16> [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vssra.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> poison, <vscale x 1 x i16> [[OP1]], <vscale x 1 x i16> [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vssra_vv_i16mf4(vint16mf4_t op1, vuint16mf4_t shift, size_t vl) {
- return __riscv_vssra_vv_i16mf4(op1, shift, vl);
+ return __riscv_vssra_vv_i16mf4(op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i16> @test_vssra_vx_i16mf4
// CHECK-RV64-SAME: (<vscale x 1 x i16> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vssra.nxv1i16.i64.i64(<vscale x 1 x i16> poison, <vscale x 1 x i16> [[OP1]], i64 [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vssra.nxv1i16.i64.i64(<vscale x 1 x i16> poison, <vscale x 1 x i16> [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vssra_vx_i16mf4(vint16mf4_t op1, size_t shift, size_t vl) {
- return __riscv_vssra_vx_i16mf4(op1, shift, vl);
+ return __riscv_vssra_vx_i16mf4(op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i16> @test_vssra_vv_i16mf2
// CHECK-RV64-SAME: (<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vssra.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> poison, <vscale x 2 x i16> [[OP1]], <vscale x 2 x i16> [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vssra.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> poison, <vscale x 2 x i16> [[OP1]], <vscale x 2 x i16> [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vssra_vv_i16mf2(vint16mf2_t op1, vuint16mf2_t shift, size_t vl) {
- return __riscv_vssra_vv_i16mf2(op1, shift, vl);
+ return __riscv_vssra_vv_i16mf2(op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i16> @test_vssra_vx_i16mf2
// CHECK-RV64-SAME: (<vscale x 2 x i16> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vssra.nxv2i16.i64.i64(<vscale x 2 x i16> poison, <vscale x 2 x i16> [[OP1]], i64 [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vssra.nxv2i16.i64.i64(<vscale x 2 x i16> poison, <vscale x 2 x i16> [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vssra_vx_i16mf2(vint16mf2_t op1, size_t shift, size_t vl) {
- return __riscv_vssra_vx_i16mf2(op1, shift, vl);
+ return __riscv_vssra_vx_i16mf2(op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vssra_vv_i16m1
// CHECK-RV64-SAME: (<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vssra.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> poison, <vscale x 4 x i16> [[OP1]], <vscale x 4 x i16> [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vssra.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> poison, <vscale x 4 x i16> [[OP1]], <vscale x 4 x i16> [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vssra_vv_i16m1(vint16m1_t op1, vuint16m1_t shift, size_t vl) {
- return __riscv_vssra_vv_i16m1(op1, shift, vl);
+ return __riscv_vssra_vv_i16m1(op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vssra_vx_i16m1
// CHECK-RV64-SAME: (<vscale x 4 x i16> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vssra.nxv4i16.i64.i64(<vscale x 4 x i16> poison, <vscale x 4 x i16> [[OP1]], i64 [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vssra.nxv4i16.i64.i64(<vscale x 4 x i16> poison, <vscale x 4 x i16> [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vssra_vx_i16m1(vint16m1_t op1, size_t shift, size_t vl) {
- return __riscv_vssra_vx_i16m1(op1, shift, vl);
+ return __riscv_vssra_vx_i16m1(op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i16> @test_vssra_vv_i16m2
// CHECK-RV64-SAME: (<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vssra.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> poison, <vscale x 8 x i16> [[OP1]], <vscale x 8 x i16> [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vssra.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> poison, <vscale x 8 x i16> [[OP1]], <vscale x 8 x i16> [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vssra_vv_i16m2(vint16m2_t op1, vuint16m2_t shift, size_t vl) {
- return __riscv_vssra_vv_i16m2(op1, shift, vl);
+ return __riscv_vssra_vv_i16m2(op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i16> @test_vssra_vx_i16m2
// CHECK-RV64-SAME: (<vscale x 8 x i16> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vssra.nxv8i16.i64.i64(<vscale x 8 x i16> poison, <vscale x 8 x i16> [[OP1]], i64 [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vssra.nxv8i16.i64.i64(<vscale x 8 x i16> poison, <vscale x 8 x i16> [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vssra_vx_i16m2(vint16m2_t op1, size_t shift, size_t vl) {
- return __riscv_vssra_vx_i16m2(op1, shift, vl);
+ return __riscv_vssra_vx_i16m2(op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i16> @test_vssra_vv_i16m4
// CHECK-RV64-SAME: (<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vssra.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> poison, <vscale x 16 x i16> [[OP1]], <vscale x 16 x i16> [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vssra.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> poison, <vscale x 16 x i16> [[OP1]], <vscale x 16 x i16> [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vssra_vv_i16m4(vint16m4_t op1, vuint16m4_t shift, size_t vl) {
- return __riscv_vssra_vv_i16m4(op1, shift, vl);
+ return __riscv_vssra_vv_i16m4(op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i16> @test_vssra_vx_i16m4
// CHECK-RV64-SAME: (<vscale x 16 x i16> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vssra.nxv16i16.i64.i64(<vscale x 16 x i16> poison, <vscale x 16 x i16> [[OP1]], i64 [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vssra.nxv16i16.i64.i64(<vscale x 16 x i16> poison, <vscale x 16 x i16> [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vssra_vx_i16m4(vint16m4_t op1, size_t shift, size_t vl) {
- return __riscv_vssra_vx_i16m4(op1, shift, vl);
+ return __riscv_vssra_vx_i16m4(op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i16> @test_vssra_vv_i16m8
// CHECK-RV64-SAME: (<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vssra.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> poison, <vscale x 32 x i16> [[OP1]], <vscale x 32 x i16> [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vssra.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> poison, <vscale x 32 x i16> [[OP1]], <vscale x 32 x i16> [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vssra_vv_i16m8(vint16m8_t op1, vuint16m8_t shift, size_t vl) {
- return __riscv_vssra_vv_i16m8(op1, shift, vl);
+ return __riscv_vssra_vv_i16m8(op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i16> @test_vssra_vx_i16m8
// CHECK-RV64-SAME: (<vscale x 32 x i16> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vssra.nxv32i16.i64.i64(<vscale x 32 x i16> poison, <vscale x 32 x i16> [[OP1]], i64 [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vssra.nxv32i16.i64.i64(<vscale x 32 x i16> poison, <vscale x 32 x i16> [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vssra_vx_i16m8(vint16m8_t op1, size_t shift, size_t vl) {
- return __riscv_vssra_vx_i16m8(op1, shift, vl);
+ return __riscv_vssra_vx_i16m8(op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i32> @test_vssra_vv_i32mf2
// CHECK-RV64-SAME: (<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vssra.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1]], <vscale x 1 x i32> [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vssra.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1]], <vscale x 1 x i32> [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vssra_vv_i32mf2(vint32mf2_t op1, vuint32mf2_t shift, size_t vl) {
- return __riscv_vssra_vv_i32mf2(op1, shift, vl);
+ return __riscv_vssra_vv_i32mf2(op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i32> @test_vssra_vx_i32mf2
// CHECK-RV64-SAME: (<vscale x 1 x i32> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vssra.nxv1i32.i64.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1]], i64 [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vssra.nxv1i32.i64.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vssra_vx_i32mf2(vint32mf2_t op1, size_t shift, size_t vl) {
- return __riscv_vssra_vx_i32mf2(op1, shift, vl);
+ return __riscv_vssra_vx_i32mf2(op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i32> @test_vssra_vv_i32m1
// CHECK-RV64-SAME: (<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vssra.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> poison, <vscale x 2 x i32> [[OP1]], <vscale x 2 x i32> [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vssra.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> poison, <vscale x 2 x i32> [[OP1]], <vscale x 2 x i32> [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vssra_vv_i32m1(vint32m1_t op1, vuint32m1_t shift, size_t vl) {
- return __riscv_vssra_vv_i32m1(op1, shift, vl);
+ return __riscv_vssra_vv_i32m1(op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i32> @test_vssra_vx_i32m1
// CHECK-RV64-SAME: (<vscale x 2 x i32> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vssra.nxv2i32.i64.i64(<vscale x 2 x i32> poison, <vscale x 2 x i32> [[OP1]], i64 [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vssra.nxv2i32.i64.i64(<vscale x 2 x i32> poison, <vscale x 2 x i32> [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vssra_vx_i32m1(vint32m1_t op1, size_t shift, size_t vl) {
- return __riscv_vssra_vx_i32m1(op1, shift, vl);
+ return __riscv_vssra_vx_i32m1(op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i32> @test_vssra_vv_i32m2
// CHECK-RV64-SAME: (<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vssra.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> poison, <vscale x 4 x i32> [[OP1]], <vscale x 4 x i32> [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vssra.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> poison, <vscale x 4 x i32> [[OP1]], <vscale x 4 x i32> [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vssra_vv_i32m2(vint32m2_t op1, vuint32m2_t shift, size_t vl) {
- return __riscv_vssra_vv_i32m2(op1, shift, vl);
+ return __riscv_vssra_vv_i32m2(op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i32> @test_vssra_vx_i32m2
// CHECK-RV64-SAME: (<vscale x 4 x i32> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vssra.nxv4i32.i64.i64(<vscale x 4 x i32> poison, <vscale x 4 x i32> [[OP1]], i64 [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vssra.nxv4i32.i64.i64(<vscale x 4 x i32> poison, <vscale x 4 x i32> [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vssra_vx_i32m2(vint32m2_t op1, size_t shift, size_t vl) {
- return __riscv_vssra_vx_i32m2(op1, shift, vl);
+ return __riscv_vssra_vx_i32m2(op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i32> @test_vssra_vv_i32m4
// CHECK-RV64-SAME: (<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vssra.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> poison, <vscale x 8 x i32> [[OP1]], <vscale x 8 x i32> [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vssra.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> poison, <vscale x 8 x i32> [[OP1]], <vscale x 8 x i32> [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vssra_vv_i32m4(vint32m4_t op1, vuint32m4_t shift, size_t vl) {
- return __riscv_vssra_vv_i32m4(op1, shift, vl);
+ return __riscv_vssra_vv_i32m4(op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i32> @test_vssra_vx_i32m4
// CHECK-RV64-SAME: (<vscale x 8 x i32> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vssra.nxv8i32.i64.i64(<vscale x 8 x i32> poison, <vscale x 8 x i32> [[OP1]], i64 [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vssra.nxv8i32.i64.i64(<vscale x 8 x i32> poison, <vscale x 8 x i32> [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vssra_vx_i32m4(vint32m4_t op1, size_t shift, size_t vl) {
- return __riscv_vssra_vx_i32m4(op1, shift, vl);
+ return __riscv_vssra_vx_i32m4(op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i32> @test_vssra_vv_i32m8
// CHECK-RV64-SAME: (<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vssra.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> poison, <vscale x 16 x i32> [[OP1]], <vscale x 16 x i32> [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vssra.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> poison, <vscale x 16 x i32> [[OP1]], <vscale x 16 x i32> [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vssra_vv_i32m8(vint32m8_t op1, vuint32m8_t shift, size_t vl) {
- return __riscv_vssra_vv_i32m8(op1, shift, vl);
+ return __riscv_vssra_vv_i32m8(op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i32> @test_vssra_vx_i32m8
// CHECK-RV64-SAME: (<vscale x 16 x i32> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vssra.nxv16i32.i64.i64(<vscale x 16 x i32> poison, <vscale x 16 x i32> [[OP1]], i64 [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vssra.nxv16i32.i64.i64(<vscale x 16 x i32> poison, <vscale x 16 x i32> [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vssra_vx_i32m8(vint32m8_t op1, size_t shift, size_t vl) {
- return __riscv_vssra_vx_i32m8(op1, shift, vl);
+ return __riscv_vssra_vx_i32m8(op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i64> @test_vssra_vv_i64m1
// CHECK-RV64-SAME: (<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vssra.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> poison, <vscale x 1 x i64> [[OP1]], <vscale x 1 x i64> [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vssra.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> poison, <vscale x 1 x i64> [[OP1]], <vscale x 1 x i64> [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vssra_vv_i64m1(vint64m1_t op1, vuint64m1_t shift, size_t vl) {
- return __riscv_vssra_vv_i64m1(op1, shift, vl);
+ return __riscv_vssra_vv_i64m1(op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i64> @test_vssra_vx_i64m1
// CHECK-RV64-SAME: (<vscale x 1 x i64> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vssra.nxv1i64.i64.i64(<vscale x 1 x i64> poison, <vscale x 1 x i64> [[OP1]], i64 [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vssra.nxv1i64.i64.i64(<vscale x 1 x i64> poison, <vscale x 1 x i64> [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vssra_vx_i64m1(vint64m1_t op1, size_t shift, size_t vl) {
- return __riscv_vssra_vx_i64m1(op1, shift, vl);
+ return __riscv_vssra_vx_i64m1(op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i64> @test_vssra_vv_i64m2
// CHECK-RV64-SAME: (<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vssra.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> poison, <vscale x 2 x i64> [[OP1]], <vscale x 2 x i64> [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vssra.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> poison, <vscale x 2 x i64> [[OP1]], <vscale x 2 x i64> [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vssra_vv_i64m2(vint64m2_t op1, vuint64m2_t shift, size_t vl) {
- return __riscv_vssra_vv_i64m2(op1, shift, vl);
+ return __riscv_vssra_vv_i64m2(op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i64> @test_vssra_vx_i64m2
// CHECK-RV64-SAME: (<vscale x 2 x i64> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vssra.nxv2i64.i64.i64(<vscale x 2 x i64> poison, <vscale x 2 x i64> [[OP1]], i64 [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vssra.nxv2i64.i64.i64(<vscale x 2 x i64> poison, <vscale x 2 x i64> [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vssra_vx_i64m2(vint64m2_t op1, size_t shift, size_t vl) {
- return __riscv_vssra_vx_i64m2(op1, shift, vl);
+ return __riscv_vssra_vx_i64m2(op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i64> @test_vssra_vv_i64m4
// CHECK-RV64-SAME: (<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vssra.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> poison, <vscale x 4 x i64> [[OP1]], <vscale x 4 x i64> [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vssra.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> poison, <vscale x 4 x i64> [[OP1]], <vscale x 4 x i64> [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vssra_vv_i64m4(vint64m4_t op1, vuint64m4_t shift, size_t vl) {
- return __riscv_vssra_vv_i64m4(op1, shift, vl);
+ return __riscv_vssra_vv_i64m4(op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i64> @test_vssra_vx_i64m4
// CHECK-RV64-SAME: (<vscale x 4 x i64> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vssra.nxv4i64.i64.i64(<vscale x 4 x i64> poison, <vscale x 4 x i64> [[OP1]], i64 [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vssra.nxv4i64.i64.i64(<vscale x 4 x i64> poison, <vscale x 4 x i64> [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vssra_vx_i64m4(vint64m4_t op1, size_t shift, size_t vl) {
- return __riscv_vssra_vx_i64m4(op1, shift, vl);
+ return __riscv_vssra_vx_i64m4(op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i64> @test_vssra_vv_i64m8
// CHECK-RV64-SAME: (<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vssra.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> poison, <vscale x 8 x i64> [[OP1]], <vscale x 8 x i64> [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vssra.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> poison, <vscale x 8 x i64> [[OP1]], <vscale x 8 x i64> [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vssra_vv_i64m8(vint64m8_t op1, vuint64m8_t shift, size_t vl) {
- return __riscv_vssra_vv_i64m8(op1, shift, vl);
+ return __riscv_vssra_vv_i64m8(op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i64> @test_vssra_vx_i64m8
// CHECK-RV64-SAME: (<vscale x 8 x i64> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vssra.nxv8i64.i64.i64(<vscale x 8 x i64> poison, <vscale x 8 x i64> [[OP1]], i64 [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vssra.nxv8i64.i64.i64(<vscale x 8 x i64> poison, <vscale x 8 x i64> [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vssra_vx_i64m8(vint64m8_t op1, size_t shift, size_t vl) {
- return __riscv_vssra_vx_i64m8(op1, shift, vl);
+ return __riscv_vssra_vx_i64m8(op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vssra_vv_i8mf8_m
// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vssra.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> poison, <vscale x 1 x i8> [[OP1]], <vscale x 1 x i8> [[SHIFT]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vssra.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> poison, <vscale x 1 x i8> [[OP1]], <vscale x 1 x i8> [[SHIFT]], <vscale x 1 x i1> [[MASK]], i64 0, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vssra_vv_i8mf8_m(vbool64_t mask, vint8mf8_t op1, vuint8mf8_t shift, size_t vl) {
- return __riscv_vssra_vv_i8mf8_m(mask, op1, shift, vl);
+ return __riscv_vssra_vv_i8mf8_m(mask, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vssra_vx_i8mf8_m
// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vssra.mask.nxv1i8.i64.i64(<vscale x 1 x i8> poison, <vscale x 1 x i8> [[OP1]], i64 [[SHIFT]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vssra.mask.nxv1i8.i64.i64(<vscale x 1 x i8> poison, <vscale x 1 x i8> [[OP1]], i64 [[SHIFT]], <vscale x 1 x i1> [[MASK]], i64 0, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vssra_vx_i8mf8_m(vbool64_t mask, vint8mf8_t op1, size_t shift, size_t vl) {
- return __riscv_vssra_vx_i8mf8_m(mask, op1, shift, vl);
+ return __riscv_vssra_vx_i8mf8_m(mask, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vssra_vv_i8mf4_m
// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vssra.mask.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> poison, <vscale x 2 x i8> [[OP1]], <vscale x 2 x i8> [[SHIFT]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vssra.mask.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> poison, <vscale x 2 x i8> [[OP1]], <vscale x 2 x i8> [[SHIFT]], <vscale x 2 x i1> [[MASK]], i64 0, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vssra_vv_i8mf4_m(vbool32_t mask, vint8mf4_t op1, vuint8mf4_t shift, size_t vl) {
- return __riscv_vssra_vv_i8mf4_m(mask, op1, shift, vl);
+ return __riscv_vssra_vv_i8mf4_m(mask, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vssra_vx_i8mf4_m
// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i8> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vssra.mask.nxv2i8.i64.i64(<vscale x 2 x i8> poison, <vscale x 2 x i8> [[OP1]], i64 [[SHIFT]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vssra.mask.nxv2i8.i64.i64(<vscale x 2 x i8> poison, <vscale x 2 x i8> [[OP1]], i64 [[SHIFT]], <vscale x 2 x i1> [[MASK]], i64 0, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vssra_vx_i8mf4_m(vbool32_t mask, vint8mf4_t op1, size_t shift, size_t vl) {
- return __riscv_vssra_vx_i8mf4_m(mask, op1, shift, vl);
+ return __riscv_vssra_vx_i8mf4_m(mask, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vssra_vv_i8mf2_m
// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vssra.mask.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> poison, <vscale x 4 x i8> [[OP1]], <vscale x 4 x i8> [[SHIFT]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vssra.mask.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> poison, <vscale x 4 x i8> [[OP1]], <vscale x 4 x i8> [[SHIFT]], <vscale x 4 x i1> [[MASK]], i64 0, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vssra_vv_i8mf2_m(vbool16_t mask, vint8mf2_t op1, vuint8mf2_t shift, size_t vl) {
- return __riscv_vssra_vv_i8mf2_m(mask, op1, shift, vl);
+ return __riscv_vssra_vv_i8mf2_m(mask, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vssra_vx_i8mf2_m
// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i8> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vssra.mask.nxv4i8.i64.i64(<vscale x 4 x i8> poison, <vscale x 4 x i8> [[OP1]], i64 [[SHIFT]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vssra.mask.nxv4i8.i64.i64(<vscale x 4 x i8> poison, <vscale x 4 x i8> [[OP1]], i64 [[SHIFT]], <vscale x 4 x i1> [[MASK]], i64 0, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vssra_vx_i8mf2_m(vbool16_t mask, vint8mf2_t op1, size_t shift, size_t vl) {
- return __riscv_vssra_vx_i8mf2_m(mask, op1, shift, vl);
+ return __riscv_vssra_vx_i8mf2_m(mask, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vssra_vv_i8m1_m
// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vssra.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> poison, <vscale x 8 x i8> [[OP1]], <vscale x 8 x i8> [[SHIFT]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vssra.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> poison, <vscale x 8 x i8> [[OP1]], <vscale x 8 x i8> [[SHIFT]], <vscale x 8 x i1> [[MASK]], i64 0, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vssra_vv_i8m1_m(vbool8_t mask, vint8m1_t op1, vuint8m1_t shift, size_t vl) {
- return __riscv_vssra_vv_i8m1_m(mask, op1, shift, vl);
+ return __riscv_vssra_vv_i8m1_m(mask, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vssra_vx_i8m1_m
// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i8> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vssra.mask.nxv8i8.i64.i64(<vscale x 8 x i8> poison, <vscale x 8 x i8> [[OP1]], i64 [[SHIFT]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vssra.mask.nxv8i8.i64.i64(<vscale x 8 x i8> poison, <vscale x 8 x i8> [[OP1]], i64 [[SHIFT]], <vscale x 8 x i1> [[MASK]], i64 0, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vssra_vx_i8m1_m(vbool8_t mask, vint8m1_t op1, size_t shift, size_t vl) {
- return __riscv_vssra_vx_i8m1_m(mask, op1, shift, vl);
+ return __riscv_vssra_vx_i8m1_m(mask, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vssra_vv_i8m2_m
// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vssra.mask.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> poison, <vscale x 16 x i8> [[OP1]], <vscale x 16 x i8> [[SHIFT]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vssra.mask.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> poison, <vscale x 16 x i8> [[OP1]], <vscale x 16 x i8> [[SHIFT]], <vscale x 16 x i1> [[MASK]], i64 0, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vssra_vv_i8m2_m(vbool4_t mask, vint8m2_t op1, vuint8m2_t shift, size_t vl) {
- return __riscv_vssra_vv_i8m2_m(mask, op1, shift, vl);
+ return __riscv_vssra_vv_i8m2_m(mask, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vssra_vx_i8m2_m
// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x i8> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vssra.mask.nxv16i8.i64.i64(<vscale x 16 x i8> poison, <vscale x 16 x i8> [[OP1]], i64 [[SHIFT]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vssra.mask.nxv16i8.i64.i64(<vscale x 16 x i8> poison, <vscale x 16 x i8> [[OP1]], i64 [[SHIFT]], <vscale x 16 x i1> [[MASK]], i64 0, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vssra_vx_i8m2_m(vbool4_t mask, vint8m2_t op1, size_t shift, size_t vl) {
- return __riscv_vssra_vx_i8m2_m(mask, op1, shift, vl);
+ return __riscv_vssra_vx_i8m2_m(mask, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vssra_vv_i8m4_m
// CHECK-RV64-SAME: (<vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vssra.mask.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> poison, <vscale x 32 x i8> [[OP1]], <vscale x 32 x i8> [[SHIFT]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vssra.mask.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> poison, <vscale x 32 x i8> [[OP1]], <vscale x 32 x i8> [[SHIFT]], <vscale x 32 x i1> [[MASK]], i64 0, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vssra_vv_i8m4_m(vbool2_t mask, vint8m4_t op1, vuint8m4_t shift, size_t vl) {
- return __riscv_vssra_vv_i8m4_m(mask, op1, shift, vl);
+ return __riscv_vssra_vv_i8m4_m(mask, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vssra_vx_i8m4_m
// CHECK-RV64-SAME: (<vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x i8> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vssra.mask.nxv32i8.i64.i64(<vscale x 32 x i8> poison, <vscale x 32 x i8> [[OP1]], i64 [[SHIFT]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vssra.mask.nxv32i8.i64.i64(<vscale x 32 x i8> poison, <vscale x 32 x i8> [[OP1]], i64 [[SHIFT]], <vscale x 32 x i1> [[MASK]], i64 0, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vssra_vx_i8m4_m(vbool2_t mask, vint8m4_t op1, size_t shift, size_t vl) {
- return __riscv_vssra_vx_i8m4_m(mask, op1, shift, vl);
+ return __riscv_vssra_vx_i8m4_m(mask, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 64 x i8> @test_vssra_vv_i8m8_m
// CHECK-RV64-SAME: (<vscale x 64 x i1> [[MASK:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vssra.mask.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> poison, <vscale x 64 x i8> [[OP1]], <vscale x 64 x i8> [[SHIFT]], <vscale x 64 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vssra.mask.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> poison, <vscale x 64 x i8> [[OP1]], <vscale x 64 x i8> [[SHIFT]], <vscale x 64 x i1> [[MASK]], i64 0, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vssra_vv_i8m8_m(vbool1_t mask, vint8m8_t op1, vuint8m8_t shift, size_t vl) {
- return __riscv_vssra_vv_i8m8_m(mask, op1, shift, vl);
+ return __riscv_vssra_vv_i8m8_m(mask, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 64 x i8> @test_vssra_vx_i8m8_m
// CHECK-RV64-SAME: (<vscale x 64 x i1> [[MASK:%.*]], <vscale x 64 x i8> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vssra.mask.nxv64i8.i64.i64(<vscale x 64 x i8> poison, <vscale x 64 x i8> [[OP1]], i64 [[SHIFT]], <vscale x 64 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vssra.mask.nxv64i8.i64.i64(<vscale x 64 x i8> poison, <vscale x 64 x i8> [[OP1]], i64 [[SHIFT]], <vscale x 64 x i1> [[MASK]], i64 0, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vssra_vx_i8m8_m(vbool1_t mask, vint8m8_t op1, size_t shift, size_t vl) {
- return __riscv_vssra_vx_i8m8_m(mask, op1, shift, vl);
+ return __riscv_vssra_vx_i8m8_m(mask, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i16> @test_vssra_vv_i16mf4_m
// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vssra.mask.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> poison, <vscale x 1 x i16> [[OP1]], <vscale x 1 x i16> [[SHIFT]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vssra.mask.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> poison, <vscale x 1 x i16> [[OP1]], <vscale x 1 x i16> [[SHIFT]], <vscale x 1 x i1> [[MASK]], i64 0, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vssra_vv_i16mf4_m(vbool64_t mask, vint16mf4_t op1, vuint16mf4_t shift, size_t vl) {
- return __riscv_vssra_vv_i16mf4_m(mask, op1, shift, vl);
+ return __riscv_vssra_vv_i16mf4_m(mask, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i16> @test_vssra_vx_i16mf4_m
// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vssra.mask.nxv1i16.i64.i64(<vscale x 1 x i16> poison, <vscale x 1 x i16> [[OP1]], i64 [[SHIFT]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vssra.mask.nxv1i16.i64.i64(<vscale x 1 x i16> poison, <vscale x 1 x i16> [[OP1]], i64 [[SHIFT]], <vscale x 1 x i1> [[MASK]], i64 0, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vssra_vx_i16mf4_m(vbool64_t mask, vint16mf4_t op1, size_t shift, size_t vl) {
- return __riscv_vssra_vx_i16mf4_m(mask, op1, shift, vl);
+ return __riscv_vssra_vx_i16mf4_m(mask, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i16> @test_vssra_vv_i16mf2_m
// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vssra.mask.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> poison, <vscale x 2 x i16> [[OP1]], <vscale x 2 x i16> [[SHIFT]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vssra.mask.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> poison, <vscale x 2 x i16> [[OP1]], <vscale x 2 x i16> [[SHIFT]], <vscale x 2 x i1> [[MASK]], i64 0, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vssra_vv_i16mf2_m(vbool32_t mask, vint16mf2_t op1, vuint16mf2_t shift, size_t vl) {
- return __riscv_vssra_vv_i16mf2_m(mask, op1, shift, vl);
+ return __riscv_vssra_vv_i16mf2_m(mask, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i16> @test_vssra_vx_i16mf2_m
// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vssra.mask.nxv2i16.i64.i64(<vscale x 2 x i16> poison, <vscale x 2 x i16> [[OP1]], i64 [[SHIFT]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vssra.mask.nxv2i16.i64.i64(<vscale x 2 x i16> poison, <vscale x 2 x i16> [[OP1]], i64 [[SHIFT]], <vscale x 2 x i1> [[MASK]], i64 0, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vssra_vx_i16mf2_m(vbool32_t mask, vint16mf2_t op1, size_t shift, size_t vl) {
- return __riscv_vssra_vx_i16mf2_m(mask, op1, shift, vl);
+ return __riscv_vssra_vx_i16mf2_m(mask, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vssra_vv_i16m1_m
// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vssra.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> poison, <vscale x 4 x i16> [[OP1]], <vscale x 4 x i16> [[SHIFT]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vssra.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> poison, <vscale x 4 x i16> [[OP1]], <vscale x 4 x i16> [[SHIFT]], <vscale x 4 x i1> [[MASK]], i64 0, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vssra_vv_i16m1_m(vbool16_t mask, vint16m1_t op1, vuint16m1_t shift, size_t vl) {
- return __riscv_vssra_vv_i16m1_m(mask, op1, shift, vl);
+ return __riscv_vssra_vv_i16m1_m(mask, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vssra_vx_i16m1_m
// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vssra.mask.nxv4i16.i64.i64(<vscale x 4 x i16> poison, <vscale x 4 x i16> [[OP1]], i64 [[SHIFT]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vssra.mask.nxv4i16.i64.i64(<vscale x 4 x i16> poison, <vscale x 4 x i16> [[OP1]], i64 [[SHIFT]], <vscale x 4 x i1> [[MASK]], i64 0, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vssra_vx_i16m1_m(vbool16_t mask, vint16m1_t op1, size_t shift, size_t vl) {
- return __riscv_vssra_vx_i16m1_m(mask, op1, shift, vl);
+ return __riscv_vssra_vx_i16m1_m(mask, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i16> @test_vssra_vv_i16m2_m
// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vssra.mask.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> poison, <vscale x 8 x i16> [[OP1]], <vscale x 8 x i16> [[SHIFT]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vssra.mask.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> poison, <vscale x 8 x i16> [[OP1]], <vscale x 8 x i16> [[SHIFT]], <vscale x 8 x i1> [[MASK]], i64 0, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vssra_vv_i16m2_m(vbool8_t mask, vint16m2_t op1, vuint16m2_t shift, size_t vl) {
- return __riscv_vssra_vv_i16m2_m(mask, op1, shift, vl);
+ return __riscv_vssra_vv_i16m2_m(mask, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i16> @test_vssra_vx_i16m2_m
// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vssra.mask.nxv8i16.i64.i64(<vscale x 8 x i16> poison, <vscale x 8 x i16> [[OP1]], i64 [[SHIFT]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vssra.mask.nxv8i16.i64.i64(<vscale x 8 x i16> poison, <vscale x 8 x i16> [[OP1]], i64 [[SHIFT]], <vscale x 8 x i1> [[MASK]], i64 0, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vssra_vx_i16m2_m(vbool8_t mask, vint16m2_t op1, size_t shift, size_t vl) {
- return __riscv_vssra_vx_i16m2_m(mask, op1, shift, vl);
+ return __riscv_vssra_vx_i16m2_m(mask, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i16> @test_vssra_vv_i16m4_m
// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vssra.mask.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> poison, <vscale x 16 x i16> [[OP1]], <vscale x 16 x i16> [[SHIFT]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vssra.mask.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> poison, <vscale x 16 x i16> [[OP1]], <vscale x 16 x i16> [[SHIFT]], <vscale x 16 x i1> [[MASK]], i64 0, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vssra_vv_i16m4_m(vbool4_t mask, vint16m4_t op1, vuint16m4_t shift, size_t vl) {
- return __riscv_vssra_vv_i16m4_m(mask, op1, shift, vl);
+ return __riscv_vssra_vv_i16m4_m(mask, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i16> @test_vssra_vx_i16m4_m
// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vssra.mask.nxv16i16.i64.i64(<vscale x 16 x i16> poison, <vscale x 16 x i16> [[OP1]], i64 [[SHIFT]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vssra.mask.nxv16i16.i64.i64(<vscale x 16 x i16> poison, <vscale x 16 x i16> [[OP1]], i64 [[SHIFT]], <vscale x 16 x i1> [[MASK]], i64 0, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vssra_vx_i16m4_m(vbool4_t mask, vint16m4_t op1, size_t shift, size_t vl) {
- return __riscv_vssra_vx_i16m4_m(mask, op1, shift, vl);
+ return __riscv_vssra_vx_i16m4_m(mask, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i16> @test_vssra_vv_i16m8_m
// CHECK-RV64-SAME: (<vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vssra.mask.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> poison, <vscale x 32 x i16> [[OP1]], <vscale x 32 x i16> [[SHIFT]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vssra.mask.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> poison, <vscale x 32 x i16> [[OP1]], <vscale x 32 x i16> [[SHIFT]], <vscale x 32 x i1> [[MASK]], i64 0, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vssra_vv_i16m8_m(vbool2_t mask, vint16m8_t op1, vuint16m8_t shift, size_t vl) {
- return __riscv_vssra_vv_i16m8_m(mask, op1, shift, vl);
+ return __riscv_vssra_vv_i16m8_m(mask, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i16> @test_vssra_vx_i16m8_m
// CHECK-RV64-SAME: (<vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vssra.mask.nxv32i16.i64.i64(<vscale x 32 x i16> poison, <vscale x 32 x i16> [[OP1]], i64 [[SHIFT]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vssra.mask.nxv32i16.i64.i64(<vscale x 32 x i16> poison, <vscale x 32 x i16> [[OP1]], i64 [[SHIFT]], <vscale x 32 x i1> [[MASK]], i64 0, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vssra_vx_i16m8_m(vbool2_t mask, vint16m8_t op1, size_t shift, size_t vl) {
- return __riscv_vssra_vx_i16m8_m(mask, op1, shift, vl);
+ return __riscv_vssra_vx_i16m8_m(mask, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i32> @test_vssra_vv_i32mf2_m
// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vssra.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1]], <vscale x 1 x i32> [[SHIFT]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vssra.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1]], <vscale x 1 x i32> [[SHIFT]], <vscale x 1 x i1> [[MASK]], i64 0, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vssra_vv_i32mf2_m(vbool64_t mask, vint32mf2_t op1, vuint32mf2_t shift, size_t vl) {
- return __riscv_vssra_vv_i32mf2_m(mask, op1, shift, vl);
+ return __riscv_vssra_vv_i32mf2_m(mask, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i32> @test_vssra_vx_i32mf2_m
// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vssra.mask.nxv1i32.i64.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1]], i64 [[SHIFT]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vssra.mask.nxv1i32.i64.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1]], i64 [[SHIFT]], <vscale x 1 x i1> [[MASK]], i64 0, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vssra_vx_i32mf2_m(vbool64_t mask, vint32mf2_t op1, size_t shift, size_t vl) {
- return __riscv_vssra_vx_i32mf2_m(mask, op1, shift, vl);
+ return __riscv_vssra_vx_i32mf2_m(mask, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i32> @test_vssra_vv_i32m1_m
// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vssra.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> poison, <vscale x 2 x i32> [[OP1]], <vscale x 2 x i32> [[SHIFT]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vssra.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> poison, <vscale x 2 x i32> [[OP1]], <vscale x 2 x i32> [[SHIFT]], <vscale x 2 x i1> [[MASK]], i64 0, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vssra_vv_i32m1_m(vbool32_t mask, vint32m1_t op1, vuint32m1_t shift, size_t vl) {
- return __riscv_vssra_vv_i32m1_m(mask, op1, shift, vl);
+ return __riscv_vssra_vv_i32m1_m(mask, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i32> @test_vssra_vx_i32m1_m
// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vssra.mask.nxv2i32.i64.i64(<vscale x 2 x i32> poison, <vscale x 2 x i32> [[OP1]], i64 [[SHIFT]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vssra.mask.nxv2i32.i64.i64(<vscale x 2 x i32> poison, <vscale x 2 x i32> [[OP1]], i64 [[SHIFT]], <vscale x 2 x i1> [[MASK]], i64 0, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vssra_vx_i32m1_m(vbool32_t mask, vint32m1_t op1, size_t shift, size_t vl) {
- return __riscv_vssra_vx_i32m1_m(mask, op1, shift, vl);
+ return __riscv_vssra_vx_i32m1_m(mask, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i32> @test_vssra_vv_i32m2_m
// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vssra.mask.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> poison, <vscale x 4 x i32> [[OP1]], <vscale x 4 x i32> [[SHIFT]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vssra.mask.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> poison, <vscale x 4 x i32> [[OP1]], <vscale x 4 x i32> [[SHIFT]], <vscale x 4 x i1> [[MASK]], i64 0, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vssra_vv_i32m2_m(vbool16_t mask, vint32m2_t op1, vuint32m2_t shift, size_t vl) {
- return __riscv_vssra_vv_i32m2_m(mask, op1, shift, vl);
+ return __riscv_vssra_vv_i32m2_m(mask, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i32> @test_vssra_vx_i32m2_m
// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vssra.mask.nxv4i32.i64.i64(<vscale x 4 x i32> poison, <vscale x 4 x i32> [[OP1]], i64 [[SHIFT]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vssra.mask.nxv4i32.i64.i64(<vscale x 4 x i32> poison, <vscale x 4 x i32> [[OP1]], i64 [[SHIFT]], <vscale x 4 x i1> [[MASK]], i64 0, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vssra_vx_i32m2_m(vbool16_t mask, vint32m2_t op1, size_t shift, size_t vl) {
- return __riscv_vssra_vx_i32m2_m(mask, op1, shift, vl);
+ return __riscv_vssra_vx_i32m2_m(mask, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i32> @test_vssra_vv_i32m4_m
// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vssra.mask.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> poison, <vscale x 8 x i32> [[OP1]], <vscale x 8 x i32> [[SHIFT]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vssra.mask.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> poison, <vscale x 8 x i32> [[OP1]], <vscale x 8 x i32> [[SHIFT]], <vscale x 8 x i1> [[MASK]], i64 0, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vssra_vv_i32m4_m(vbool8_t mask, vint32m4_t op1, vuint32m4_t shift, size_t vl) {
- return __riscv_vssra_vv_i32m4_m(mask, op1, shift, vl);
+ return __riscv_vssra_vv_i32m4_m(mask, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i32> @test_vssra_vx_i32m4_m
// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vssra.mask.nxv8i32.i64.i64(<vscale x 8 x i32> poison, <vscale x 8 x i32> [[OP1]], i64 [[SHIFT]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vssra.mask.nxv8i32.i64.i64(<vscale x 8 x i32> poison, <vscale x 8 x i32> [[OP1]], i64 [[SHIFT]], <vscale x 8 x i1> [[MASK]], i64 0, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vssra_vx_i32m4_m(vbool8_t mask, vint32m4_t op1, size_t shift, size_t vl) {
- return __riscv_vssra_vx_i32m4_m(mask, op1, shift, vl);
+ return __riscv_vssra_vx_i32m4_m(mask, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i32> @test_vssra_vv_i32m8_m
// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vssra.mask.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> poison, <vscale x 16 x i32> [[OP1]], <vscale x 16 x i32> [[SHIFT]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vssra.mask.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> poison, <vscale x 16 x i32> [[OP1]], <vscale x 16 x i32> [[SHIFT]], <vscale x 16 x i1> [[MASK]], i64 0, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vssra_vv_i32m8_m(vbool4_t mask, vint32m8_t op1, vuint32m8_t shift, size_t vl) {
- return __riscv_vssra_vv_i32m8_m(mask, op1, shift, vl);
+ return __riscv_vssra_vv_i32m8_m(mask, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i32> @test_vssra_vx_i32m8_m
// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vssra.mask.nxv16i32.i64.i64(<vscale x 16 x i32> poison, <vscale x 16 x i32> [[OP1]], i64 [[SHIFT]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vssra.mask.nxv16i32.i64.i64(<vscale x 16 x i32> poison, <vscale x 16 x i32> [[OP1]], i64 [[SHIFT]], <vscale x 16 x i1> [[MASK]], i64 0, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vssra_vx_i32m8_m(vbool4_t mask, vint32m8_t op1, size_t shift, size_t vl) {
- return __riscv_vssra_vx_i32m8_m(mask, op1, shift, vl);
+ return __riscv_vssra_vx_i32m8_m(mask, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i64> @test_vssra_vv_i64m1_m
// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vssra.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> poison, <vscale x 1 x i64> [[OP1]], <vscale x 1 x i64> [[SHIFT]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vssra.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> poison, <vscale x 1 x i64> [[OP1]], <vscale x 1 x i64> [[SHIFT]], <vscale x 1 x i1> [[MASK]], i64 0, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vssra_vv_i64m1_m(vbool64_t mask, vint64m1_t op1, vuint64m1_t shift, size_t vl) {
- return __riscv_vssra_vv_i64m1_m(mask, op1, shift, vl);
+ return __riscv_vssra_vv_i64m1_m(mask, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i64> @test_vssra_vx_i64m1_m
// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vssra.mask.nxv1i64.i64.i64(<vscale x 1 x i64> poison, <vscale x 1 x i64> [[OP1]], i64 [[SHIFT]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vssra.mask.nxv1i64.i64.i64(<vscale x 1 x i64> poison, <vscale x 1 x i64> [[OP1]], i64 [[SHIFT]], <vscale x 1 x i1> [[MASK]], i64 0, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vssra_vx_i64m1_m(vbool64_t mask, vint64m1_t op1, size_t shift, size_t vl) {
- return __riscv_vssra_vx_i64m1_m(mask, op1, shift, vl);
+ return __riscv_vssra_vx_i64m1_m(mask, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i64> @test_vssra_vv_i64m2_m
// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vssra.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> poison, <vscale x 2 x i64> [[OP1]], <vscale x 2 x i64> [[SHIFT]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vssra.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> poison, <vscale x 2 x i64> [[OP1]], <vscale x 2 x i64> [[SHIFT]], <vscale x 2 x i1> [[MASK]], i64 0, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vssra_vv_i64m2_m(vbool32_t mask, vint64m2_t op1, vuint64m2_t shift, size_t vl) {
- return __riscv_vssra_vv_i64m2_m(mask, op1, shift, vl);
+ return __riscv_vssra_vv_i64m2_m(mask, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i64> @test_vssra_vx_i64m2_m
// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vssra.mask.nxv2i64.i64.i64(<vscale x 2 x i64> poison, <vscale x 2 x i64> [[OP1]], i64 [[SHIFT]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vssra.mask.nxv2i64.i64.i64(<vscale x 2 x i64> poison, <vscale x 2 x i64> [[OP1]], i64 [[SHIFT]], <vscale x 2 x i1> [[MASK]], i64 0, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vssra_vx_i64m2_m(vbool32_t mask, vint64m2_t op1, size_t shift, size_t vl) {
- return __riscv_vssra_vx_i64m2_m(mask, op1, shift, vl);
+ return __riscv_vssra_vx_i64m2_m(mask, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i64> @test_vssra_vv_i64m4_m
// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vssra.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> poison, <vscale x 4 x i64> [[OP1]], <vscale x 4 x i64> [[SHIFT]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vssra.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> poison, <vscale x 4 x i64> [[OP1]], <vscale x 4 x i64> [[SHIFT]], <vscale x 4 x i1> [[MASK]], i64 0, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vssra_vv_i64m4_m(vbool16_t mask, vint64m4_t op1, vuint64m4_t shift, size_t vl) {
- return __riscv_vssra_vv_i64m4_m(mask, op1, shift, vl);
+ return __riscv_vssra_vv_i64m4_m(mask, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i64> @test_vssra_vx_i64m4_m
// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vssra.mask.nxv4i64.i64.i64(<vscale x 4 x i64> poison, <vscale x 4 x i64> [[OP1]], i64 [[SHIFT]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vssra.mask.nxv4i64.i64.i64(<vscale x 4 x i64> poison, <vscale x 4 x i64> [[OP1]], i64 [[SHIFT]], <vscale x 4 x i1> [[MASK]], i64 0, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vssra_vx_i64m4_m(vbool16_t mask, vint64m4_t op1, size_t shift, size_t vl) {
- return __riscv_vssra_vx_i64m4_m(mask, op1, shift, vl);
+ return __riscv_vssra_vx_i64m4_m(mask, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i64> @test_vssra_vv_i64m8_m
// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vssra.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> poison, <vscale x 8 x i64> [[OP1]], <vscale x 8 x i64> [[SHIFT]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vssra.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> poison, <vscale x 8 x i64> [[OP1]], <vscale x 8 x i64> [[SHIFT]], <vscale x 8 x i1> [[MASK]], i64 0, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vssra_vv_i64m8_m(vbool8_t mask, vint64m8_t op1, vuint64m8_t shift, size_t vl) {
- return __riscv_vssra_vv_i64m8_m(mask, op1, shift, vl);
+ return __riscv_vssra_vv_i64m8_m(mask, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i64> @test_vssra_vx_i64m8_m
// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vssra.mask.nxv8i64.i64.i64(<vscale x 8 x i64> poison, <vscale x 8 x i64> [[OP1]], i64 [[SHIFT]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vssra.mask.nxv8i64.i64.i64(<vscale x 8 x i64> poison, <vscale x 8 x i64> [[OP1]], i64 [[SHIFT]], <vscale x 8 x i1> [[MASK]], i64 0, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vssra_vx_i64m8_m(vbool8_t mask, vint64m8_t op1, size_t shift, size_t vl) {
- return __riscv_vssra_vx_i64m8_m(mask, op1, shift, vl);
+ return __riscv_vssra_vx_i64m8_m(mask, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vssrl_vv_u8mf8
// CHECK-RV64-SAME: (<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vssrl.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> poison, <vscale x 1 x i8> [[OP1]], <vscale x 1 x i8> [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vssrl.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> poison, <vscale x 1 x i8> [[OP1]], <vscale x 1 x i8> [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vssrl_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t shift, size_t vl) {
- return __riscv_vssrl_vv_u8mf8(op1, shift, vl);
+ return __riscv_vssrl_vv_u8mf8(op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vssrl_vx_u8mf8
// CHECK-RV64-SAME: (<vscale x 1 x i8> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vssrl.nxv1i8.i64.i64(<vscale x 1 x i8> poison, <vscale x 1 x i8> [[OP1]], i64 [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vssrl.nxv1i8.i64.i64(<vscale x 1 x i8> poison, <vscale x 1 x i8> [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vssrl_vx_u8mf8(vuint8mf8_t op1, size_t shift, size_t vl) {
- return __riscv_vssrl_vx_u8mf8(op1, shift, vl);
+ return __riscv_vssrl_vx_u8mf8(op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vssrl_vv_u8mf4
// CHECK-RV64-SAME: (<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vssrl.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> poison, <vscale x 2 x i8> [[OP1]], <vscale x 2 x i8> [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vssrl.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> poison, <vscale x 2 x i8> [[OP1]], <vscale x 2 x i8> [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vssrl_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t shift, size_t vl) {
- return __riscv_vssrl_vv_u8mf4(op1, shift, vl);
+ return __riscv_vssrl_vv_u8mf4(op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vssrl_vx_u8mf4
// CHECK-RV64-SAME: (<vscale x 2 x i8> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vssrl.nxv2i8.i64.i64(<vscale x 2 x i8> poison, <vscale x 2 x i8> [[OP1]], i64 [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vssrl.nxv2i8.i64.i64(<vscale x 2 x i8> poison, <vscale x 2 x i8> [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vssrl_vx_u8mf4(vuint8mf4_t op1, size_t shift, size_t vl) {
- return __riscv_vssrl_vx_u8mf4(op1, shift, vl);
+ return __riscv_vssrl_vx_u8mf4(op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vssrl_vv_u8mf2
// CHECK-RV64-SAME: (<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vssrl.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> poison, <vscale x 4 x i8> [[OP1]], <vscale x 4 x i8> [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vssrl.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> poison, <vscale x 4 x i8> [[OP1]], <vscale x 4 x i8> [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vssrl_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t shift, size_t vl) {
- return __riscv_vssrl_vv_u8mf2(op1, shift, vl);
+ return __riscv_vssrl_vv_u8mf2(op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vssrl_vx_u8mf2
// CHECK-RV64-SAME: (<vscale x 4 x i8> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vssrl.nxv4i8.i64.i64(<vscale x 4 x i8> poison, <vscale x 4 x i8> [[OP1]], i64 [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vssrl.nxv4i8.i64.i64(<vscale x 4 x i8> poison, <vscale x 4 x i8> [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vssrl_vx_u8mf2(vuint8mf2_t op1, size_t shift, size_t vl) {
- return __riscv_vssrl_vx_u8mf2(op1, shift, vl);
+ return __riscv_vssrl_vx_u8mf2(op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vssrl_vv_u8m1
// CHECK-RV64-SAME: (<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vssrl.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> poison, <vscale x 8 x i8> [[OP1]], <vscale x 8 x i8> [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vssrl.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> poison, <vscale x 8 x i8> [[OP1]], <vscale x 8 x i8> [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vssrl_vv_u8m1(vuint8m1_t op1, vuint8m1_t shift, size_t vl) {
- return __riscv_vssrl_vv_u8m1(op1, shift, vl);
+ return __riscv_vssrl_vv_u8m1(op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vssrl_vx_u8m1
// CHECK-RV64-SAME: (<vscale x 8 x i8> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vssrl.nxv8i8.i64.i64(<vscale x 8 x i8> poison, <vscale x 8 x i8> [[OP1]], i64 [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vssrl.nxv8i8.i64.i64(<vscale x 8 x i8> poison, <vscale x 8 x i8> [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vssrl_vx_u8m1(vuint8m1_t op1, size_t shift, size_t vl) {
- return __riscv_vssrl_vx_u8m1(op1, shift, vl);
+ return __riscv_vssrl_vx_u8m1(op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vssrl_vv_u8m2
// CHECK-RV64-SAME: (<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vssrl.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> poison, <vscale x 16 x i8> [[OP1]], <vscale x 16 x i8> [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vssrl.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> poison, <vscale x 16 x i8> [[OP1]], <vscale x 16 x i8> [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vssrl_vv_u8m2(vuint8m2_t op1, vuint8m2_t shift, size_t vl) {
- return __riscv_vssrl_vv_u8m2(op1, shift, vl);
+ return __riscv_vssrl_vv_u8m2(op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vssrl_vx_u8m2
// CHECK-RV64-SAME: (<vscale x 16 x i8> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vssrl.nxv16i8.i64.i64(<vscale x 16 x i8> poison, <vscale x 16 x i8> [[OP1]], i64 [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vssrl.nxv16i8.i64.i64(<vscale x 16 x i8> poison, <vscale x 16 x i8> [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vssrl_vx_u8m2(vuint8m2_t op1, size_t shift, size_t vl) {
- return __riscv_vssrl_vx_u8m2(op1, shift, vl);
+ return __riscv_vssrl_vx_u8m2(op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vssrl_vv_u8m4
// CHECK-RV64-SAME: (<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vssrl.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> poison, <vscale x 32 x i8> [[OP1]], <vscale x 32 x i8> [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vssrl.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> poison, <vscale x 32 x i8> [[OP1]], <vscale x 32 x i8> [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vssrl_vv_u8m4(vuint8m4_t op1, vuint8m4_t shift, size_t vl) {
- return __riscv_vssrl_vv_u8m4(op1, shift, vl);
+ return __riscv_vssrl_vv_u8m4(op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vssrl_vx_u8m4
// CHECK-RV64-SAME: (<vscale x 32 x i8> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vssrl.nxv32i8.i64.i64(<vscale x 32 x i8> poison, <vscale x 32 x i8> [[OP1]], i64 [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vssrl.nxv32i8.i64.i64(<vscale x 32 x i8> poison, <vscale x 32 x i8> [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vssrl_vx_u8m4(vuint8m4_t op1, size_t shift, size_t vl) {
- return __riscv_vssrl_vx_u8m4(op1, shift, vl);
+ return __riscv_vssrl_vx_u8m4(op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 64 x i8> @test_vssrl_vv_u8m8
// CHECK-RV64-SAME: (<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vssrl.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> poison, <vscale x 64 x i8> [[OP1]], <vscale x 64 x i8> [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vssrl.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> poison, <vscale x 64 x i8> [[OP1]], <vscale x 64 x i8> [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vssrl_vv_u8m8(vuint8m8_t op1, vuint8m8_t shift, size_t vl) {
- return __riscv_vssrl_vv_u8m8(op1, shift, vl);
+ return __riscv_vssrl_vv_u8m8(op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 64 x i8> @test_vssrl_vx_u8m8
// CHECK-RV64-SAME: (<vscale x 64 x i8> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vssrl.nxv64i8.i64.i64(<vscale x 64 x i8> poison, <vscale x 64 x i8> [[OP1]], i64 [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vssrl.nxv64i8.i64.i64(<vscale x 64 x i8> poison, <vscale x 64 x i8> [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vssrl_vx_u8m8(vuint8m8_t op1, size_t shift, size_t vl) {
- return __riscv_vssrl_vx_u8m8(op1, shift, vl);
+ return __riscv_vssrl_vx_u8m8(op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i16> @test_vssrl_vv_u16mf4
// CHECK-RV64-SAME: (<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vssrl.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> poison, <vscale x 1 x i16> [[OP1]], <vscale x 1 x i16> [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vssrl.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> poison, <vscale x 1 x i16> [[OP1]], <vscale x 1 x i16> [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vssrl_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t shift, size_t vl) {
- return __riscv_vssrl_vv_u16mf4(op1, shift, vl);
+ return __riscv_vssrl_vv_u16mf4(op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i16> @test_vssrl_vx_u16mf4
// CHECK-RV64-SAME: (<vscale x 1 x i16> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vssrl.nxv1i16.i64.i64(<vscale x 1 x i16> poison, <vscale x 1 x i16> [[OP1]], i64 [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vssrl.nxv1i16.i64.i64(<vscale x 1 x i16> poison, <vscale x 1 x i16> [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vssrl_vx_u16mf4(vuint16mf4_t op1, size_t shift, size_t vl) {
- return __riscv_vssrl_vx_u16mf4(op1, shift, vl);
+ return __riscv_vssrl_vx_u16mf4(op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i16> @test_vssrl_vv_u16mf2
// CHECK-RV64-SAME: (<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vssrl.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> poison, <vscale x 2 x i16> [[OP1]], <vscale x 2 x i16> [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vssrl.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> poison, <vscale x 2 x i16> [[OP1]], <vscale x 2 x i16> [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vssrl_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t shift, size_t vl) {
- return __riscv_vssrl_vv_u16mf2(op1, shift, vl);
+ return __riscv_vssrl_vv_u16mf2(op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i16> @test_vssrl_vx_u16mf2
// CHECK-RV64-SAME: (<vscale x 2 x i16> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vssrl.nxv2i16.i64.i64(<vscale x 2 x i16> poison, <vscale x 2 x i16> [[OP1]], i64 [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vssrl.nxv2i16.i64.i64(<vscale x 2 x i16> poison, <vscale x 2 x i16> [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vssrl_vx_u16mf2(vuint16mf2_t op1, size_t shift, size_t vl) {
- return __riscv_vssrl_vx_u16mf2(op1, shift, vl);
+ return __riscv_vssrl_vx_u16mf2(op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vssrl_vv_u16m1
// CHECK-RV64-SAME: (<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vssrl.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> poison, <vscale x 4 x i16> [[OP1]], <vscale x 4 x i16> [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vssrl.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> poison, <vscale x 4 x i16> [[OP1]], <vscale x 4 x i16> [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vssrl_vv_u16m1(vuint16m1_t op1, vuint16m1_t shift, size_t vl) {
- return __riscv_vssrl_vv_u16m1(op1, shift, vl);
+ return __riscv_vssrl_vv_u16m1(op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vssrl_vx_u16m1
// CHECK-RV64-SAME: (<vscale x 4 x i16> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vssrl.nxv4i16.i64.i64(<vscale x 4 x i16> poison, <vscale x 4 x i16> [[OP1]], i64 [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vssrl.nxv4i16.i64.i64(<vscale x 4 x i16> poison, <vscale x 4 x i16> [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vssrl_vx_u16m1(vuint16m1_t op1, size_t shift, size_t vl) {
- return __riscv_vssrl_vx_u16m1(op1, shift, vl);
+ return __riscv_vssrl_vx_u16m1(op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i16> @test_vssrl_vv_u16m2
// CHECK-RV64-SAME: (<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vssrl.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> poison, <vscale x 8 x i16> [[OP1]], <vscale x 8 x i16> [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vssrl.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> poison, <vscale x 8 x i16> [[OP1]], <vscale x 8 x i16> [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vssrl_vv_u16m2(vuint16m2_t op1, vuint16m2_t shift, size_t vl) {
- return __riscv_vssrl_vv_u16m2(op1, shift, vl);
+ return __riscv_vssrl_vv_u16m2(op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i16> @test_vssrl_vx_u16m2
// CHECK-RV64-SAME: (<vscale x 8 x i16> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vssrl.nxv8i16.i64.i64(<vscale x 8 x i16> poison, <vscale x 8 x i16> [[OP1]], i64 [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vssrl.nxv8i16.i64.i64(<vscale x 8 x i16> poison, <vscale x 8 x i16> [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vssrl_vx_u16m2(vuint16m2_t op1, size_t shift, size_t vl) {
- return __riscv_vssrl_vx_u16m2(op1, shift, vl);
+ return __riscv_vssrl_vx_u16m2(op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i16> @test_vssrl_vv_u16m4
// CHECK-RV64-SAME: (<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vssrl.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> poison, <vscale x 16 x i16> [[OP1]], <vscale x 16 x i16> [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vssrl.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> poison, <vscale x 16 x i16> [[OP1]], <vscale x 16 x i16> [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vssrl_vv_u16m4(vuint16m4_t op1, vuint16m4_t shift, size_t vl) {
- return __riscv_vssrl_vv_u16m4(op1, shift, vl);
+ return __riscv_vssrl_vv_u16m4(op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i16> @test_vssrl_vx_u16m4
// CHECK-RV64-SAME: (<vscale x 16 x i16> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vssrl.nxv16i16.i64.i64(<vscale x 16 x i16> poison, <vscale x 16 x i16> [[OP1]], i64 [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vssrl.nxv16i16.i64.i64(<vscale x 16 x i16> poison, <vscale x 16 x i16> [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vssrl_vx_u16m4(vuint16m4_t op1, size_t shift, size_t vl) {
- return __riscv_vssrl_vx_u16m4(op1, shift, vl);
+ return __riscv_vssrl_vx_u16m4(op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i16> @test_vssrl_vv_u16m8
// CHECK-RV64-SAME: (<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vssrl.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> poison, <vscale x 32 x i16> [[OP1]], <vscale x 32 x i16> [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vssrl.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> poison, <vscale x 32 x i16> [[OP1]], <vscale x 32 x i16> [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vssrl_vv_u16m8(vuint16m8_t op1, vuint16m8_t shift, size_t vl) {
- return __riscv_vssrl_vv_u16m8(op1, shift, vl);
+ return __riscv_vssrl_vv_u16m8(op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i16> @test_vssrl_vx_u16m8
// CHECK-RV64-SAME: (<vscale x 32 x i16> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vssrl.nxv32i16.i64.i64(<vscale x 32 x i16> poison, <vscale x 32 x i16> [[OP1]], i64 [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vssrl.nxv32i16.i64.i64(<vscale x 32 x i16> poison, <vscale x 32 x i16> [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vssrl_vx_u16m8(vuint16m8_t op1, size_t shift, size_t vl) {
- return __riscv_vssrl_vx_u16m8(op1, shift, vl);
+ return __riscv_vssrl_vx_u16m8(op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i32> @test_vssrl_vv_u32mf2
// CHECK-RV64-SAME: (<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vssrl.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1]], <vscale x 1 x i32> [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vssrl.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1]], <vscale x 1 x i32> [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vssrl_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t shift, size_t vl) {
- return __riscv_vssrl_vv_u32mf2(op1, shift, vl);
+ return __riscv_vssrl_vv_u32mf2(op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i32> @test_vssrl_vx_u32mf2
// CHECK-RV64-SAME: (<vscale x 1 x i32> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vssrl.nxv1i32.i64.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1]], i64 [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vssrl.nxv1i32.i64.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vssrl_vx_u32mf2(vuint32mf2_t op1, size_t shift, size_t vl) {
- return __riscv_vssrl_vx_u32mf2(op1, shift, vl);
+ return __riscv_vssrl_vx_u32mf2(op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i32> @test_vssrl_vv_u32m1
// CHECK-RV64-SAME: (<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vssrl.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> poison, <vscale x 2 x i32> [[OP1]], <vscale x 2 x i32> [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vssrl.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> poison, <vscale x 2 x i32> [[OP1]], <vscale x 2 x i32> [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vssrl_vv_u32m1(vuint32m1_t op1, vuint32m1_t shift, size_t vl) {
- return __riscv_vssrl_vv_u32m1(op1, shift, vl);
+ return __riscv_vssrl_vv_u32m1(op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i32> @test_vssrl_vx_u32m1
// CHECK-RV64-SAME: (<vscale x 2 x i32> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vssrl.nxv2i32.i64.i64(<vscale x 2 x i32> poison, <vscale x 2 x i32> [[OP1]], i64 [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vssrl.nxv2i32.i64.i64(<vscale x 2 x i32> poison, <vscale x 2 x i32> [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vssrl_vx_u32m1(vuint32m1_t op1, size_t shift, size_t vl) {
- return __riscv_vssrl_vx_u32m1(op1, shift, vl);
+ return __riscv_vssrl_vx_u32m1(op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i32> @test_vssrl_vv_u32m2
// CHECK-RV64-SAME: (<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vssrl.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> poison, <vscale x 4 x i32> [[OP1]], <vscale x 4 x i32> [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vssrl.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> poison, <vscale x 4 x i32> [[OP1]], <vscale x 4 x i32> [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vssrl_vv_u32m2(vuint32m2_t op1, vuint32m2_t shift, size_t vl) {
- return __riscv_vssrl_vv_u32m2(op1, shift, vl);
+ return __riscv_vssrl_vv_u32m2(op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i32> @test_vssrl_vx_u32m2
// CHECK-RV64-SAME: (<vscale x 4 x i32> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vssrl.nxv4i32.i64.i64(<vscale x 4 x i32> poison, <vscale x 4 x i32> [[OP1]], i64 [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vssrl.nxv4i32.i64.i64(<vscale x 4 x i32> poison, <vscale x 4 x i32> [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vssrl_vx_u32m2(vuint32m2_t op1, size_t shift, size_t vl) {
- return __riscv_vssrl_vx_u32m2(op1, shift, vl);
+ return __riscv_vssrl_vx_u32m2(op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i32> @test_vssrl_vv_u32m4
// CHECK-RV64-SAME: (<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vssrl.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> poison, <vscale x 8 x i32> [[OP1]], <vscale x 8 x i32> [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vssrl.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> poison, <vscale x 8 x i32> [[OP1]], <vscale x 8 x i32> [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vssrl_vv_u32m4(vuint32m4_t op1, vuint32m4_t shift, size_t vl) {
- return __riscv_vssrl_vv_u32m4(op1, shift, vl);
+ return __riscv_vssrl_vv_u32m4(op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i32> @test_vssrl_vx_u32m4
// CHECK-RV64-SAME: (<vscale x 8 x i32> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vssrl.nxv8i32.i64.i64(<vscale x 8 x i32> poison, <vscale x 8 x i32> [[OP1]], i64 [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vssrl.nxv8i32.i64.i64(<vscale x 8 x i32> poison, <vscale x 8 x i32> [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vssrl_vx_u32m4(vuint32m4_t op1, size_t shift, size_t vl) {
- return __riscv_vssrl_vx_u32m4(op1, shift, vl);
+ return __riscv_vssrl_vx_u32m4(op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i32> @test_vssrl_vv_u32m8
// CHECK-RV64-SAME: (<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vssrl.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> poison, <vscale x 16 x i32> [[OP1]], <vscale x 16 x i32> [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vssrl.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> poison, <vscale x 16 x i32> [[OP1]], <vscale x 16 x i32> [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vssrl_vv_u32m8(vuint32m8_t op1, vuint32m8_t shift, size_t vl) {
- return __riscv_vssrl_vv_u32m8(op1, shift, vl);
+ return __riscv_vssrl_vv_u32m8(op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i32> @test_vssrl_vx_u32m8
// CHECK-RV64-SAME: (<vscale x 16 x i32> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vssrl.nxv16i32.i64.i64(<vscale x 16 x i32> poison, <vscale x 16 x i32> [[OP1]], i64 [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vssrl.nxv16i32.i64.i64(<vscale x 16 x i32> poison, <vscale x 16 x i32> [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vssrl_vx_u32m8(vuint32m8_t op1, size_t shift, size_t vl) {
- return __riscv_vssrl_vx_u32m8(op1, shift, vl);
+ return __riscv_vssrl_vx_u32m8(op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i64> @test_vssrl_vv_u64m1
// CHECK-RV64-SAME: (<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vssrl.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> poison, <vscale x 1 x i64> [[OP1]], <vscale x 1 x i64> [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vssrl.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> poison, <vscale x 1 x i64> [[OP1]], <vscale x 1 x i64> [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vssrl_vv_u64m1(vuint64m1_t op1, vuint64m1_t shift, size_t vl) {
- return __riscv_vssrl_vv_u64m1(op1, shift, vl);
+ return __riscv_vssrl_vv_u64m1(op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i64> @test_vssrl_vx_u64m1
// CHECK-RV64-SAME: (<vscale x 1 x i64> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vssrl.nxv1i64.i64.i64(<vscale x 1 x i64> poison, <vscale x 1 x i64> [[OP1]], i64 [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vssrl.nxv1i64.i64.i64(<vscale x 1 x i64> poison, <vscale x 1 x i64> [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vssrl_vx_u64m1(vuint64m1_t op1, size_t shift, size_t vl) {
- return __riscv_vssrl_vx_u64m1(op1, shift, vl);
+ return __riscv_vssrl_vx_u64m1(op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i64> @test_vssrl_vv_u64m2
// CHECK-RV64-SAME: (<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vssrl.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> poison, <vscale x 2 x i64> [[OP1]], <vscale x 2 x i64> [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vssrl.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> poison, <vscale x 2 x i64> [[OP1]], <vscale x 2 x i64> [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vssrl_vv_u64m2(vuint64m2_t op1, vuint64m2_t shift, size_t vl) {
- return __riscv_vssrl_vv_u64m2(op1, shift, vl);
+ return __riscv_vssrl_vv_u64m2(op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i64> @test_vssrl_vx_u64m2
// CHECK-RV64-SAME: (<vscale x 2 x i64> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vssrl.nxv2i64.i64.i64(<vscale x 2 x i64> poison, <vscale x 2 x i64> [[OP1]], i64 [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vssrl.nxv2i64.i64.i64(<vscale x 2 x i64> poison, <vscale x 2 x i64> [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vssrl_vx_u64m2(vuint64m2_t op1, size_t shift, size_t vl) {
- return __riscv_vssrl_vx_u64m2(op1, shift, vl);
+ return __riscv_vssrl_vx_u64m2(op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i64> @test_vssrl_vv_u64m4
// CHECK-RV64-SAME: (<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vssrl.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> poison, <vscale x 4 x i64> [[OP1]], <vscale x 4 x i64> [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vssrl.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> poison, <vscale x 4 x i64> [[OP1]], <vscale x 4 x i64> [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vssrl_vv_u64m4(vuint64m4_t op1, vuint64m4_t shift, size_t vl) {
- return __riscv_vssrl_vv_u64m4(op1, shift, vl);
+ return __riscv_vssrl_vv_u64m4(op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i64> @test_vssrl_vx_u64m4
// CHECK-RV64-SAME: (<vscale x 4 x i64> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vssrl.nxv4i64.i64.i64(<vscale x 4 x i64> poison, <vscale x 4 x i64> [[OP1]], i64 [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vssrl.nxv4i64.i64.i64(<vscale x 4 x i64> poison, <vscale x 4 x i64> [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vssrl_vx_u64m4(vuint64m4_t op1, size_t shift, size_t vl) {
- return __riscv_vssrl_vx_u64m4(op1, shift, vl);
+ return __riscv_vssrl_vx_u64m4(op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i64> @test_vssrl_vv_u64m8
// CHECK-RV64-SAME: (<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vssrl.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> poison, <vscale x 8 x i64> [[OP1]], <vscale x 8 x i64> [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vssrl.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> poison, <vscale x 8 x i64> [[OP1]], <vscale x 8 x i64> [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vssrl_vv_u64m8(vuint64m8_t op1, vuint64m8_t shift, size_t vl) {
- return __riscv_vssrl_vv_u64m8(op1, shift, vl);
+ return __riscv_vssrl_vv_u64m8(op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i64> @test_vssrl_vx_u64m8
// CHECK-RV64-SAME: (<vscale x 8 x i64> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vssrl.nxv8i64.i64.i64(<vscale x 8 x i64> poison, <vscale x 8 x i64> [[OP1]], i64 [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vssrl.nxv8i64.i64.i64(<vscale x 8 x i64> poison, <vscale x 8 x i64> [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vssrl_vx_u64m8(vuint64m8_t op1, size_t shift, size_t vl) {
- return __riscv_vssrl_vx_u64m8(op1, shift, vl);
+ return __riscv_vssrl_vx_u64m8(op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vssrl_vv_u8mf8_m
// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vssrl.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> poison, <vscale x 1 x i8> [[OP1]], <vscale x 1 x i8> [[SHIFT]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vssrl.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> poison, <vscale x 1 x i8> [[OP1]], <vscale x 1 x i8> [[SHIFT]], <vscale x 1 x i1> [[MASK]], i64 0, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vssrl_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t op1, vuint8mf8_t shift, size_t vl) {
- return __riscv_vssrl_vv_u8mf8_m(mask, op1, shift, vl);
+ return __riscv_vssrl_vv_u8mf8_m(mask, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vssrl_vx_u8mf8_m
// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vssrl.mask.nxv1i8.i64.i64(<vscale x 1 x i8> poison, <vscale x 1 x i8> [[OP1]], i64 [[SHIFT]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vssrl.mask.nxv1i8.i64.i64(<vscale x 1 x i8> poison, <vscale x 1 x i8> [[OP1]], i64 [[SHIFT]], <vscale x 1 x i1> [[MASK]], i64 0, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vssrl_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t op1, size_t shift, size_t vl) {
- return __riscv_vssrl_vx_u8mf8_m(mask, op1, shift, vl);
+ return __riscv_vssrl_vx_u8mf8_m(mask, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vssrl_vv_u8mf4_m
// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vssrl.mask.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> poison, <vscale x 2 x i8> [[OP1]], <vscale x 2 x i8> [[SHIFT]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vssrl.mask.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> poison, <vscale x 2 x i8> [[OP1]], <vscale x 2 x i8> [[SHIFT]], <vscale x 2 x i1> [[MASK]], i64 0, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vssrl_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t op1, vuint8mf4_t shift, size_t vl) {
- return __riscv_vssrl_vv_u8mf4_m(mask, op1, shift, vl);
+ return __riscv_vssrl_vv_u8mf4_m(mask, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vssrl_vx_u8mf4_m
// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i8> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vssrl.mask.nxv2i8.i64.i64(<vscale x 2 x i8> poison, <vscale x 2 x i8> [[OP1]], i64 [[SHIFT]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vssrl.mask.nxv2i8.i64.i64(<vscale x 2 x i8> poison, <vscale x 2 x i8> [[OP1]], i64 [[SHIFT]], <vscale x 2 x i1> [[MASK]], i64 0, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vssrl_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t op1, size_t shift, size_t vl) {
- return __riscv_vssrl_vx_u8mf4_m(mask, op1, shift, vl);
+ return __riscv_vssrl_vx_u8mf4_m(mask, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vssrl_vv_u8mf2_m
// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vssrl.mask.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> poison, <vscale x 4 x i8> [[OP1]], <vscale x 4 x i8> [[SHIFT]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vssrl.mask.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> poison, <vscale x 4 x i8> [[OP1]], <vscale x 4 x i8> [[SHIFT]], <vscale x 4 x i1> [[MASK]], i64 0, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vssrl_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t op1, vuint8mf2_t shift, size_t vl) {
- return __riscv_vssrl_vv_u8mf2_m(mask, op1, shift, vl);
+ return __riscv_vssrl_vv_u8mf2_m(mask, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vssrl_vx_u8mf2_m
// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i8> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vssrl.mask.nxv4i8.i64.i64(<vscale x 4 x i8> poison, <vscale x 4 x i8> [[OP1]], i64 [[SHIFT]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vssrl.mask.nxv4i8.i64.i64(<vscale x 4 x i8> poison, <vscale x 4 x i8> [[OP1]], i64 [[SHIFT]], <vscale x 4 x i1> [[MASK]], i64 0, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vssrl_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t op1, size_t shift, size_t vl) {
- return __riscv_vssrl_vx_u8mf2_m(mask, op1, shift, vl);
+ return __riscv_vssrl_vx_u8mf2_m(mask, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vssrl_vv_u8m1_m
// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vssrl.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> poison, <vscale x 8 x i8> [[OP1]], <vscale x 8 x i8> [[SHIFT]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vssrl.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> poison, <vscale x 8 x i8> [[OP1]], <vscale x 8 x i8> [[SHIFT]], <vscale x 8 x i1> [[MASK]], i64 0, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vssrl_vv_u8m1_m(vbool8_t mask, vuint8m1_t op1, vuint8m1_t shift, size_t vl) {
- return __riscv_vssrl_vv_u8m1_m(mask, op1, shift, vl);
+ return __riscv_vssrl_vv_u8m1_m(mask, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vssrl_vx_u8m1_m
// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i8> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vssrl.mask.nxv8i8.i64.i64(<vscale x 8 x i8> poison, <vscale x 8 x i8> [[OP1]], i64 [[SHIFT]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vssrl.mask.nxv8i8.i64.i64(<vscale x 8 x i8> poison, <vscale x 8 x i8> [[OP1]], i64 [[SHIFT]], <vscale x 8 x i1> [[MASK]], i64 0, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vssrl_vx_u8m1_m(vbool8_t mask, vuint8m1_t op1, size_t shift, size_t vl) {
- return __riscv_vssrl_vx_u8m1_m(mask, op1, shift, vl);
+ return __riscv_vssrl_vx_u8m1_m(mask, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vssrl_vv_u8m2_m
// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vssrl.mask.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> poison, <vscale x 16 x i8> [[OP1]], <vscale x 16 x i8> [[SHIFT]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vssrl.mask.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> poison, <vscale x 16 x i8> [[OP1]], <vscale x 16 x i8> [[SHIFT]], <vscale x 16 x i1> [[MASK]], i64 0, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vssrl_vv_u8m2_m(vbool4_t mask, vuint8m2_t op1, vuint8m2_t shift, size_t vl) {
- return __riscv_vssrl_vv_u8m2_m(mask, op1, shift, vl);
+ return __riscv_vssrl_vv_u8m2_m(mask, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vssrl_vx_u8m2_m
// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x i8> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vssrl.mask.nxv16i8.i64.i64(<vscale x 16 x i8> poison, <vscale x 16 x i8> [[OP1]], i64 [[SHIFT]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vssrl.mask.nxv16i8.i64.i64(<vscale x 16 x i8> poison, <vscale x 16 x i8> [[OP1]], i64 [[SHIFT]], <vscale x 16 x i1> [[MASK]], i64 0, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vssrl_vx_u8m2_m(vbool4_t mask, vuint8m2_t op1, size_t shift, size_t vl) {
- return __riscv_vssrl_vx_u8m2_m(mask, op1, shift, vl);
+ return __riscv_vssrl_vx_u8m2_m(mask, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vssrl_vv_u8m4_m
// CHECK-RV64-SAME: (<vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vssrl.mask.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> poison, <vscale x 32 x i8> [[OP1]], <vscale x 32 x i8> [[SHIFT]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vssrl.mask.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> poison, <vscale x 32 x i8> [[OP1]], <vscale x 32 x i8> [[SHIFT]], <vscale x 32 x i1> [[MASK]], i64 0, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vssrl_vv_u8m4_m(vbool2_t mask, vuint8m4_t op1, vuint8m4_t shift, size_t vl) {
- return __riscv_vssrl_vv_u8m4_m(mask, op1, shift, vl);
+ return __riscv_vssrl_vv_u8m4_m(mask, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vssrl_vx_u8m4_m
// CHECK-RV64-SAME: (<vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x i8> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vssrl.mask.nxv32i8.i64.i64(<vscale x 32 x i8> poison, <vscale x 32 x i8> [[OP1]], i64 [[SHIFT]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vssrl.mask.nxv32i8.i64.i64(<vscale x 32 x i8> poison, <vscale x 32 x i8> [[OP1]], i64 [[SHIFT]], <vscale x 32 x i1> [[MASK]], i64 0, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vssrl_vx_u8m4_m(vbool2_t mask, vuint8m4_t op1, size_t shift, size_t vl) {
- return __riscv_vssrl_vx_u8m4_m(mask, op1, shift, vl);
+ return __riscv_vssrl_vx_u8m4_m(mask, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 64 x i8> @test_vssrl_vv_u8m8_m
// CHECK-RV64-SAME: (<vscale x 64 x i1> [[MASK:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vssrl.mask.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> poison, <vscale x 64 x i8> [[OP1]], <vscale x 64 x i8> [[SHIFT]], <vscale x 64 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vssrl.mask.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> poison, <vscale x 64 x i8> [[OP1]], <vscale x 64 x i8> [[SHIFT]], <vscale x 64 x i1> [[MASK]], i64 0, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vssrl_vv_u8m8_m(vbool1_t mask, vuint8m8_t op1, vuint8m8_t shift, size_t vl) {
- return __riscv_vssrl_vv_u8m8_m(mask, op1, shift, vl);
+ return __riscv_vssrl_vv_u8m8_m(mask, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 64 x i8> @test_vssrl_vx_u8m8_m
// CHECK-RV64-SAME: (<vscale x 64 x i1> [[MASK:%.*]], <vscale x 64 x i8> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vssrl.mask.nxv64i8.i64.i64(<vscale x 64 x i8> poison, <vscale x 64 x i8> [[OP1]], i64 [[SHIFT]], <vscale x 64 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vssrl.mask.nxv64i8.i64.i64(<vscale x 64 x i8> poison, <vscale x 64 x i8> [[OP1]], i64 [[SHIFT]], <vscale x 64 x i1> [[MASK]], i64 0, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vssrl_vx_u8m8_m(vbool1_t mask, vuint8m8_t op1, size_t shift, size_t vl) {
- return __riscv_vssrl_vx_u8m8_m(mask, op1, shift, vl);
+ return __riscv_vssrl_vx_u8m8_m(mask, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i16> @test_vssrl_vv_u16mf4_m
// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vssrl.mask.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> poison, <vscale x 1 x i16> [[OP1]], <vscale x 1 x i16> [[SHIFT]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vssrl.mask.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> poison, <vscale x 1 x i16> [[OP1]], <vscale x 1 x i16> [[SHIFT]], <vscale x 1 x i1> [[MASK]], i64 0, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vssrl_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t op1, vuint16mf4_t shift, size_t vl) {
- return __riscv_vssrl_vv_u16mf4_m(mask, op1, shift, vl);
+ return __riscv_vssrl_vv_u16mf4_m(mask, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i16> @test_vssrl_vx_u16mf4_m
// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vssrl.mask.nxv1i16.i64.i64(<vscale x 1 x i16> poison, <vscale x 1 x i16> [[OP1]], i64 [[SHIFT]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vssrl.mask.nxv1i16.i64.i64(<vscale x 1 x i16> poison, <vscale x 1 x i16> [[OP1]], i64 [[SHIFT]], <vscale x 1 x i1> [[MASK]], i64 0, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vssrl_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t op1, size_t shift, size_t vl) {
- return __riscv_vssrl_vx_u16mf4_m(mask, op1, shift, vl);
+ return __riscv_vssrl_vx_u16mf4_m(mask, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i16> @test_vssrl_vv_u16mf2_m
// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vssrl.mask.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> poison, <vscale x 2 x i16> [[OP1]], <vscale x 2 x i16> [[SHIFT]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vssrl.mask.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> poison, <vscale x 2 x i16> [[OP1]], <vscale x 2 x i16> [[SHIFT]], <vscale x 2 x i1> [[MASK]], i64 0, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vssrl_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t op1, vuint16mf2_t shift, size_t vl) {
- return __riscv_vssrl_vv_u16mf2_m(mask, op1, shift, vl);
+ return __riscv_vssrl_vv_u16mf2_m(mask, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i16> @test_vssrl_vx_u16mf2_m
// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vssrl.mask.nxv2i16.i64.i64(<vscale x 2 x i16> poison, <vscale x 2 x i16> [[OP1]], i64 [[SHIFT]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vssrl.mask.nxv2i16.i64.i64(<vscale x 2 x i16> poison, <vscale x 2 x i16> [[OP1]], i64 [[SHIFT]], <vscale x 2 x i1> [[MASK]], i64 0, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vssrl_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t op1, size_t shift, size_t vl) {
- return __riscv_vssrl_vx_u16mf2_m(mask, op1, shift, vl);
+ return __riscv_vssrl_vx_u16mf2_m(mask, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vssrl_vv_u16m1_m
// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vssrl.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> poison, <vscale x 4 x i16> [[OP1]], <vscale x 4 x i16> [[SHIFT]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vssrl.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> poison, <vscale x 4 x i16> [[OP1]], <vscale x 4 x i16> [[SHIFT]], <vscale x 4 x i1> [[MASK]], i64 0, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vssrl_vv_u16m1_m(vbool16_t mask, vuint16m1_t op1, vuint16m1_t shift, size_t vl) {
- return __riscv_vssrl_vv_u16m1_m(mask, op1, shift, vl);
+ return __riscv_vssrl_vv_u16m1_m(mask, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vssrl_vx_u16m1_m
// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vssrl.mask.nxv4i16.i64.i64(<vscale x 4 x i16> poison, <vscale x 4 x i16> [[OP1]], i64 [[SHIFT]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vssrl.mask.nxv4i16.i64.i64(<vscale x 4 x i16> poison, <vscale x 4 x i16> [[OP1]], i64 [[SHIFT]], <vscale x 4 x i1> [[MASK]], i64 0, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vssrl_vx_u16m1_m(vbool16_t mask, vuint16m1_t op1, size_t shift, size_t vl) {
- return __riscv_vssrl_vx_u16m1_m(mask, op1, shift, vl);
+ return __riscv_vssrl_vx_u16m1_m(mask, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i16> @test_vssrl_vv_u16m2_m
// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vssrl.mask.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> poison, <vscale x 8 x i16> [[OP1]], <vscale x 8 x i16> [[SHIFT]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vssrl.mask.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> poison, <vscale x 8 x i16> [[OP1]], <vscale x 8 x i16> [[SHIFT]], <vscale x 8 x i1> [[MASK]], i64 0, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vssrl_vv_u16m2_m(vbool8_t mask, vuint16m2_t op1, vuint16m2_t shift, size_t vl) {
- return __riscv_vssrl_vv_u16m2_m(mask, op1, shift, vl);
+ return __riscv_vssrl_vv_u16m2_m(mask, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i16> @test_vssrl_vx_u16m2_m
// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vssrl.mask.nxv8i16.i64.i64(<vscale x 8 x i16> poison, <vscale x 8 x i16> [[OP1]], i64 [[SHIFT]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vssrl.mask.nxv8i16.i64.i64(<vscale x 8 x i16> poison, <vscale x 8 x i16> [[OP1]], i64 [[SHIFT]], <vscale x 8 x i1> [[MASK]], i64 0, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vssrl_vx_u16m2_m(vbool8_t mask, vuint16m2_t op1, size_t shift, size_t vl) {
- return __riscv_vssrl_vx_u16m2_m(mask, op1, shift, vl);
+ return __riscv_vssrl_vx_u16m2_m(mask, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i16> @test_vssrl_vv_u16m4_m
// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vssrl.mask.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> poison, <vscale x 16 x i16> [[OP1]], <vscale x 16 x i16> [[SHIFT]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vssrl.mask.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> poison, <vscale x 16 x i16> [[OP1]], <vscale x 16 x i16> [[SHIFT]], <vscale x 16 x i1> [[MASK]], i64 0, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vssrl_vv_u16m4_m(vbool4_t mask, vuint16m4_t op1, vuint16m4_t shift, size_t vl) {
- return __riscv_vssrl_vv_u16m4_m(mask, op1, shift, vl);
+ return __riscv_vssrl_vv_u16m4_m(mask, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i16> @test_vssrl_vx_u16m4_m
// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vssrl.mask.nxv16i16.i64.i64(<vscale x 16 x i16> poison, <vscale x 16 x i16> [[OP1]], i64 [[SHIFT]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vssrl.mask.nxv16i16.i64.i64(<vscale x 16 x i16> poison, <vscale x 16 x i16> [[OP1]], i64 [[SHIFT]], <vscale x 16 x i1> [[MASK]], i64 0, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vssrl_vx_u16m4_m(vbool4_t mask, vuint16m4_t op1, size_t shift, size_t vl) {
- return __riscv_vssrl_vx_u16m4_m(mask, op1, shift, vl);
+ return __riscv_vssrl_vx_u16m4_m(mask, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i16> @test_vssrl_vv_u16m8_m
// CHECK-RV64-SAME: (<vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vssrl.mask.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> poison, <vscale x 32 x i16> [[OP1]], <vscale x 32 x i16> [[SHIFT]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vssrl.mask.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> poison, <vscale x 32 x i16> [[OP1]], <vscale x 32 x i16> [[SHIFT]], <vscale x 32 x i1> [[MASK]], i64 0, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vssrl_vv_u16m8_m(vbool2_t mask, vuint16m8_t op1, vuint16m8_t shift, size_t vl) {
- return __riscv_vssrl_vv_u16m8_m(mask, op1, shift, vl);
+ return __riscv_vssrl_vv_u16m8_m(mask, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i16> @test_vssrl_vx_u16m8_m
// CHECK-RV64-SAME: (<vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vssrl.mask.nxv32i16.i64.i64(<vscale x 32 x i16> poison, <vscale x 32 x i16> [[OP1]], i64 [[SHIFT]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vssrl.mask.nxv32i16.i64.i64(<vscale x 32 x i16> poison, <vscale x 32 x i16> [[OP1]], i64 [[SHIFT]], <vscale x 32 x i1> [[MASK]], i64 0, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vssrl_vx_u16m8_m(vbool2_t mask, vuint16m8_t op1, size_t shift, size_t vl) {
- return __riscv_vssrl_vx_u16m8_m(mask, op1, shift, vl);
+ return __riscv_vssrl_vx_u16m8_m(mask, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i32> @test_vssrl_vv_u32mf2_m
// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vssrl.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1]], <vscale x 1 x i32> [[SHIFT]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vssrl.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1]], <vscale x 1 x i32> [[SHIFT]], <vscale x 1 x i1> [[MASK]], i64 0, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vssrl_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t op1, vuint32mf2_t shift, size_t vl) {
- return __riscv_vssrl_vv_u32mf2_m(mask, op1, shift, vl);
+ return __riscv_vssrl_vv_u32mf2_m(mask, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i32> @test_vssrl_vx_u32mf2_m
// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vssrl.mask.nxv1i32.i64.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1]], i64 [[SHIFT]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vssrl.mask.nxv1i32.i64.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1]], i64 [[SHIFT]], <vscale x 1 x i1> [[MASK]], i64 0, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vssrl_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t op1, size_t shift, size_t vl) {
- return __riscv_vssrl_vx_u32mf2_m(mask, op1, shift, vl);
+ return __riscv_vssrl_vx_u32mf2_m(mask, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i32> @test_vssrl_vv_u32m1_m
// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vssrl.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> poison, <vscale x 2 x i32> [[OP1]], <vscale x 2 x i32> [[SHIFT]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vssrl.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> poison, <vscale x 2 x i32> [[OP1]], <vscale x 2 x i32> [[SHIFT]], <vscale x 2 x i1> [[MASK]], i64 0, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vssrl_vv_u32m1_m(vbool32_t mask, vuint32m1_t op1, vuint32m1_t shift, size_t vl) {
- return __riscv_vssrl_vv_u32m1_m(mask, op1, shift, vl);
+ return __riscv_vssrl_vv_u32m1_m(mask, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i32> @test_vssrl_vx_u32m1_m
// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vssrl.mask.nxv2i32.i64.i64(<vscale x 2 x i32> poison, <vscale x 2 x i32> [[OP1]], i64 [[SHIFT]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vssrl.mask.nxv2i32.i64.i64(<vscale x 2 x i32> poison, <vscale x 2 x i32> [[OP1]], i64 [[SHIFT]], <vscale x 2 x i1> [[MASK]], i64 0, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vssrl_vx_u32m1_m(vbool32_t mask, vuint32m1_t op1, size_t shift, size_t vl) {
- return __riscv_vssrl_vx_u32m1_m(mask, op1, shift, vl);
+ return __riscv_vssrl_vx_u32m1_m(mask, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i32> @test_vssrl_vv_u32m2_m
// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vssrl.mask.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> poison, <vscale x 4 x i32> [[OP1]], <vscale x 4 x i32> [[SHIFT]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vssrl.mask.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> poison, <vscale x 4 x i32> [[OP1]], <vscale x 4 x i32> [[SHIFT]], <vscale x 4 x i1> [[MASK]], i64 0, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vssrl_vv_u32m2_m(vbool16_t mask, vuint32m2_t op1, vuint32m2_t shift, size_t vl) {
- return __riscv_vssrl_vv_u32m2_m(mask, op1, shift, vl);
+ return __riscv_vssrl_vv_u32m2_m(mask, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i32> @test_vssrl_vx_u32m2_m
// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vssrl.mask.nxv4i32.i64.i64(<vscale x 4 x i32> poison, <vscale x 4 x i32> [[OP1]], i64 [[SHIFT]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vssrl.mask.nxv4i32.i64.i64(<vscale x 4 x i32> poison, <vscale x 4 x i32> [[OP1]], i64 [[SHIFT]], <vscale x 4 x i1> [[MASK]], i64 0, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vssrl_vx_u32m2_m(vbool16_t mask, vuint32m2_t op1, size_t shift, size_t vl) {
- return __riscv_vssrl_vx_u32m2_m(mask, op1, shift, vl);
+ return __riscv_vssrl_vx_u32m2_m(mask, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i32> @test_vssrl_vv_u32m4_m
// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vssrl.mask.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> poison, <vscale x 8 x i32> [[OP1]], <vscale x 8 x i32> [[SHIFT]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vssrl.mask.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> poison, <vscale x 8 x i32> [[OP1]], <vscale x 8 x i32> [[SHIFT]], <vscale x 8 x i1> [[MASK]], i64 0, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vssrl_vv_u32m4_m(vbool8_t mask, vuint32m4_t op1, vuint32m4_t shift, size_t vl) {
- return __riscv_vssrl_vv_u32m4_m(mask, op1, shift, vl);
+ return __riscv_vssrl_vv_u32m4_m(mask, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i32> @test_vssrl_vx_u32m4_m
// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vssrl.mask.nxv8i32.i64.i64(<vscale x 8 x i32> poison, <vscale x 8 x i32> [[OP1]], i64 [[SHIFT]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vssrl.mask.nxv8i32.i64.i64(<vscale x 8 x i32> poison, <vscale x 8 x i32> [[OP1]], i64 [[SHIFT]], <vscale x 8 x i1> [[MASK]], i64 0, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vssrl_vx_u32m4_m(vbool8_t mask, vuint32m4_t op1, size_t shift, size_t vl) {
- return __riscv_vssrl_vx_u32m4_m(mask, op1, shift, vl);
+ return __riscv_vssrl_vx_u32m4_m(mask, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i32> @test_vssrl_vv_u32m8_m
// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vssrl.mask.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> poison, <vscale x 16 x i32> [[OP1]], <vscale x 16 x i32> [[SHIFT]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vssrl.mask.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> poison, <vscale x 16 x i32> [[OP1]], <vscale x 16 x i32> [[SHIFT]], <vscale x 16 x i1> [[MASK]], i64 0, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vssrl_vv_u32m8_m(vbool4_t mask, vuint32m8_t op1, vuint32m8_t shift, size_t vl) {
- return __riscv_vssrl_vv_u32m8_m(mask, op1, shift, vl);
+ return __riscv_vssrl_vv_u32m8_m(mask, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i32> @test_vssrl_vx_u32m8_m
// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vssrl.mask.nxv16i32.i64.i64(<vscale x 16 x i32> poison, <vscale x 16 x i32> [[OP1]], i64 [[SHIFT]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vssrl.mask.nxv16i32.i64.i64(<vscale x 16 x i32> poison, <vscale x 16 x i32> [[OP1]], i64 [[SHIFT]], <vscale x 16 x i1> [[MASK]], i64 0, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vssrl_vx_u32m8_m(vbool4_t mask, vuint32m8_t op1, size_t shift, size_t vl) {
- return __riscv_vssrl_vx_u32m8_m(mask, op1, shift, vl);
+ return __riscv_vssrl_vx_u32m8_m(mask, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i64> @test_vssrl_vv_u64m1_m
// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vssrl.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> poison, <vscale x 1 x i64> [[OP1]], <vscale x 1 x i64> [[SHIFT]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vssrl.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> poison, <vscale x 1 x i64> [[OP1]], <vscale x 1 x i64> [[SHIFT]], <vscale x 1 x i1> [[MASK]], i64 0, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vssrl_vv_u64m1_m(vbool64_t mask, vuint64m1_t op1, vuint64m1_t shift, size_t vl) {
- return __riscv_vssrl_vv_u64m1_m(mask, op1, shift, vl);
+ return __riscv_vssrl_vv_u64m1_m(mask, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i64> @test_vssrl_vx_u64m1_m
// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vssrl.mask.nxv1i64.i64.i64(<vscale x 1 x i64> poison, <vscale x 1 x i64> [[OP1]], i64 [[SHIFT]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vssrl.mask.nxv1i64.i64.i64(<vscale x 1 x i64> poison, <vscale x 1 x i64> [[OP1]], i64 [[SHIFT]], <vscale x 1 x i1> [[MASK]], i64 0, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vssrl_vx_u64m1_m(vbool64_t mask, vuint64m1_t op1, size_t shift, size_t vl) {
- return __riscv_vssrl_vx_u64m1_m(mask, op1, shift, vl);
+ return __riscv_vssrl_vx_u64m1_m(mask, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i64> @test_vssrl_vv_u64m2_m
// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vssrl.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> poison, <vscale x 2 x i64> [[OP1]], <vscale x 2 x i64> [[SHIFT]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vssrl.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> poison, <vscale x 2 x i64> [[OP1]], <vscale x 2 x i64> [[SHIFT]], <vscale x 2 x i1> [[MASK]], i64 0, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vssrl_vv_u64m2_m(vbool32_t mask, vuint64m2_t op1, vuint64m2_t shift, size_t vl) {
- return __riscv_vssrl_vv_u64m2_m(mask, op1, shift, vl);
+ return __riscv_vssrl_vv_u64m2_m(mask, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i64> @test_vssrl_vx_u64m2_m
// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vssrl.mask.nxv2i64.i64.i64(<vscale x 2 x i64> poison, <vscale x 2 x i64> [[OP1]], i64 [[SHIFT]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vssrl.mask.nxv2i64.i64.i64(<vscale x 2 x i64> poison, <vscale x 2 x i64> [[OP1]], i64 [[SHIFT]], <vscale x 2 x i1> [[MASK]], i64 0, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vssrl_vx_u64m2_m(vbool32_t mask, vuint64m2_t op1, size_t shift, size_t vl) {
- return __riscv_vssrl_vx_u64m2_m(mask, op1, shift, vl);
+ return __riscv_vssrl_vx_u64m2_m(mask, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i64> @test_vssrl_vv_u64m4_m
// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vssrl.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> poison, <vscale x 4 x i64> [[OP1]], <vscale x 4 x i64> [[SHIFT]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vssrl.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> poison, <vscale x 4 x i64> [[OP1]], <vscale x 4 x i64> [[SHIFT]], <vscale x 4 x i1> [[MASK]], i64 0, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vssrl_vv_u64m4_m(vbool16_t mask, vuint64m4_t op1, vuint64m4_t shift, size_t vl) {
- return __riscv_vssrl_vv_u64m4_m(mask, op1, shift, vl);
+ return __riscv_vssrl_vv_u64m4_m(mask, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i64> @test_vssrl_vx_u64m4_m
// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vssrl.mask.nxv4i64.i64.i64(<vscale x 4 x i64> poison, <vscale x 4 x i64> [[OP1]], i64 [[SHIFT]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vssrl.mask.nxv4i64.i64.i64(<vscale x 4 x i64> poison, <vscale x 4 x i64> [[OP1]], i64 [[SHIFT]], <vscale x 4 x i1> [[MASK]], i64 0, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vssrl_vx_u64m4_m(vbool16_t mask, vuint64m4_t op1, size_t shift, size_t vl) {
- return __riscv_vssrl_vx_u64m4_m(mask, op1, shift, vl);
+ return __riscv_vssrl_vx_u64m4_m(mask, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i64> @test_vssrl_vv_u64m8_m
// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vssrl.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> poison, <vscale x 8 x i64> [[OP1]], <vscale x 8 x i64> [[SHIFT]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vssrl.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> poison, <vscale x 8 x i64> [[OP1]], <vscale x 8 x i64> [[SHIFT]], <vscale x 8 x i1> [[MASK]], i64 0, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vssrl_vv_u64m8_m(vbool8_t mask, vuint64m8_t op1, vuint64m8_t shift, size_t vl) {
- return __riscv_vssrl_vv_u64m8_m(mask, op1, shift, vl);
+ return __riscv_vssrl_vv_u64m8_m(mask, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i64> @test_vssrl_vx_u64m8_m
// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vssrl.mask.nxv8i64.i64.i64(<vscale x 8 x i64> poison, <vscale x 8 x i64> [[OP1]], i64 [[SHIFT]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vssrl.mask.nxv8i64.i64.i64(<vscale x 8 x i64> poison, <vscale x 8 x i64> [[OP1]], i64 [[SHIFT]], <vscale x 8 x i1> [[MASK]], i64 0, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vssrl_vx_u64m8_m(vbool8_t mask, vuint64m8_t op1, size_t shift, size_t vl) {
- return __riscv_vssrl_vx_u64m8_m(mask, op1, shift, vl);
+ return __riscv_vssrl_vx_u64m8_m(mask, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vnclip_wv_i8mf8
// CHECK-RV64-SAME: (<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i8> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vnclip.nxv1i8.nxv1i16.nxv1i8.i64(<vscale x 1 x i8> poison, <vscale x 1 x i16> [[OP1]], <vscale x 1 x i8> [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vnclip.nxv1i8.nxv1i16.nxv1i8.i64(<vscale x 1 x i8> poison, <vscale x 1 x i16> [[OP1]], <vscale x 1 x i8> [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vnclip_wv_i8mf8(vint16mf4_t op1, vuint8mf8_t shift, size_t vl) {
- return __riscv_vnclip(op1, shift, vl);
+ return __riscv_vnclip(op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vnclip_wx_i8mf8
// CHECK-RV64-SAME: (<vscale x 1 x i16> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vnclip.nxv1i8.nxv1i16.i64.i64(<vscale x 1 x i8> poison, <vscale x 1 x i16> [[OP1]], i64 [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vnclip.nxv1i8.nxv1i16.i64.i64(<vscale x 1 x i8> poison, <vscale x 1 x i16> [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vnclip_wx_i8mf8(vint16mf4_t op1, size_t shift, size_t vl) {
- return __riscv_vnclip(op1, shift, vl);
+ return __riscv_vnclip(op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vnclip_wv_i8mf4
// CHECK-RV64-SAME: (<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i8> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vnclip.nxv2i8.nxv2i16.nxv2i8.i64(<vscale x 2 x i8> poison, <vscale x 2 x i16> [[OP1]], <vscale x 2 x i8> [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vnclip.nxv2i8.nxv2i16.nxv2i8.i64(<vscale x 2 x i8> poison, <vscale x 2 x i16> [[OP1]], <vscale x 2 x i8> [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vnclip_wv_i8mf4(vint16mf2_t op1, vuint8mf4_t shift, size_t vl) {
- return __riscv_vnclip(op1, shift, vl);
+ return __riscv_vnclip(op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vnclip_wx_i8mf4
// CHECK-RV64-SAME: (<vscale x 2 x i16> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vnclip.nxv2i8.nxv2i16.i64.i64(<vscale x 2 x i8> poison, <vscale x 2 x i16> [[OP1]], i64 [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vnclip.nxv2i8.nxv2i16.i64.i64(<vscale x 2 x i8> poison, <vscale x 2 x i16> [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vnclip_wx_i8mf4(vint16mf2_t op1, size_t shift, size_t vl) {
- return __riscv_vnclip(op1, shift, vl);
+ return __riscv_vnclip(op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vnclip_wv_i8mf2
// CHECK-RV64-SAME: (<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i8> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vnclip.nxv4i8.nxv4i16.nxv4i8.i64(<vscale x 4 x i8> poison, <vscale x 4 x i16> [[OP1]], <vscale x 4 x i8> [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vnclip.nxv4i8.nxv4i16.nxv4i8.i64(<vscale x 4 x i8> poison, <vscale x 4 x i16> [[OP1]], <vscale x 4 x i8> [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vnclip_wv_i8mf2(vint16m1_t op1, vuint8mf2_t shift, size_t vl) {
- return __riscv_vnclip(op1, shift, vl);
+ return __riscv_vnclip(op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vnclip_wx_i8mf2
// CHECK-RV64-SAME: (<vscale x 4 x i16> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vnclip.nxv4i8.nxv4i16.i64.i64(<vscale x 4 x i8> poison, <vscale x 4 x i16> [[OP1]], i64 [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vnclip.nxv4i8.nxv4i16.i64.i64(<vscale x 4 x i8> poison, <vscale x 4 x i16> [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vnclip_wx_i8mf2(vint16m1_t op1, size_t shift, size_t vl) {
- return __riscv_vnclip(op1, shift, vl);
+ return __riscv_vnclip(op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vnclip_wv_i8m1
// CHECK-RV64-SAME: (<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i8> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vnclip.nxv8i8.nxv8i16.nxv8i8.i64(<vscale x 8 x i8> poison, <vscale x 8 x i16> [[OP1]], <vscale x 8 x i8> [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vnclip.nxv8i8.nxv8i16.nxv8i8.i64(<vscale x 8 x i8> poison, <vscale x 8 x i16> [[OP1]], <vscale x 8 x i8> [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vnclip_wv_i8m1(vint16m2_t op1, vuint8m1_t shift, size_t vl) {
- return __riscv_vnclip(op1, shift, vl);
+ return __riscv_vnclip(op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vnclip_wx_i8m1
// CHECK-RV64-SAME: (<vscale x 8 x i16> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vnclip.nxv8i8.nxv8i16.i64.i64(<vscale x 8 x i8> poison, <vscale x 8 x i16> [[OP1]], i64 [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vnclip.nxv8i8.nxv8i16.i64.i64(<vscale x 8 x i8> poison, <vscale x 8 x i16> [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vnclip_wx_i8m1(vint16m2_t op1, size_t shift, size_t vl) {
- return __riscv_vnclip(op1, shift, vl);
+ return __riscv_vnclip(op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vnclip_wv_i8m2
// CHECK-RV64-SAME: (<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i8> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vnclip.nxv16i8.nxv16i16.nxv16i8.i64(<vscale x 16 x i8> poison, <vscale x 16 x i16> [[OP1]], <vscale x 16 x i8> [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vnclip.nxv16i8.nxv16i16.nxv16i8.i64(<vscale x 16 x i8> poison, <vscale x 16 x i16> [[OP1]], <vscale x 16 x i8> [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vnclip_wv_i8m2(vint16m4_t op1, vuint8m2_t shift, size_t vl) {
- return __riscv_vnclip(op1, shift, vl);
+ return __riscv_vnclip(op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vnclip_wx_i8m2
// CHECK-RV64-SAME: (<vscale x 16 x i16> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vnclip.nxv16i8.nxv16i16.i64.i64(<vscale x 16 x i8> poison, <vscale x 16 x i16> [[OP1]], i64 [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vnclip.nxv16i8.nxv16i16.i64.i64(<vscale x 16 x i8> poison, <vscale x 16 x i16> [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vnclip_wx_i8m2(vint16m4_t op1, size_t shift, size_t vl) {
- return __riscv_vnclip(op1, shift, vl);
+ return __riscv_vnclip(op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vnclip_wv_i8m4
// CHECK-RV64-SAME: (<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i8> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vnclip.nxv32i8.nxv32i16.nxv32i8.i64(<vscale x 32 x i8> poison, <vscale x 32 x i16> [[OP1]], <vscale x 32 x i8> [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vnclip.nxv32i8.nxv32i16.nxv32i8.i64(<vscale x 32 x i8> poison, <vscale x 32 x i16> [[OP1]], <vscale x 32 x i8> [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vnclip_wv_i8m4(vint16m8_t op1, vuint8m4_t shift, size_t vl) {
- return __riscv_vnclip(op1, shift, vl);
+ return __riscv_vnclip(op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vnclip_wx_i8m4
// CHECK-RV64-SAME: (<vscale x 32 x i16> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vnclip.nxv32i8.nxv32i16.i64.i64(<vscale x 32 x i8> poison, <vscale x 32 x i16> [[OP1]], i64 [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vnclip.nxv32i8.nxv32i16.i64.i64(<vscale x 32 x i8> poison, <vscale x 32 x i16> [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vnclip_wx_i8m4(vint16m8_t op1, size_t shift, size_t vl) {
- return __riscv_vnclip(op1, shift, vl);
+ return __riscv_vnclip(op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i16> @test_vnclip_wv_i16mf4
// CHECK-RV64-SAME: (<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i16> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vnclip.nxv1i16.nxv1i32.nxv1i16.i64(<vscale x 1 x i16> poison, <vscale x 1 x i32> [[OP1]], <vscale x 1 x i16> [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vnclip.nxv1i16.nxv1i32.nxv1i16.i64(<vscale x 1 x i16> poison, <vscale x 1 x i32> [[OP1]], <vscale x 1 x i16> [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vnclip_wv_i16mf4(vint32mf2_t op1, vuint16mf4_t shift, size_t vl) {
- return __riscv_vnclip(op1, shift, vl);
+ return __riscv_vnclip(op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i16> @test_vnclip_wx_i16mf4
// CHECK-RV64-SAME: (<vscale x 1 x i32> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vnclip.nxv1i16.nxv1i32.i64.i64(<vscale x 1 x i16> poison, <vscale x 1 x i32> [[OP1]], i64 [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vnclip.nxv1i16.nxv1i32.i64.i64(<vscale x 1 x i16> poison, <vscale x 1 x i32> [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vnclip_wx_i16mf4(vint32mf2_t op1, size_t shift, size_t vl) {
- return __riscv_vnclip(op1, shift, vl);
+ return __riscv_vnclip(op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i16> @test_vnclip_wv_i16mf2
// CHECK-RV64-SAME: (<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i16> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vnclip.nxv2i16.nxv2i32.nxv2i16.i64(<vscale x 2 x i16> poison, <vscale x 2 x i32> [[OP1]], <vscale x 2 x i16> [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vnclip.nxv2i16.nxv2i32.nxv2i16.i64(<vscale x 2 x i16> poison, <vscale x 2 x i32> [[OP1]], <vscale x 2 x i16> [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vnclip_wv_i16mf2(vint32m1_t op1, vuint16mf2_t shift, size_t vl) {
- return __riscv_vnclip(op1, shift, vl);
+ return __riscv_vnclip(op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i16> @test_vnclip_wx_i16mf2
// CHECK-RV64-SAME: (<vscale x 2 x i32> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vnclip.nxv2i16.nxv2i32.i64.i64(<vscale x 2 x i16> poison, <vscale x 2 x i32> [[OP1]], i64 [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vnclip.nxv2i16.nxv2i32.i64.i64(<vscale x 2 x i16> poison, <vscale x 2 x i32> [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vnclip_wx_i16mf2(vint32m1_t op1, size_t shift, size_t vl) {
- return __riscv_vnclip(op1, shift, vl);
+ return __riscv_vnclip(op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vnclip_wv_i16m1
// CHECK-RV64-SAME: (<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i16> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vnclip.nxv4i16.nxv4i32.nxv4i16.i64(<vscale x 4 x i16> poison, <vscale x 4 x i32> [[OP1]], <vscale x 4 x i16> [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vnclip.nxv4i16.nxv4i32.nxv4i16.i64(<vscale x 4 x i16> poison, <vscale x 4 x i32> [[OP1]], <vscale x 4 x i16> [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vnclip_wv_i16m1(vint32m2_t op1, vuint16m1_t shift, size_t vl) {
- return __riscv_vnclip(op1, shift, vl);
+ return __riscv_vnclip(op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vnclip_wx_i16m1
// CHECK-RV64-SAME: (<vscale x 4 x i32> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vnclip.nxv4i16.nxv4i32.i64.i64(<vscale x 4 x i16> poison, <vscale x 4 x i32> [[OP1]], i64 [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vnclip.nxv4i16.nxv4i32.i64.i64(<vscale x 4 x i16> poison, <vscale x 4 x i32> [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vnclip_wx_i16m1(vint32m2_t op1, size_t shift, size_t vl) {
- return __riscv_vnclip(op1, shift, vl);
+ return __riscv_vnclip(op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i16> @test_vnclip_wv_i16m2
// CHECK-RV64-SAME: (<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i16> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vnclip.nxv8i16.nxv8i32.nxv8i16.i64(<vscale x 8 x i16> poison, <vscale x 8 x i32> [[OP1]], <vscale x 8 x i16> [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vnclip.nxv8i16.nxv8i32.nxv8i16.i64(<vscale x 8 x i16> poison, <vscale x 8 x i32> [[OP1]], <vscale x 8 x i16> [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vnclip_wv_i16m2(vint32m4_t op1, vuint16m2_t shift, size_t vl) {
- return __riscv_vnclip(op1, shift, vl);
+ return __riscv_vnclip(op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i16> @test_vnclip_wx_i16m2
// CHECK-RV64-SAME: (<vscale x 8 x i32> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vnclip.nxv8i16.nxv8i32.i64.i64(<vscale x 8 x i16> poison, <vscale x 8 x i32> [[OP1]], i64 [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vnclip.nxv8i16.nxv8i32.i64.i64(<vscale x 8 x i16> poison, <vscale x 8 x i32> [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vnclip_wx_i16m2(vint32m4_t op1, size_t shift, size_t vl) {
- return __riscv_vnclip(op1, shift, vl);
+ return __riscv_vnclip(op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i16> @test_vnclip_wv_i16m4
// CHECK-RV64-SAME: (<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i16> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vnclip.nxv16i16.nxv16i32.nxv16i16.i64(<vscale x 16 x i16> poison, <vscale x 16 x i32> [[OP1]], <vscale x 16 x i16> [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vnclip.nxv16i16.nxv16i32.nxv16i16.i64(<vscale x 16 x i16> poison, <vscale x 16 x i32> [[OP1]], <vscale x 16 x i16> [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vnclip_wv_i16m4(vint32m8_t op1, vuint16m4_t shift, size_t vl) {
- return __riscv_vnclip(op1, shift, vl);
+ return __riscv_vnclip(op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i16> @test_vnclip_wx_i16m4
// CHECK-RV64-SAME: (<vscale x 16 x i32> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vnclip.nxv16i16.nxv16i32.i64.i64(<vscale x 16 x i16> poison, <vscale x 16 x i32> [[OP1]], i64 [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vnclip.nxv16i16.nxv16i32.i64.i64(<vscale x 16 x i16> poison, <vscale x 16 x i32> [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vnclip_wx_i16m4(vint32m8_t op1, size_t shift, size_t vl) {
- return __riscv_vnclip(op1, shift, vl);
+ return __riscv_vnclip(op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i32> @test_vnclip_wv_i32mf2
// CHECK-RV64-SAME: (<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i32> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnclip.nxv1i32.nxv1i64.nxv1i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i64> [[OP1]], <vscale x 1 x i32> [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnclip.nxv1i32.nxv1i64.nxv1i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i64> [[OP1]], <vscale x 1 x i32> [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vnclip_wv_i32mf2(vint64m1_t op1, vuint32mf2_t shift, size_t vl) {
- return __riscv_vnclip(op1, shift, vl);
+ return __riscv_vnclip(op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i32> @test_vnclip_wx_i32mf2
// CHECK-RV64-SAME: (<vscale x 1 x i64> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnclip.nxv1i32.nxv1i64.i64.i64(<vscale x 1 x i32> poison, <vscale x 1 x i64> [[OP1]], i64 [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnclip.nxv1i32.nxv1i64.i64.i64(<vscale x 1 x i32> poison, <vscale x 1 x i64> [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vnclip_wx_i32mf2(vint64m1_t op1, size_t shift, size_t vl) {
- return __riscv_vnclip(op1, shift, vl);
+ return __riscv_vnclip(op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i32> @test_vnclip_wv_i32m1
// CHECK-RV64-SAME: (<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i32> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vnclip.nxv2i32.nxv2i64.nxv2i32.i64(<vscale x 2 x i32> poison, <vscale x 2 x i64> [[OP1]], <vscale x 2 x i32> [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vnclip.nxv2i32.nxv2i64.nxv2i32.i64(<vscale x 2 x i32> poison, <vscale x 2 x i64> [[OP1]], <vscale x 2 x i32> [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vnclip_wv_i32m1(vint64m2_t op1, vuint32m1_t shift, size_t vl) {
- return __riscv_vnclip(op1, shift, vl);
+ return __riscv_vnclip(op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i32> @test_vnclip_wx_i32m1
// CHECK-RV64-SAME: (<vscale x 2 x i64> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vnclip.nxv2i32.nxv2i64.i64.i64(<vscale x 2 x i32> poison, <vscale x 2 x i64> [[OP1]], i64 [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vnclip.nxv2i32.nxv2i64.i64.i64(<vscale x 2 x i32> poison, <vscale x 2 x i64> [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vnclip_wx_i32m1(vint64m2_t op1, size_t shift, size_t vl) {
- return __riscv_vnclip(op1, shift, vl);
+ return __riscv_vnclip(op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i32> @test_vnclip_wv_i32m2
// CHECK-RV64-SAME: (<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i32> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vnclip.nxv4i32.nxv4i64.nxv4i32.i64(<vscale x 4 x i32> poison, <vscale x 4 x i64> [[OP1]], <vscale x 4 x i32> [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vnclip.nxv4i32.nxv4i64.nxv4i32.i64(<vscale x 4 x i32> poison, <vscale x 4 x i64> [[OP1]], <vscale x 4 x i32> [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vnclip_wv_i32m2(vint64m4_t op1, vuint32m2_t shift, size_t vl) {
- return __riscv_vnclip(op1, shift, vl);
+ return __riscv_vnclip(op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i32> @test_vnclip_wx_i32m2
// CHECK-RV64-SAME: (<vscale x 4 x i64> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vnclip.nxv4i32.nxv4i64.i64.i64(<vscale x 4 x i32> poison, <vscale x 4 x i64> [[OP1]], i64 [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vnclip.nxv4i32.nxv4i64.i64.i64(<vscale x 4 x i32> poison, <vscale x 4 x i64> [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vnclip_wx_i32m2(vint64m4_t op1, size_t shift, size_t vl) {
- return __riscv_vnclip(op1, shift, vl);
+ return __riscv_vnclip(op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i32> @test_vnclip_wv_i32m4
// CHECK-RV64-SAME: (<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i32> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vnclip.nxv8i32.nxv8i64.nxv8i32.i64(<vscale x 8 x i32> poison, <vscale x 8 x i64> [[OP1]], <vscale x 8 x i32> [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vnclip.nxv8i32.nxv8i64.nxv8i32.i64(<vscale x 8 x i32> poison, <vscale x 8 x i64> [[OP1]], <vscale x 8 x i32> [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vnclip_wv_i32m4(vint64m8_t op1, vuint32m4_t shift, size_t vl) {
- return __riscv_vnclip(op1, shift, vl);
+ return __riscv_vnclip(op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i32> @test_vnclip_wx_i32m4
// CHECK-RV64-SAME: (<vscale x 8 x i64> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vnclip.nxv8i32.nxv8i64.i64.i64(<vscale x 8 x i32> poison, <vscale x 8 x i64> [[OP1]], i64 [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vnclip.nxv8i32.nxv8i64.i64.i64(<vscale x 8 x i32> poison, <vscale x 8 x i64> [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vnclip_wx_i32m4(vint64m8_t op1, size_t shift, size_t vl) {
- return __riscv_vnclip(op1, shift, vl);
+ return __riscv_vnclip(op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vnclip_wv_i8mf8_m
// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i8> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vnclip.mask.nxv1i8.nxv1i16.nxv1i8.i64(<vscale x 1 x i8> poison, <vscale x 1 x i16> [[OP1]], <vscale x 1 x i8> [[SHIFT]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vnclip.mask.nxv1i8.nxv1i16.nxv1i8.i64(<vscale x 1 x i8> poison, <vscale x 1 x i16> [[OP1]], <vscale x 1 x i8> [[SHIFT]], <vscale x 1 x i1> [[MASK]], i64 0, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vnclip_wv_i8mf8_m(vbool64_t mask, vint16mf4_t op1, vuint8mf8_t shift, size_t vl) {
- return __riscv_vnclip(mask, op1, shift, vl);
+ return __riscv_vnclip(mask, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vnclip_wx_i8mf8_m
// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vnclip.mask.nxv1i8.nxv1i16.i64.i64(<vscale x 1 x i8> poison, <vscale x 1 x i16> [[OP1]], i64 [[SHIFT]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vnclip.mask.nxv1i8.nxv1i16.i64.i64(<vscale x 1 x i8> poison, <vscale x 1 x i16> [[OP1]], i64 [[SHIFT]], <vscale x 1 x i1> [[MASK]], i64 0, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vnclip_wx_i8mf8_m(vbool64_t mask, vint16mf4_t op1, size_t shift, size_t vl) {
- return __riscv_vnclip(mask, op1, shift, vl);
+ return __riscv_vnclip(mask, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vnclip_wv_i8mf4_m
// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i8> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vnclip.mask.nxv2i8.nxv2i16.nxv2i8.i64(<vscale x 2 x i8> poison, <vscale x 2 x i16> [[OP1]], <vscale x 2 x i8> [[SHIFT]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vnclip.mask.nxv2i8.nxv2i16.nxv2i8.i64(<vscale x 2 x i8> poison, <vscale x 2 x i16> [[OP1]], <vscale x 2 x i8> [[SHIFT]], <vscale x 2 x i1> [[MASK]], i64 0, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vnclip_wv_i8mf4_m(vbool32_t mask, vint16mf2_t op1, vuint8mf4_t shift, size_t vl) {
- return __riscv_vnclip(mask, op1, shift, vl);
+ return __riscv_vnclip(mask, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vnclip_wx_i8mf4_m
// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vnclip.mask.nxv2i8.nxv2i16.i64.i64(<vscale x 2 x i8> poison, <vscale x 2 x i16> [[OP1]], i64 [[SHIFT]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vnclip.mask.nxv2i8.nxv2i16.i64.i64(<vscale x 2 x i8> poison, <vscale x 2 x i16> [[OP1]], i64 [[SHIFT]], <vscale x 2 x i1> [[MASK]], i64 0, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vnclip_wx_i8mf4_m(vbool32_t mask, vint16mf2_t op1, size_t shift, size_t vl) {
- return __riscv_vnclip(mask, op1, shift, vl);
+ return __riscv_vnclip(mask, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vnclip_wv_i8mf2_m
// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i8> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vnclip.mask.nxv4i8.nxv4i16.nxv4i8.i64(<vscale x 4 x i8> poison, <vscale x 4 x i16> [[OP1]], <vscale x 4 x i8> [[SHIFT]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vnclip.mask.nxv4i8.nxv4i16.nxv4i8.i64(<vscale x 4 x i8> poison, <vscale x 4 x i16> [[OP1]], <vscale x 4 x i8> [[SHIFT]], <vscale x 4 x i1> [[MASK]], i64 0, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vnclip_wv_i8mf2_m(vbool16_t mask, vint16m1_t op1, vuint8mf2_t shift, size_t vl) {
- return __riscv_vnclip(mask, op1, shift, vl);
+ return __riscv_vnclip(mask, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vnclip_wx_i8mf2_m
// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vnclip.mask.nxv4i8.nxv4i16.i64.i64(<vscale x 4 x i8> poison, <vscale x 4 x i16> [[OP1]], i64 [[SHIFT]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vnclip.mask.nxv4i8.nxv4i16.i64.i64(<vscale x 4 x i8> poison, <vscale x 4 x i16> [[OP1]], i64 [[SHIFT]], <vscale x 4 x i1> [[MASK]], i64 0, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vnclip_wx_i8mf2_m(vbool16_t mask, vint16m1_t op1, size_t shift, size_t vl) {
- return __riscv_vnclip(mask, op1, shift, vl);
+ return __riscv_vnclip(mask, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vnclip_wv_i8m1_m
// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i8> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vnclip.mask.nxv8i8.nxv8i16.nxv8i8.i64(<vscale x 8 x i8> poison, <vscale x 8 x i16> [[OP1]], <vscale x 8 x i8> [[SHIFT]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vnclip.mask.nxv8i8.nxv8i16.nxv8i8.i64(<vscale x 8 x i8> poison, <vscale x 8 x i16> [[OP1]], <vscale x 8 x i8> [[SHIFT]], <vscale x 8 x i1> [[MASK]], i64 0, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vnclip_wv_i8m1_m(vbool8_t mask, vint16m2_t op1, vuint8m1_t shift, size_t vl) {
- return __riscv_vnclip(mask, op1, shift, vl);
+ return __riscv_vnclip(mask, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vnclip_wx_i8m1_m
// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vnclip.mask.nxv8i8.nxv8i16.i64.i64(<vscale x 8 x i8> poison, <vscale x 8 x i16> [[OP1]], i64 [[SHIFT]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vnclip.mask.nxv8i8.nxv8i16.i64.i64(<vscale x 8 x i8> poison, <vscale x 8 x i16> [[OP1]], i64 [[SHIFT]], <vscale x 8 x i1> [[MASK]], i64 0, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vnclip_wx_i8m1_m(vbool8_t mask, vint16m2_t op1, size_t shift, size_t vl) {
- return __riscv_vnclip(mask, op1, shift, vl);
+ return __riscv_vnclip(mask, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vnclip_wv_i8m2_m
// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i8> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vnclip.mask.nxv16i8.nxv16i16.nxv16i8.i64(<vscale x 16 x i8> poison, <vscale x 16 x i16> [[OP1]], <vscale x 16 x i8> [[SHIFT]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vnclip.mask.nxv16i8.nxv16i16.nxv16i8.i64(<vscale x 16 x i8> poison, <vscale x 16 x i16> [[OP1]], <vscale x 16 x i8> [[SHIFT]], <vscale x 16 x i1> [[MASK]], i64 0, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vnclip_wv_i8m2_m(vbool4_t mask, vint16m4_t op1, vuint8m2_t shift, size_t vl) {
- return __riscv_vnclip(mask, op1, shift, vl);
+ return __riscv_vnclip(mask, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vnclip_wx_i8m2_m
// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vnclip.mask.nxv16i8.nxv16i16.i64.i64(<vscale x 16 x i8> poison, <vscale x 16 x i16> [[OP1]], i64 [[SHIFT]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vnclip.mask.nxv16i8.nxv16i16.i64.i64(<vscale x 16 x i8> poison, <vscale x 16 x i16> [[OP1]], i64 [[SHIFT]], <vscale x 16 x i1> [[MASK]], i64 0, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vnclip_wx_i8m2_m(vbool4_t mask, vint16m4_t op1, size_t shift, size_t vl) {
- return __riscv_vnclip(mask, op1, shift, vl);
+ return __riscv_vnclip(mask, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vnclip_wv_i8m4_m
// CHECK-RV64-SAME: (<vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i8> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vnclip.mask.nxv32i8.nxv32i16.nxv32i8.i64(<vscale x 32 x i8> poison, <vscale x 32 x i16> [[OP1]], <vscale x 32 x i8> [[SHIFT]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vnclip.mask.nxv32i8.nxv32i16.nxv32i8.i64(<vscale x 32 x i8> poison, <vscale x 32 x i16> [[OP1]], <vscale x 32 x i8> [[SHIFT]], <vscale x 32 x i1> [[MASK]], i64 0, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vnclip_wv_i8m4_m(vbool2_t mask, vint16m8_t op1, vuint8m4_t shift, size_t vl) {
- return __riscv_vnclip(mask, op1, shift, vl);
+ return __riscv_vnclip(mask, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vnclip_wx_i8m4_m
// CHECK-RV64-SAME: (<vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vnclip.mask.nxv32i8.nxv32i16.i64.i64(<vscale x 32 x i8> poison, <vscale x 32 x i16> [[OP1]], i64 [[SHIFT]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vnclip.mask.nxv32i8.nxv32i16.i64.i64(<vscale x 32 x i8> poison, <vscale x 32 x i16> [[OP1]], i64 [[SHIFT]], <vscale x 32 x i1> [[MASK]], i64 0, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vnclip_wx_i8m4_m(vbool2_t mask, vint16m8_t op1, size_t shift, size_t vl) {
- return __riscv_vnclip(mask, op1, shift, vl);
+ return __riscv_vnclip(mask, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i16> @test_vnclip_wv_i16mf4_m
// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i16> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vnclip.mask.nxv1i16.nxv1i32.nxv1i16.i64(<vscale x 1 x i16> poison, <vscale x 1 x i32> [[OP1]], <vscale x 1 x i16> [[SHIFT]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vnclip.mask.nxv1i16.nxv1i32.nxv1i16.i64(<vscale x 1 x i16> poison, <vscale x 1 x i32> [[OP1]], <vscale x 1 x i16> [[SHIFT]], <vscale x 1 x i1> [[MASK]], i64 0, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vnclip_wv_i16mf4_m(vbool64_t mask, vint32mf2_t op1, vuint16mf4_t shift, size_t vl) {
- return __riscv_vnclip(mask, op1, shift, vl);
+ return __riscv_vnclip(mask, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i16> @test_vnclip_wx_i16mf4_m
// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vnclip.mask.nxv1i16.nxv1i32.i64.i64(<vscale x 1 x i16> poison, <vscale x 1 x i32> [[OP1]], i64 [[SHIFT]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vnclip.mask.nxv1i16.nxv1i32.i64.i64(<vscale x 1 x i16> poison, <vscale x 1 x i32> [[OP1]], i64 [[SHIFT]], <vscale x 1 x i1> [[MASK]], i64 0, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vnclip_wx_i16mf4_m(vbool64_t mask, vint32mf2_t op1, size_t shift, size_t vl) {
- return __riscv_vnclip(mask, op1, shift, vl);
+ return __riscv_vnclip(mask, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i16> @test_vnclip_wv_i16mf2_m
// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i16> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vnclip.mask.nxv2i16.nxv2i32.nxv2i16.i64(<vscale x 2 x i16> poison, <vscale x 2 x i32> [[OP1]], <vscale x 2 x i16> [[SHIFT]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vnclip.mask.nxv2i16.nxv2i32.nxv2i16.i64(<vscale x 2 x i16> poison, <vscale x 2 x i32> [[OP1]], <vscale x 2 x i16> [[SHIFT]], <vscale x 2 x i1> [[MASK]], i64 0, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vnclip_wv_i16mf2_m(vbool32_t mask, vint32m1_t op1, vuint16mf2_t shift, size_t vl) {
- return __riscv_vnclip(mask, op1, shift, vl);
+ return __riscv_vnclip(mask, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i16> @test_vnclip_wx_i16mf2_m
// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vnclip.mask.nxv2i16.nxv2i32.i64.i64(<vscale x 2 x i16> poison, <vscale x 2 x i32> [[OP1]], i64 [[SHIFT]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vnclip.mask.nxv2i16.nxv2i32.i64.i64(<vscale x 2 x i16> poison, <vscale x 2 x i32> [[OP1]], i64 [[SHIFT]], <vscale x 2 x i1> [[MASK]], i64 0, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vnclip_wx_i16mf2_m(vbool32_t mask, vint32m1_t op1, size_t shift, size_t vl) {
- return __riscv_vnclip(mask, op1, shift, vl);
+ return __riscv_vnclip(mask, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vnclip_wv_i16m1_m
// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i16> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vnclip.mask.nxv4i16.nxv4i32.nxv4i16.i64(<vscale x 4 x i16> poison, <vscale x 4 x i32> [[OP1]], <vscale x 4 x i16> [[SHIFT]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vnclip.mask.nxv4i16.nxv4i32.nxv4i16.i64(<vscale x 4 x i16> poison, <vscale x 4 x i32> [[OP1]], <vscale x 4 x i16> [[SHIFT]], <vscale x 4 x i1> [[MASK]], i64 0, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vnclip_wv_i16m1_m(vbool16_t mask, vint32m2_t op1, vuint16m1_t shift, size_t vl) {
- return __riscv_vnclip(mask, op1, shift, vl);
+ return __riscv_vnclip(mask, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vnclip_wx_i16m1_m
// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vnclip.mask.nxv4i16.nxv4i32.i64.i64(<vscale x 4 x i16> poison, <vscale x 4 x i32> [[OP1]], i64 [[SHIFT]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vnclip.mask.nxv4i16.nxv4i32.i64.i64(<vscale x 4 x i16> poison, <vscale x 4 x i32> [[OP1]], i64 [[SHIFT]], <vscale x 4 x i1> [[MASK]], i64 0, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vnclip_wx_i16m1_m(vbool16_t mask, vint32m2_t op1, size_t shift, size_t vl) {
- return __riscv_vnclip(mask, op1, shift, vl);
+ return __riscv_vnclip(mask, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i16> @test_vnclip_wv_i16m2_m
// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i16> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vnclip.mask.nxv8i16.nxv8i32.nxv8i16.i64(<vscale x 8 x i16> poison, <vscale x 8 x i32> [[OP1]], <vscale x 8 x i16> [[SHIFT]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vnclip.mask.nxv8i16.nxv8i32.nxv8i16.i64(<vscale x 8 x i16> poison, <vscale x 8 x i32> [[OP1]], <vscale x 8 x i16> [[SHIFT]], <vscale x 8 x i1> [[MASK]], i64 0, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vnclip_wv_i16m2_m(vbool8_t mask, vint32m4_t op1, vuint16m2_t shift, size_t vl) {
- return __riscv_vnclip(mask, op1, shift, vl);
+ return __riscv_vnclip(mask, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i16> @test_vnclip_wx_i16m2_m
// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vnclip.mask.nxv8i16.nxv8i32.i64.i64(<vscale x 8 x i16> poison, <vscale x 8 x i32> [[OP1]], i64 [[SHIFT]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vnclip.mask.nxv8i16.nxv8i32.i64.i64(<vscale x 8 x i16> poison, <vscale x 8 x i32> [[OP1]], i64 [[SHIFT]], <vscale x 8 x i1> [[MASK]], i64 0, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vnclip_wx_i16m2_m(vbool8_t mask, vint32m4_t op1, size_t shift, size_t vl) {
- return __riscv_vnclip(mask, op1, shift, vl);
+ return __riscv_vnclip(mask, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i16> @test_vnclip_wv_i16m4_m
// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i16> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vnclip.mask.nxv16i16.nxv16i32.nxv16i16.i64(<vscale x 16 x i16> poison, <vscale x 16 x i32> [[OP1]], <vscale x 16 x i16> [[SHIFT]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vnclip.mask.nxv16i16.nxv16i32.nxv16i16.i64(<vscale x 16 x i16> poison, <vscale x 16 x i32> [[OP1]], <vscale x 16 x i16> [[SHIFT]], <vscale x 16 x i1> [[MASK]], i64 0, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vnclip_wv_i16m4_m(vbool4_t mask, vint32m8_t op1, vuint16m4_t shift, size_t vl) {
- return __riscv_vnclip(mask, op1, shift, vl);
+ return __riscv_vnclip(mask, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i16> @test_vnclip_wx_i16m4_m
// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vnclip.mask.nxv16i16.nxv16i32.i64.i64(<vscale x 16 x i16> poison, <vscale x 16 x i32> [[OP1]], i64 [[SHIFT]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vnclip.mask.nxv16i16.nxv16i32.i64.i64(<vscale x 16 x i16> poison, <vscale x 16 x i32> [[OP1]], i64 [[SHIFT]], <vscale x 16 x i1> [[MASK]], i64 0, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vnclip_wx_i16m4_m(vbool4_t mask, vint32m8_t op1, size_t shift, size_t vl) {
- return __riscv_vnclip(mask, op1, shift, vl);
+ return __riscv_vnclip(mask, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i32> @test_vnclip_wv_i32mf2_m
// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i32> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnclip.mask.nxv1i32.nxv1i64.nxv1i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i64> [[OP1]], <vscale x 1 x i32> [[SHIFT]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnclip.mask.nxv1i32.nxv1i64.nxv1i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i64> [[OP1]], <vscale x 1 x i32> [[SHIFT]], <vscale x 1 x i1> [[MASK]], i64 0, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vnclip_wv_i32mf2_m(vbool64_t mask, vint64m1_t op1, vuint32mf2_t shift, size_t vl) {
- return __riscv_vnclip(mask, op1, shift, vl);
+ return __riscv_vnclip(mask, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i32> @test_vnclip_wx_i32mf2_m
// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnclip.mask.nxv1i32.nxv1i64.i64.i64(<vscale x 1 x i32> poison, <vscale x 1 x i64> [[OP1]], i64 [[SHIFT]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnclip.mask.nxv1i32.nxv1i64.i64.i64(<vscale x 1 x i32> poison, <vscale x 1 x i64> [[OP1]], i64 [[SHIFT]], <vscale x 1 x i1> [[MASK]], i64 0, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vnclip_wx_i32mf2_m(vbool64_t mask, vint64m1_t op1, size_t shift, size_t vl) {
- return __riscv_vnclip(mask, op1, shift, vl);
+ return __riscv_vnclip(mask, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i32> @test_vnclip_wv_i32m1_m
// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i32> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vnclip.mask.nxv2i32.nxv2i64.nxv2i32.i64(<vscale x 2 x i32> poison, <vscale x 2 x i64> [[OP1]], <vscale x 2 x i32> [[SHIFT]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vnclip.mask.nxv2i32.nxv2i64.nxv2i32.i64(<vscale x 2 x i32> poison, <vscale x 2 x i64> [[OP1]], <vscale x 2 x i32> [[SHIFT]], <vscale x 2 x i1> [[MASK]], i64 0, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vnclip_wv_i32m1_m(vbool32_t mask, vint64m2_t op1, vuint32m1_t shift, size_t vl) {
- return __riscv_vnclip(mask, op1, shift, vl);
+ return __riscv_vnclip(mask, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i32> @test_vnclip_wx_i32m1_m
// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vnclip.mask.nxv2i32.nxv2i64.i64.i64(<vscale x 2 x i32> poison, <vscale x 2 x i64> [[OP1]], i64 [[SHIFT]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vnclip.mask.nxv2i32.nxv2i64.i64.i64(<vscale x 2 x i32> poison, <vscale x 2 x i64> [[OP1]], i64 [[SHIFT]], <vscale x 2 x i1> [[MASK]], i64 0, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vnclip_wx_i32m1_m(vbool32_t mask, vint64m2_t op1, size_t shift, size_t vl) {
- return __riscv_vnclip(mask, op1, shift, vl);
+ return __riscv_vnclip(mask, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i32> @test_vnclip_wv_i32m2_m
// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i32> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vnclip.mask.nxv4i32.nxv4i64.nxv4i32.i64(<vscale x 4 x i32> poison, <vscale x 4 x i64> [[OP1]], <vscale x 4 x i32> [[SHIFT]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vnclip.mask.nxv4i32.nxv4i64.nxv4i32.i64(<vscale x 4 x i32> poison, <vscale x 4 x i64> [[OP1]], <vscale x 4 x i32> [[SHIFT]], <vscale x 4 x i1> [[MASK]], i64 0, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vnclip_wv_i32m2_m(vbool16_t mask, vint64m4_t op1, vuint32m2_t shift, size_t vl) {
- return __riscv_vnclip(mask, op1, shift, vl);
+ return __riscv_vnclip(mask, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i32> @test_vnclip_wx_i32m2_m
// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vnclip.mask.nxv4i32.nxv4i64.i64.i64(<vscale x 4 x i32> poison, <vscale x 4 x i64> [[OP1]], i64 [[SHIFT]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vnclip.mask.nxv4i32.nxv4i64.i64.i64(<vscale x 4 x i32> poison, <vscale x 4 x i64> [[OP1]], i64 [[SHIFT]], <vscale x 4 x i1> [[MASK]], i64 0, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vnclip_wx_i32m2_m(vbool16_t mask, vint64m4_t op1, size_t shift, size_t vl) {
- return __riscv_vnclip(mask, op1, shift, vl);
+ return __riscv_vnclip(mask, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i32> @test_vnclip_wv_i32m4_m
// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i32> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vnclip.mask.nxv8i32.nxv8i64.nxv8i32.i64(<vscale x 8 x i32> poison, <vscale x 8 x i64> [[OP1]], <vscale x 8 x i32> [[SHIFT]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vnclip.mask.nxv8i32.nxv8i64.nxv8i32.i64(<vscale x 8 x i32> poison, <vscale x 8 x i64> [[OP1]], <vscale x 8 x i32> [[SHIFT]], <vscale x 8 x i1> [[MASK]], i64 0, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vnclip_wv_i32m4_m(vbool8_t mask, vint64m8_t op1, vuint32m4_t shift, size_t vl) {
- return __riscv_vnclip(mask, op1, shift, vl);
+ return __riscv_vnclip(mask, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i32> @test_vnclip_wx_i32m4_m
// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vnclip.mask.nxv8i32.nxv8i64.i64.i64(<vscale x 8 x i32> poison, <vscale x 8 x i64> [[OP1]], i64 [[SHIFT]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vnclip.mask.nxv8i32.nxv8i64.i64.i64(<vscale x 8 x i32> poison, <vscale x 8 x i64> [[OP1]], i64 [[SHIFT]], <vscale x 8 x i1> [[MASK]], i64 0, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vnclip_wx_i32m4_m(vbool8_t mask, vint64m8_t op1, size_t shift, size_t vl) {
- return __riscv_vnclip(mask, op1, shift, vl);
+ return __riscv_vnclip(mask, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vnclipu_wv_u8mf8
// CHECK-RV64-SAME: (<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i8> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vnclipu.nxv1i8.nxv1i16.nxv1i8.i64(<vscale x 1 x i8> poison, <vscale x 1 x i16> [[OP1]], <vscale x 1 x i8> [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vnclipu.nxv1i8.nxv1i16.nxv1i8.i64(<vscale x 1 x i8> poison, <vscale x 1 x i16> [[OP1]], <vscale x 1 x i8> [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vnclipu_wv_u8mf8(vuint16mf4_t op1, vuint8mf8_t shift, size_t vl) {
- return __riscv_vnclipu(op1, shift, vl);
+ return __riscv_vnclipu(op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vnclipu_wx_u8mf8
// CHECK-RV64-SAME: (<vscale x 1 x i16> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vnclipu.nxv1i8.nxv1i16.i64.i64(<vscale x 1 x i8> poison, <vscale x 1 x i16> [[OP1]], i64 [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vnclipu.nxv1i8.nxv1i16.i64.i64(<vscale x 1 x i8> poison, <vscale x 1 x i16> [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vnclipu_wx_u8mf8(vuint16mf4_t op1, size_t shift, size_t vl) {
- return __riscv_vnclipu(op1, shift, vl);
+ return __riscv_vnclipu(op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vnclipu_wv_u8mf4
// CHECK-RV64-SAME: (<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i8> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vnclipu.nxv2i8.nxv2i16.nxv2i8.i64(<vscale x 2 x i8> poison, <vscale x 2 x i16> [[OP1]], <vscale x 2 x i8> [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vnclipu.nxv2i8.nxv2i16.nxv2i8.i64(<vscale x 2 x i8> poison, <vscale x 2 x i16> [[OP1]], <vscale x 2 x i8> [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vnclipu_wv_u8mf4(vuint16mf2_t op1, vuint8mf4_t shift, size_t vl) {
- return __riscv_vnclipu(op1, shift, vl);
+ return __riscv_vnclipu(op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vnclipu_wx_u8mf4
// CHECK-RV64-SAME: (<vscale x 2 x i16> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vnclipu.nxv2i8.nxv2i16.i64.i64(<vscale x 2 x i8> poison, <vscale x 2 x i16> [[OP1]], i64 [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vnclipu.nxv2i8.nxv2i16.i64.i64(<vscale x 2 x i8> poison, <vscale x 2 x i16> [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vnclipu_wx_u8mf4(vuint16mf2_t op1, size_t shift, size_t vl) {
- return __riscv_vnclipu(op1, shift, vl);
+ return __riscv_vnclipu(op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vnclipu_wv_u8mf2
// CHECK-RV64-SAME: (<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i8> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vnclipu.nxv4i8.nxv4i16.nxv4i8.i64(<vscale x 4 x i8> poison, <vscale x 4 x i16> [[OP1]], <vscale x 4 x i8> [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vnclipu.nxv4i8.nxv4i16.nxv4i8.i64(<vscale x 4 x i8> poison, <vscale x 4 x i16> [[OP1]], <vscale x 4 x i8> [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vnclipu_wv_u8mf2(vuint16m1_t op1, vuint8mf2_t shift, size_t vl) {
- return __riscv_vnclipu(op1, shift, vl);
+ return __riscv_vnclipu(op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vnclipu_wx_u8mf2
// CHECK-RV64-SAME: (<vscale x 4 x i16> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vnclipu.nxv4i8.nxv4i16.i64.i64(<vscale x 4 x i8> poison, <vscale x 4 x i16> [[OP1]], i64 [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vnclipu.nxv4i8.nxv4i16.i64.i64(<vscale x 4 x i8> poison, <vscale x 4 x i16> [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vnclipu_wx_u8mf2(vuint16m1_t op1, size_t shift, size_t vl) {
- return __riscv_vnclipu(op1, shift, vl);
+ return __riscv_vnclipu(op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vnclipu_wv_u8m1
// CHECK-RV64-SAME: (<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i8> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vnclipu.nxv8i8.nxv8i16.nxv8i8.i64(<vscale x 8 x i8> poison, <vscale x 8 x i16> [[OP1]], <vscale x 8 x i8> [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vnclipu.nxv8i8.nxv8i16.nxv8i8.i64(<vscale x 8 x i8> poison, <vscale x 8 x i16> [[OP1]], <vscale x 8 x i8> [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vnclipu_wv_u8m1(vuint16m2_t op1, vuint8m1_t shift, size_t vl) {
- return __riscv_vnclipu(op1, shift, vl);
+ return __riscv_vnclipu(op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vnclipu_wx_u8m1
// CHECK-RV64-SAME: (<vscale x 8 x i16> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vnclipu.nxv8i8.nxv8i16.i64.i64(<vscale x 8 x i8> poison, <vscale x 8 x i16> [[OP1]], i64 [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vnclipu.nxv8i8.nxv8i16.i64.i64(<vscale x 8 x i8> poison, <vscale x 8 x i16> [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vnclipu_wx_u8m1(vuint16m2_t op1, size_t shift, size_t vl) {
- return __riscv_vnclipu(op1, shift, vl);
+ return __riscv_vnclipu(op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vnclipu_wv_u8m2
// CHECK-RV64-SAME: (<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i8> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vnclipu.nxv16i8.nxv16i16.nxv16i8.i64(<vscale x 16 x i8> poison, <vscale x 16 x i16> [[OP1]], <vscale x 16 x i8> [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vnclipu.nxv16i8.nxv16i16.nxv16i8.i64(<vscale x 16 x i8> poison, <vscale x 16 x i16> [[OP1]], <vscale x 16 x i8> [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vnclipu_wv_u8m2(vuint16m4_t op1, vuint8m2_t shift, size_t vl) {
- return __riscv_vnclipu(op1, shift, vl);
+ return __riscv_vnclipu(op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vnclipu_wx_u8m2
// CHECK-RV64-SAME: (<vscale x 16 x i16> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vnclipu.nxv16i8.nxv16i16.i64.i64(<vscale x 16 x i8> poison, <vscale x 16 x i16> [[OP1]], i64 [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vnclipu.nxv16i8.nxv16i16.i64.i64(<vscale x 16 x i8> poison, <vscale x 16 x i16> [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vnclipu_wx_u8m2(vuint16m4_t op1, size_t shift, size_t vl) {
- return __riscv_vnclipu(op1, shift, vl);
+ return __riscv_vnclipu(op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vnclipu_wv_u8m4
// CHECK-RV64-SAME: (<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i8> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vnclipu.nxv32i8.nxv32i16.nxv32i8.i64(<vscale x 32 x i8> poison, <vscale x 32 x i16> [[OP1]], <vscale x 32 x i8> [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vnclipu.nxv32i8.nxv32i16.nxv32i8.i64(<vscale x 32 x i8> poison, <vscale x 32 x i16> [[OP1]], <vscale x 32 x i8> [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vnclipu_wv_u8m4(vuint16m8_t op1, vuint8m4_t shift, size_t vl) {
- return __riscv_vnclipu(op1, shift, vl);
+ return __riscv_vnclipu(op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vnclipu_wx_u8m4
// CHECK-RV64-SAME: (<vscale x 32 x i16> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vnclipu.nxv32i8.nxv32i16.i64.i64(<vscale x 32 x i8> poison, <vscale x 32 x i16> [[OP1]], i64 [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vnclipu.nxv32i8.nxv32i16.i64.i64(<vscale x 32 x i8> poison, <vscale x 32 x i16> [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vnclipu_wx_u8m4(vuint16m8_t op1, size_t shift, size_t vl) {
- return __riscv_vnclipu(op1, shift, vl);
+ return __riscv_vnclipu(op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i16> @test_vnclipu_wv_u16mf4
// CHECK-RV64-SAME: (<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i16> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vnclipu.nxv1i16.nxv1i32.nxv1i16.i64(<vscale x 1 x i16> poison, <vscale x 1 x i32> [[OP1]], <vscale x 1 x i16> [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vnclipu.nxv1i16.nxv1i32.nxv1i16.i64(<vscale x 1 x i16> poison, <vscale x 1 x i32> [[OP1]], <vscale x 1 x i16> [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vnclipu_wv_u16mf4(vuint32mf2_t op1, vuint16mf4_t shift, size_t vl) {
- return __riscv_vnclipu(op1, shift, vl);
+ return __riscv_vnclipu(op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i16> @test_vnclipu_wx_u16mf4
// CHECK-RV64-SAME: (<vscale x 1 x i32> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vnclipu.nxv1i16.nxv1i32.i64.i64(<vscale x 1 x i16> poison, <vscale x 1 x i32> [[OP1]], i64 [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vnclipu.nxv1i16.nxv1i32.i64.i64(<vscale x 1 x i16> poison, <vscale x 1 x i32> [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vnclipu_wx_u16mf4(vuint32mf2_t op1, size_t shift, size_t vl) {
- return __riscv_vnclipu(op1, shift, vl);
+ return __riscv_vnclipu(op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i16> @test_vnclipu_wv_u16mf2
// CHECK-RV64-SAME: (<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i16> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vnclipu.nxv2i16.nxv2i32.nxv2i16.i64(<vscale x 2 x i16> poison, <vscale x 2 x i32> [[OP1]], <vscale x 2 x i16> [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vnclipu.nxv2i16.nxv2i32.nxv2i16.i64(<vscale x 2 x i16> poison, <vscale x 2 x i32> [[OP1]], <vscale x 2 x i16> [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vnclipu_wv_u16mf2(vuint32m1_t op1, vuint16mf2_t shift, size_t vl) {
- return __riscv_vnclipu(op1, shift, vl);
+ return __riscv_vnclipu(op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i16> @test_vnclipu_wx_u16mf2
// CHECK-RV64-SAME: (<vscale x 2 x i32> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vnclipu.nxv2i16.nxv2i32.i64.i64(<vscale x 2 x i16> poison, <vscale x 2 x i32> [[OP1]], i64 [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vnclipu.nxv2i16.nxv2i32.i64.i64(<vscale x 2 x i16> poison, <vscale x 2 x i32> [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vnclipu_wx_u16mf2(vuint32m1_t op1, size_t shift, size_t vl) {
- return __riscv_vnclipu(op1, shift, vl);
+ return __riscv_vnclipu(op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vnclipu_wv_u16m1
// CHECK-RV64-SAME: (<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i16> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vnclipu.nxv4i16.nxv4i32.nxv4i16.i64(<vscale x 4 x i16> poison, <vscale x 4 x i32> [[OP1]], <vscale x 4 x i16> [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vnclipu.nxv4i16.nxv4i32.nxv4i16.i64(<vscale x 4 x i16> poison, <vscale x 4 x i32> [[OP1]], <vscale x 4 x i16> [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vnclipu_wv_u16m1(vuint32m2_t op1, vuint16m1_t shift, size_t vl) {
- return __riscv_vnclipu(op1, shift, vl);
+ return __riscv_vnclipu(op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vnclipu_wx_u16m1
// CHECK-RV64-SAME: (<vscale x 4 x i32> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vnclipu.nxv4i16.nxv4i32.i64.i64(<vscale x 4 x i16> poison, <vscale x 4 x i32> [[OP1]], i64 [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vnclipu.nxv4i16.nxv4i32.i64.i64(<vscale x 4 x i16> poison, <vscale x 4 x i32> [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vnclipu_wx_u16m1(vuint32m2_t op1, size_t shift, size_t vl) {
- return __riscv_vnclipu(op1, shift, vl);
+ return __riscv_vnclipu(op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i16> @test_vnclipu_wv_u16m2
// CHECK-RV64-SAME: (<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i16> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vnclipu.nxv8i16.nxv8i32.nxv8i16.i64(<vscale x 8 x i16> poison, <vscale x 8 x i32> [[OP1]], <vscale x 8 x i16> [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vnclipu.nxv8i16.nxv8i32.nxv8i16.i64(<vscale x 8 x i16> poison, <vscale x 8 x i32> [[OP1]], <vscale x 8 x i16> [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vnclipu_wv_u16m2(vuint32m4_t op1, vuint16m2_t shift, size_t vl) {
- return __riscv_vnclipu(op1, shift, vl);
+ return __riscv_vnclipu(op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i16> @test_vnclipu_wx_u16m2
// CHECK-RV64-SAME: (<vscale x 8 x i32> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vnclipu.nxv8i16.nxv8i32.i64.i64(<vscale x 8 x i16> poison, <vscale x 8 x i32> [[OP1]], i64 [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vnclipu.nxv8i16.nxv8i32.i64.i64(<vscale x 8 x i16> poison, <vscale x 8 x i32> [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vnclipu_wx_u16m2(vuint32m4_t op1, size_t shift, size_t vl) {
- return __riscv_vnclipu(op1, shift, vl);
+ return __riscv_vnclipu(op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i16> @test_vnclipu_wv_u16m4
// CHECK-RV64-SAME: (<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i16> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vnclipu.nxv16i16.nxv16i32.nxv16i16.i64(<vscale x 16 x i16> poison, <vscale x 16 x i32> [[OP1]], <vscale x 16 x i16> [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vnclipu.nxv16i16.nxv16i32.nxv16i16.i64(<vscale x 16 x i16> poison, <vscale x 16 x i32> [[OP1]], <vscale x 16 x i16> [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vnclipu_wv_u16m4(vuint32m8_t op1, vuint16m4_t shift, size_t vl) {
- return __riscv_vnclipu(op1, shift, vl);
+ return __riscv_vnclipu(op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i16> @test_vnclipu_wx_u16m4
// CHECK-RV64-SAME: (<vscale x 16 x i32> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vnclipu.nxv16i16.nxv16i32.i64.i64(<vscale x 16 x i16> poison, <vscale x 16 x i32> [[OP1]], i64 [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vnclipu.nxv16i16.nxv16i32.i64.i64(<vscale x 16 x i16> poison, <vscale x 16 x i32> [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vnclipu_wx_u16m4(vuint32m8_t op1, size_t shift, size_t vl) {
- return __riscv_vnclipu(op1, shift, vl);
+ return __riscv_vnclipu(op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i32> @test_vnclipu_wv_u32mf2
// CHECK-RV64-SAME: (<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i32> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnclipu.nxv1i32.nxv1i64.nxv1i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i64> [[OP1]], <vscale x 1 x i32> [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnclipu.nxv1i32.nxv1i64.nxv1i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i64> [[OP1]], <vscale x 1 x i32> [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vnclipu_wv_u32mf2(vuint64m1_t op1, vuint32mf2_t shift, size_t vl) {
- return __riscv_vnclipu(op1, shift, vl);
+ return __riscv_vnclipu(op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i32> @test_vnclipu_wx_u32mf2
// CHECK-RV64-SAME: (<vscale x 1 x i64> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnclipu.nxv1i32.nxv1i64.i64.i64(<vscale x 1 x i32> poison, <vscale x 1 x i64> [[OP1]], i64 [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnclipu.nxv1i32.nxv1i64.i64.i64(<vscale x 1 x i32> poison, <vscale x 1 x i64> [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vnclipu_wx_u32mf2(vuint64m1_t op1, size_t shift, size_t vl) {
- return __riscv_vnclipu(op1, shift, vl);
+ return __riscv_vnclipu(op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i32> @test_vnclipu_wv_u32m1
// CHECK-RV64-SAME: (<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i32> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vnclipu.nxv2i32.nxv2i64.nxv2i32.i64(<vscale x 2 x i32> poison, <vscale x 2 x i64> [[OP1]], <vscale x 2 x i32> [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vnclipu.nxv2i32.nxv2i64.nxv2i32.i64(<vscale x 2 x i32> poison, <vscale x 2 x i64> [[OP1]], <vscale x 2 x i32> [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vnclipu_wv_u32m1(vuint64m2_t op1, vuint32m1_t shift, size_t vl) {
- return __riscv_vnclipu(op1, shift, vl);
+ return __riscv_vnclipu(op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i32> @test_vnclipu_wx_u32m1
// CHECK-RV64-SAME: (<vscale x 2 x i64> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vnclipu.nxv2i32.nxv2i64.i64.i64(<vscale x 2 x i32> poison, <vscale x 2 x i64> [[OP1]], i64 [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vnclipu.nxv2i32.nxv2i64.i64.i64(<vscale x 2 x i32> poison, <vscale x 2 x i64> [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vnclipu_wx_u32m1(vuint64m2_t op1, size_t shift, size_t vl) {
- return __riscv_vnclipu(op1, shift, vl);
+ return __riscv_vnclipu(op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i32> @test_vnclipu_wv_u32m2
// CHECK-RV64-SAME: (<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i32> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vnclipu.nxv4i32.nxv4i64.nxv4i32.i64(<vscale x 4 x i32> poison, <vscale x 4 x i64> [[OP1]], <vscale x 4 x i32> [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vnclipu.nxv4i32.nxv4i64.nxv4i32.i64(<vscale x 4 x i32> poison, <vscale x 4 x i64> [[OP1]], <vscale x 4 x i32> [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vnclipu_wv_u32m2(vuint64m4_t op1, vuint32m2_t shift, size_t vl) {
- return __riscv_vnclipu(op1, shift, vl);
+ return __riscv_vnclipu(op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i32> @test_vnclipu_wx_u32m2
// CHECK-RV64-SAME: (<vscale x 4 x i64> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vnclipu.nxv4i32.nxv4i64.i64.i64(<vscale x 4 x i32> poison, <vscale x 4 x i64> [[OP1]], i64 [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vnclipu.nxv4i32.nxv4i64.i64.i64(<vscale x 4 x i32> poison, <vscale x 4 x i64> [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vnclipu_wx_u32m2(vuint64m4_t op1, size_t shift, size_t vl) {
- return __riscv_vnclipu(op1, shift, vl);
+ return __riscv_vnclipu(op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i32> @test_vnclipu_wv_u32m4
// CHECK-RV64-SAME: (<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i32> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vnclipu.nxv8i32.nxv8i64.nxv8i32.i64(<vscale x 8 x i32> poison, <vscale x 8 x i64> [[OP1]], <vscale x 8 x i32> [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vnclipu.nxv8i32.nxv8i64.nxv8i32.i64(<vscale x 8 x i32> poison, <vscale x 8 x i64> [[OP1]], <vscale x 8 x i32> [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vnclipu_wv_u32m4(vuint64m8_t op1, vuint32m4_t shift, size_t vl) {
- return __riscv_vnclipu(op1, shift, vl);
+ return __riscv_vnclipu(op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i32> @test_vnclipu_wx_u32m4
// CHECK-RV64-SAME: (<vscale x 8 x i64> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vnclipu.nxv8i32.nxv8i64.i64.i64(<vscale x 8 x i32> poison, <vscale x 8 x i64> [[OP1]], i64 [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vnclipu.nxv8i32.nxv8i64.i64.i64(<vscale x 8 x i32> poison, <vscale x 8 x i64> [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vnclipu_wx_u32m4(vuint64m8_t op1, size_t shift, size_t vl) {
- return __riscv_vnclipu(op1, shift, vl);
+ return __riscv_vnclipu(op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vnclipu_wv_u8mf8_m
// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i8> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vnclipu.mask.nxv1i8.nxv1i16.nxv1i8.i64(<vscale x 1 x i8> poison, <vscale x 1 x i16> [[OP1]], <vscale x 1 x i8> [[SHIFT]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vnclipu.mask.nxv1i8.nxv1i16.nxv1i8.i64(<vscale x 1 x i8> poison, <vscale x 1 x i16> [[OP1]], <vscale x 1 x i8> [[SHIFT]], <vscale x 1 x i1> [[MASK]], i64 0, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vnclipu_wv_u8mf8_m(vbool64_t mask, vuint16mf4_t op1, vuint8mf8_t shift, size_t vl) {
- return __riscv_vnclipu(mask, op1, shift, vl);
+ return __riscv_vnclipu(mask, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vnclipu_wx_u8mf8_m
// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vnclipu.mask.nxv1i8.nxv1i16.i64.i64(<vscale x 1 x i8> poison, <vscale x 1 x i16> [[OP1]], i64 [[SHIFT]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vnclipu.mask.nxv1i8.nxv1i16.i64.i64(<vscale x 1 x i8> poison, <vscale x 1 x i16> [[OP1]], i64 [[SHIFT]], <vscale x 1 x i1> [[MASK]], i64 0, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vnclipu_wx_u8mf8_m(vbool64_t mask, vuint16mf4_t op1, size_t shift, size_t vl) {
- return __riscv_vnclipu(mask, op1, shift, vl);
+ return __riscv_vnclipu(mask, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vnclipu_wv_u8mf4_m
// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i8> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vnclipu.mask.nxv2i8.nxv2i16.nxv2i8.i64(<vscale x 2 x i8> poison, <vscale x 2 x i16> [[OP1]], <vscale x 2 x i8> [[SHIFT]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vnclipu.mask.nxv2i8.nxv2i16.nxv2i8.i64(<vscale x 2 x i8> poison, <vscale x 2 x i16> [[OP1]], <vscale x 2 x i8> [[SHIFT]], <vscale x 2 x i1> [[MASK]], i64 0, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vnclipu_wv_u8mf4_m(vbool32_t mask, vuint16mf2_t op1, vuint8mf4_t shift, size_t vl) {
- return __riscv_vnclipu(mask, op1, shift, vl);
+ return __riscv_vnclipu(mask, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vnclipu_wx_u8mf4_m
// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vnclipu.mask.nxv2i8.nxv2i16.i64.i64(<vscale x 2 x i8> poison, <vscale x 2 x i16> [[OP1]], i64 [[SHIFT]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vnclipu.mask.nxv2i8.nxv2i16.i64.i64(<vscale x 2 x i8> poison, <vscale x 2 x i16> [[OP1]], i64 [[SHIFT]], <vscale x 2 x i1> [[MASK]], i64 0, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vnclipu_wx_u8mf4_m(vbool32_t mask, vuint16mf2_t op1, size_t shift, size_t vl) {
- return __riscv_vnclipu(mask, op1, shift, vl);
+ return __riscv_vnclipu(mask, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vnclipu_wv_u8mf2_m
// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i8> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vnclipu.mask.nxv4i8.nxv4i16.nxv4i8.i64(<vscale x 4 x i8> poison, <vscale x 4 x i16> [[OP1]], <vscale x 4 x i8> [[SHIFT]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vnclipu.mask.nxv4i8.nxv4i16.nxv4i8.i64(<vscale x 4 x i8> poison, <vscale x 4 x i16> [[OP1]], <vscale x 4 x i8> [[SHIFT]], <vscale x 4 x i1> [[MASK]], i64 0, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vnclipu_wv_u8mf2_m(vbool16_t mask, vuint16m1_t op1, vuint8mf2_t shift, size_t vl) {
- return __riscv_vnclipu(mask, op1, shift, vl);
+ return __riscv_vnclipu(mask, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vnclipu_wx_u8mf2_m
// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vnclipu.mask.nxv4i8.nxv4i16.i64.i64(<vscale x 4 x i8> poison, <vscale x 4 x i16> [[OP1]], i64 [[SHIFT]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vnclipu.mask.nxv4i8.nxv4i16.i64.i64(<vscale x 4 x i8> poison, <vscale x 4 x i16> [[OP1]], i64 [[SHIFT]], <vscale x 4 x i1> [[MASK]], i64 0, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vnclipu_wx_u8mf2_m(vbool16_t mask, vuint16m1_t op1, size_t shift, size_t vl) {
- return __riscv_vnclipu(mask, op1, shift, vl);
+ return __riscv_vnclipu(mask, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vnclipu_wv_u8m1_m
// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i8> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vnclipu.mask.nxv8i8.nxv8i16.nxv8i8.i64(<vscale x 8 x i8> poison, <vscale x 8 x i16> [[OP1]], <vscale x 8 x i8> [[SHIFT]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vnclipu.mask.nxv8i8.nxv8i16.nxv8i8.i64(<vscale x 8 x i8> poison, <vscale x 8 x i16> [[OP1]], <vscale x 8 x i8> [[SHIFT]], <vscale x 8 x i1> [[MASK]], i64 0, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vnclipu_wv_u8m1_m(vbool8_t mask, vuint16m2_t op1, vuint8m1_t shift, size_t vl) {
- return __riscv_vnclipu(mask, op1, shift, vl);
+ return __riscv_vnclipu(mask, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vnclipu_wx_u8m1_m
// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vnclipu.mask.nxv8i8.nxv8i16.i64.i64(<vscale x 8 x i8> poison, <vscale x 8 x i16> [[OP1]], i64 [[SHIFT]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vnclipu.mask.nxv8i8.nxv8i16.i64.i64(<vscale x 8 x i8> poison, <vscale x 8 x i16> [[OP1]], i64 [[SHIFT]], <vscale x 8 x i1> [[MASK]], i64 0, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vnclipu_wx_u8m1_m(vbool8_t mask, vuint16m2_t op1, size_t shift, size_t vl) {
- return __riscv_vnclipu(mask, op1, shift, vl);
+ return __riscv_vnclipu(mask, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vnclipu_wv_u8m2_m
// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i8> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vnclipu.mask.nxv16i8.nxv16i16.nxv16i8.i64(<vscale x 16 x i8> poison, <vscale x 16 x i16> [[OP1]], <vscale x 16 x i8> [[SHIFT]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vnclipu.mask.nxv16i8.nxv16i16.nxv16i8.i64(<vscale x 16 x i8> poison, <vscale x 16 x i16> [[OP1]], <vscale x 16 x i8> [[SHIFT]], <vscale x 16 x i1> [[MASK]], i64 0, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vnclipu_wv_u8m2_m(vbool4_t mask, vuint16m4_t op1, vuint8m2_t shift, size_t vl) {
- return __riscv_vnclipu(mask, op1, shift, vl);
+ return __riscv_vnclipu(mask, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vnclipu_wx_u8m2_m
// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vnclipu.mask.nxv16i8.nxv16i16.i64.i64(<vscale x 16 x i8> poison, <vscale x 16 x i16> [[OP1]], i64 [[SHIFT]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vnclipu.mask.nxv16i8.nxv16i16.i64.i64(<vscale x 16 x i8> poison, <vscale x 16 x i16> [[OP1]], i64 [[SHIFT]], <vscale x 16 x i1> [[MASK]], i64 0, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vnclipu_wx_u8m2_m(vbool4_t mask, vuint16m4_t op1, size_t shift, size_t vl) {
- return __riscv_vnclipu(mask, op1, shift, vl);
+ return __riscv_vnclipu(mask, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vnclipu_wv_u8m4_m
// CHECK-RV64-SAME: (<vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i8> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vnclipu.mask.nxv32i8.nxv32i16.nxv32i8.i64(<vscale x 32 x i8> poison, <vscale x 32 x i16> [[OP1]], <vscale x 32 x i8> [[SHIFT]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vnclipu.mask.nxv32i8.nxv32i16.nxv32i8.i64(<vscale x 32 x i8> poison, <vscale x 32 x i16> [[OP1]], <vscale x 32 x i8> [[SHIFT]], <vscale x 32 x i1> [[MASK]], i64 0, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vnclipu_wv_u8m4_m(vbool2_t mask, vuint16m8_t op1, vuint8m4_t shift, size_t vl) {
- return __riscv_vnclipu(mask, op1, shift, vl);
+ return __riscv_vnclipu(mask, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vnclipu_wx_u8m4_m
// CHECK-RV64-SAME: (<vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vnclipu.mask.nxv32i8.nxv32i16.i64.i64(<vscale x 32 x i8> poison, <vscale x 32 x i16> [[OP1]], i64 [[SHIFT]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vnclipu.mask.nxv32i8.nxv32i16.i64.i64(<vscale x 32 x i8> poison, <vscale x 32 x i16> [[OP1]], i64 [[SHIFT]], <vscale x 32 x i1> [[MASK]], i64 0, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vnclipu_wx_u8m4_m(vbool2_t mask, vuint16m8_t op1, size_t shift, size_t vl) {
- return __riscv_vnclipu(mask, op1, shift, vl);
+ return __riscv_vnclipu(mask, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i16> @test_vnclipu_wv_u16mf4_m
// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i16> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vnclipu.mask.nxv1i16.nxv1i32.nxv1i16.i64(<vscale x 1 x i16> poison, <vscale x 1 x i32> [[OP1]], <vscale x 1 x i16> [[SHIFT]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vnclipu.mask.nxv1i16.nxv1i32.nxv1i16.i64(<vscale x 1 x i16> poison, <vscale x 1 x i32> [[OP1]], <vscale x 1 x i16> [[SHIFT]], <vscale x 1 x i1> [[MASK]], i64 0, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vnclipu_wv_u16mf4_m(vbool64_t mask, vuint32mf2_t op1, vuint16mf4_t shift, size_t vl) {
- return __riscv_vnclipu(mask, op1, shift, vl);
+ return __riscv_vnclipu(mask, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i16> @test_vnclipu_wx_u16mf4_m
// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vnclipu.mask.nxv1i16.nxv1i32.i64.i64(<vscale x 1 x i16> poison, <vscale x 1 x i32> [[OP1]], i64 [[SHIFT]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vnclipu.mask.nxv1i16.nxv1i32.i64.i64(<vscale x 1 x i16> poison, <vscale x 1 x i32> [[OP1]], i64 [[SHIFT]], <vscale x 1 x i1> [[MASK]], i64 0, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vnclipu_wx_u16mf4_m(vbool64_t mask, vuint32mf2_t op1, size_t shift, size_t vl) {
- return __riscv_vnclipu(mask, op1, shift, vl);
+ return __riscv_vnclipu(mask, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i16> @test_vnclipu_wv_u16mf2_m
// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i16> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vnclipu.mask.nxv2i16.nxv2i32.nxv2i16.i64(<vscale x 2 x i16> poison, <vscale x 2 x i32> [[OP1]], <vscale x 2 x i16> [[SHIFT]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vnclipu.mask.nxv2i16.nxv2i32.nxv2i16.i64(<vscale x 2 x i16> poison, <vscale x 2 x i32> [[OP1]], <vscale x 2 x i16> [[SHIFT]], <vscale x 2 x i1> [[MASK]], i64 0, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vnclipu_wv_u16mf2_m(vbool32_t mask, vuint32m1_t op1, vuint16mf2_t shift, size_t vl) {
- return __riscv_vnclipu(mask, op1, shift, vl);
+ return __riscv_vnclipu(mask, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i16> @test_vnclipu_wx_u16mf2_m
// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vnclipu.mask.nxv2i16.nxv2i32.i64.i64(<vscale x 2 x i16> poison, <vscale x 2 x i32> [[OP1]], i64 [[SHIFT]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vnclipu.mask.nxv2i16.nxv2i32.i64.i64(<vscale x 2 x i16> poison, <vscale x 2 x i32> [[OP1]], i64 [[SHIFT]], <vscale x 2 x i1> [[MASK]], i64 0, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vnclipu_wx_u16mf2_m(vbool32_t mask, vuint32m1_t op1, size_t shift, size_t vl) {
- return __riscv_vnclipu(mask, op1, shift, vl);
+ return __riscv_vnclipu(mask, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vnclipu_wv_u16m1_m
// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i16> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vnclipu.mask.nxv4i16.nxv4i32.nxv4i16.i64(<vscale x 4 x i16> poison, <vscale x 4 x i32> [[OP1]], <vscale x 4 x i16> [[SHIFT]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vnclipu.mask.nxv4i16.nxv4i32.nxv4i16.i64(<vscale x 4 x i16> poison, <vscale x 4 x i32> [[OP1]], <vscale x 4 x i16> [[SHIFT]], <vscale x 4 x i1> [[MASK]], i64 0, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vnclipu_wv_u16m1_m(vbool16_t mask, vuint32m2_t op1, vuint16m1_t shift, size_t vl) {
- return __riscv_vnclipu(mask, op1, shift, vl);
+ return __riscv_vnclipu(mask, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vnclipu_wx_u16m1_m
// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vnclipu.mask.nxv4i16.nxv4i32.i64.i64(<vscale x 4 x i16> poison, <vscale x 4 x i32> [[OP1]], i64 [[SHIFT]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vnclipu.mask.nxv4i16.nxv4i32.i64.i64(<vscale x 4 x i16> poison, <vscale x 4 x i32> [[OP1]], i64 [[SHIFT]], <vscale x 4 x i1> [[MASK]], i64 0, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vnclipu_wx_u16m1_m(vbool16_t mask, vuint32m2_t op1, size_t shift, size_t vl) {
- return __riscv_vnclipu(mask, op1, shift, vl);
+ return __riscv_vnclipu(mask, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i16> @test_vnclipu_wv_u16m2_m
// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i16> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vnclipu.mask.nxv8i16.nxv8i32.nxv8i16.i64(<vscale x 8 x i16> poison, <vscale x 8 x i32> [[OP1]], <vscale x 8 x i16> [[SHIFT]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vnclipu.mask.nxv8i16.nxv8i32.nxv8i16.i64(<vscale x 8 x i16> poison, <vscale x 8 x i32> [[OP1]], <vscale x 8 x i16> [[SHIFT]], <vscale x 8 x i1> [[MASK]], i64 0, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vnclipu_wv_u16m2_m(vbool8_t mask, vuint32m4_t op1, vuint16m2_t shift, size_t vl) {
- return __riscv_vnclipu(mask, op1, shift, vl);
+ return __riscv_vnclipu(mask, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i16> @test_vnclipu_wx_u16m2_m
// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vnclipu.mask.nxv8i16.nxv8i32.i64.i64(<vscale x 8 x i16> poison, <vscale x 8 x i32> [[OP1]], i64 [[SHIFT]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vnclipu.mask.nxv8i16.nxv8i32.i64.i64(<vscale x 8 x i16> poison, <vscale x 8 x i32> [[OP1]], i64 [[SHIFT]], <vscale x 8 x i1> [[MASK]], i64 0, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vnclipu_wx_u16m2_m(vbool8_t mask, vuint32m4_t op1, size_t shift, size_t vl) {
- return __riscv_vnclipu(mask, op1, shift, vl);
+ return __riscv_vnclipu(mask, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i16> @test_vnclipu_wv_u16m4_m
// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i16> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vnclipu.mask.nxv16i16.nxv16i32.nxv16i16.i64(<vscale x 16 x i16> poison, <vscale x 16 x i32> [[OP1]], <vscale x 16 x i16> [[SHIFT]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vnclipu.mask.nxv16i16.nxv16i32.nxv16i16.i64(<vscale x 16 x i16> poison, <vscale x 16 x i32> [[OP1]], <vscale x 16 x i16> [[SHIFT]], <vscale x 16 x i1> [[MASK]], i64 0, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vnclipu_wv_u16m4_m(vbool4_t mask, vuint32m8_t op1, vuint16m4_t shift, size_t vl) {
- return __riscv_vnclipu(mask, op1, shift, vl);
+ return __riscv_vnclipu(mask, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i16> @test_vnclipu_wx_u16m4_m
// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vnclipu.mask.nxv16i16.nxv16i32.i64.i64(<vscale x 16 x i16> poison, <vscale x 16 x i32> [[OP1]], i64 [[SHIFT]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vnclipu.mask.nxv16i16.nxv16i32.i64.i64(<vscale x 16 x i16> poison, <vscale x 16 x i32> [[OP1]], i64 [[SHIFT]], <vscale x 16 x i1> [[MASK]], i64 0, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vnclipu_wx_u16m4_m(vbool4_t mask, vuint32m8_t op1, size_t shift, size_t vl) {
- return __riscv_vnclipu(mask, op1, shift, vl);
+ return __riscv_vnclipu(mask, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i32> @test_vnclipu_wv_u32mf2_m
// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i32> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnclipu.mask.nxv1i32.nxv1i64.nxv1i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i64> [[OP1]], <vscale x 1 x i32> [[SHIFT]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnclipu.mask.nxv1i32.nxv1i64.nxv1i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i64> [[OP1]], <vscale x 1 x i32> [[SHIFT]], <vscale x 1 x i1> [[MASK]], i64 0, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vnclipu_wv_u32mf2_m(vbool64_t mask, vuint64m1_t op1, vuint32mf2_t shift, size_t vl) {
- return __riscv_vnclipu(mask, op1, shift, vl);
+ return __riscv_vnclipu(mask, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i32> @test_vnclipu_wx_u32mf2_m
// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnclipu.mask.nxv1i32.nxv1i64.i64.i64(<vscale x 1 x i32> poison, <vscale x 1 x i64> [[OP1]], i64 [[SHIFT]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnclipu.mask.nxv1i32.nxv1i64.i64.i64(<vscale x 1 x i32> poison, <vscale x 1 x i64> [[OP1]], i64 [[SHIFT]], <vscale x 1 x i1> [[MASK]], i64 0, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vnclipu_wx_u32mf2_m(vbool64_t mask, vuint64m1_t op1, size_t shift, size_t vl) {
- return __riscv_vnclipu(mask, op1, shift, vl);
+ return __riscv_vnclipu(mask, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i32> @test_vnclipu_wv_u32m1_m
// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i32> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vnclipu.mask.nxv2i32.nxv2i64.nxv2i32.i64(<vscale x 2 x i32> poison, <vscale x 2 x i64> [[OP1]], <vscale x 2 x i32> [[SHIFT]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vnclipu.mask.nxv2i32.nxv2i64.nxv2i32.i64(<vscale x 2 x i32> poison, <vscale x 2 x i64> [[OP1]], <vscale x 2 x i32> [[SHIFT]], <vscale x 2 x i1> [[MASK]], i64 0, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vnclipu_wv_u32m1_m(vbool32_t mask, vuint64m2_t op1, vuint32m1_t shift, size_t vl) {
- return __riscv_vnclipu(mask, op1, shift, vl);
+ return __riscv_vnclipu(mask, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i32> @test_vnclipu_wx_u32m1_m
// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vnclipu.mask.nxv2i32.nxv2i64.i64.i64(<vscale x 2 x i32> poison, <vscale x 2 x i64> [[OP1]], i64 [[SHIFT]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vnclipu.mask.nxv2i32.nxv2i64.i64.i64(<vscale x 2 x i32> poison, <vscale x 2 x i64> [[OP1]], i64 [[SHIFT]], <vscale x 2 x i1> [[MASK]], i64 0, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vnclipu_wx_u32m1_m(vbool32_t mask, vuint64m2_t op1, size_t shift, size_t vl) {
- return __riscv_vnclipu(mask, op1, shift, vl);
+ return __riscv_vnclipu(mask, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i32> @test_vnclipu_wv_u32m2_m
// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i32> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vnclipu.mask.nxv4i32.nxv4i64.nxv4i32.i64(<vscale x 4 x i32> poison, <vscale x 4 x i64> [[OP1]], <vscale x 4 x i32> [[SHIFT]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vnclipu.mask.nxv4i32.nxv4i64.nxv4i32.i64(<vscale x 4 x i32> poison, <vscale x 4 x i64> [[OP1]], <vscale x 4 x i32> [[SHIFT]], <vscale x 4 x i1> [[MASK]], i64 0, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vnclipu_wv_u32m2_m(vbool16_t mask, vuint64m4_t op1, vuint32m2_t shift, size_t vl) {
- return __riscv_vnclipu(mask, op1, shift, vl);
+ return __riscv_vnclipu(mask, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i32> @test_vnclipu_wx_u32m2_m
// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vnclipu.mask.nxv4i32.nxv4i64.i64.i64(<vscale x 4 x i32> poison, <vscale x 4 x i64> [[OP1]], i64 [[SHIFT]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vnclipu.mask.nxv4i32.nxv4i64.i64.i64(<vscale x 4 x i32> poison, <vscale x 4 x i64> [[OP1]], i64 [[SHIFT]], <vscale x 4 x i1> [[MASK]], i64 0, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vnclipu_wx_u32m2_m(vbool16_t mask, vuint64m4_t op1, size_t shift, size_t vl) {
- return __riscv_vnclipu(mask, op1, shift, vl);
+ return __riscv_vnclipu(mask, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i32> @test_vnclipu_wv_u32m4_m
// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i32> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vnclipu.mask.nxv8i32.nxv8i64.nxv8i32.i64(<vscale x 8 x i32> poison, <vscale x 8 x i64> [[OP1]], <vscale x 8 x i32> [[SHIFT]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vnclipu.mask.nxv8i32.nxv8i64.nxv8i32.i64(<vscale x 8 x i32> poison, <vscale x 8 x i64> [[OP1]], <vscale x 8 x i32> [[SHIFT]], <vscale x 8 x i1> [[MASK]], i64 0, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vnclipu_wv_u32m4_m(vbool8_t mask, vuint64m8_t op1, vuint32m4_t shift, size_t vl) {
- return __riscv_vnclipu(mask, op1, shift, vl);
+ return __riscv_vnclipu(mask, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i32> @test_vnclipu_wx_u32m4_m
// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vnclipu.mask.nxv8i32.nxv8i64.i64.i64(<vscale x 8 x i32> poison, <vscale x 8 x i64> [[OP1]], i64 [[SHIFT]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vnclipu.mask.nxv8i32.nxv8i64.i64.i64(<vscale x 8 x i32> poison, <vscale x 8 x i64> [[OP1]], i64 [[SHIFT]], <vscale x 8 x i1> [[MASK]], i64 0, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vnclipu_wx_u32m4_m(vbool8_t mask, vuint64m8_t op1, size_t shift, size_t vl) {
- return __riscv_vnclipu(mask, op1, shift, vl);
+ return __riscv_vnclipu(mask, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vsmul_vv_i8mf8
// CHECK-RV64-SAME: (<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsmul.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> poison, <vscale x 1 x i8> [[OP1]], <vscale x 1 x i8> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsmul.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> poison, <vscale x 1 x i8> [[OP1]], <vscale x 1 x i8> [[OP2]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vsmul_vv_i8mf8(vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
- return __riscv_vsmul(op1, op2, vl);
+ return __riscv_vsmul(op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vsmul_vx_i8mf8
// CHECK-RV64-SAME: (<vscale x 1 x i8> [[OP1:%.*]], i8 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsmul.nxv1i8.i8.i64(<vscale x 1 x i8> poison, <vscale x 1 x i8> [[OP1]], i8 [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsmul.nxv1i8.i8.i64(<vscale x 1 x i8> poison, <vscale x 1 x i8> [[OP1]], i8 [[OP2]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vsmul_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) {
- return __riscv_vsmul(op1, op2, vl);
+ return __riscv_vsmul(op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vsmul_vv_i8mf4
// CHECK-RV64-SAME: (<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsmul.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> poison, <vscale x 2 x i8> [[OP1]], <vscale x 2 x i8> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsmul.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> poison, <vscale x 2 x i8> [[OP1]], <vscale x 2 x i8> [[OP2]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vsmul_vv_i8mf4(vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
- return __riscv_vsmul(op1, op2, vl);
+ return __riscv_vsmul(op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vsmul_vx_i8mf4
// CHECK-RV64-SAME: (<vscale x 2 x i8> [[OP1:%.*]], i8 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsmul.nxv2i8.i8.i64(<vscale x 2 x i8> poison, <vscale x 2 x i8> [[OP1]], i8 [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsmul.nxv2i8.i8.i64(<vscale x 2 x i8> poison, <vscale x 2 x i8> [[OP1]], i8 [[OP2]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vsmul_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) {
- return __riscv_vsmul(op1, op2, vl);
+ return __riscv_vsmul(op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vsmul_vv_i8mf2
// CHECK-RV64-SAME: (<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsmul.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> poison, <vscale x 4 x i8> [[OP1]], <vscale x 4 x i8> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsmul.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> poison, <vscale x 4 x i8> [[OP1]], <vscale x 4 x i8> [[OP2]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vsmul_vv_i8mf2(vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
- return __riscv_vsmul(op1, op2, vl);
+ return __riscv_vsmul(op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vsmul_vx_i8mf2
// CHECK-RV64-SAME: (<vscale x 4 x i8> [[OP1:%.*]], i8 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsmul.nxv4i8.i8.i64(<vscale x 4 x i8> poison, <vscale x 4 x i8> [[OP1]], i8 [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsmul.nxv4i8.i8.i64(<vscale x 4 x i8> poison, <vscale x 4 x i8> [[OP1]], i8 [[OP2]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vsmul_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) {
- return __riscv_vsmul(op1, op2, vl);
+ return __riscv_vsmul(op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vsmul_vv_i8m1
// CHECK-RV64-SAME: (<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vsmul.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> poison, <vscale x 8 x i8> [[OP1]], <vscale x 8 x i8> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vsmul.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> poison, <vscale x 8 x i8> [[OP1]], <vscale x 8 x i8> [[OP2]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vsmul_vv_i8m1(vint8m1_t op1, vint8m1_t op2, size_t vl) {
- return __riscv_vsmul(op1, op2, vl);
+ return __riscv_vsmul(op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vsmul_vx_i8m1
// CHECK-RV64-SAME: (<vscale x 8 x i8> [[OP1:%.*]], i8 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vsmul.nxv8i8.i8.i64(<vscale x 8 x i8> poison, <vscale x 8 x i8> [[OP1]], i8 [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vsmul.nxv8i8.i8.i64(<vscale x 8 x i8> poison, <vscale x 8 x i8> [[OP1]], i8 [[OP2]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vsmul_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) {
- return __riscv_vsmul(op1, op2, vl);
+ return __riscv_vsmul(op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vsmul_vv_i8m2
// CHECK-RV64-SAME: (<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vsmul.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> poison, <vscale x 16 x i8> [[OP1]], <vscale x 16 x i8> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vsmul.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> poison, <vscale x 16 x i8> [[OP1]], <vscale x 16 x i8> [[OP2]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vsmul_vv_i8m2(vint8m2_t op1, vint8m2_t op2, size_t vl) {
- return __riscv_vsmul(op1, op2, vl);
+ return __riscv_vsmul(op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vsmul_vx_i8m2
// CHECK-RV64-SAME: (<vscale x 16 x i8> [[OP1:%.*]], i8 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vsmul.nxv16i8.i8.i64(<vscale x 16 x i8> poison, <vscale x 16 x i8> [[OP1]], i8 [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vsmul.nxv16i8.i8.i64(<vscale x 16 x i8> poison, <vscale x 16 x i8> [[OP1]], i8 [[OP2]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vsmul_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) {
- return __riscv_vsmul(op1, op2, vl);
+ return __riscv_vsmul(op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vsmul_vv_i8m4
// CHECK-RV64-SAME: (<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsmul.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> poison, <vscale x 32 x i8> [[OP1]], <vscale x 32 x i8> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsmul.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> poison, <vscale x 32 x i8> [[OP1]], <vscale x 32 x i8> [[OP2]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vsmul_vv_i8m4(vint8m4_t op1, vint8m4_t op2, size_t vl) {
- return __riscv_vsmul(op1, op2, vl);
+ return __riscv_vsmul(op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vsmul_vx_i8m4
// CHECK-RV64-SAME: (<vscale x 32 x i8> [[OP1:%.*]], i8 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsmul.nxv32i8.i8.i64(<vscale x 32 x i8> poison, <vscale x 32 x i8> [[OP1]], i8 [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsmul.nxv32i8.i8.i64(<vscale x 32 x i8> poison, <vscale x 32 x i8> [[OP1]], i8 [[OP2]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vsmul_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) {
- return __riscv_vsmul(op1, op2, vl);
+ return __riscv_vsmul(op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 64 x i8> @test_vsmul_vv_i8m8
// CHECK-RV64-SAME: (<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsmul.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> poison, <vscale x 64 x i8> [[OP1]], <vscale x 64 x i8> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsmul.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> poison, <vscale x 64 x i8> [[OP1]], <vscale x 64 x i8> [[OP2]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vsmul_vv_i8m8(vint8m8_t op1, vint8m8_t op2, size_t vl) {
- return __riscv_vsmul(op1, op2, vl);
+ return __riscv_vsmul(op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 64 x i8> @test_vsmul_vx_i8m8
// CHECK-RV64-SAME: (<vscale x 64 x i8> [[OP1:%.*]], i8 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsmul.nxv64i8.i8.i64(<vscale x 64 x i8> poison, <vscale x 64 x i8> [[OP1]], i8 [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsmul.nxv64i8.i8.i64(<vscale x 64 x i8> poison, <vscale x 64 x i8> [[OP1]], i8 [[OP2]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vsmul_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) {
- return __riscv_vsmul(op1, op2, vl);
+ return __riscv_vsmul(op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i16> @test_vsmul_vv_i16mf4
// CHECK-RV64-SAME: (<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsmul.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> poison, <vscale x 1 x i16> [[OP1]], <vscale x 1 x i16> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsmul.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> poison, <vscale x 1 x i16> [[OP1]], <vscale x 1 x i16> [[OP2]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vsmul_vv_i16mf4(vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
- return __riscv_vsmul(op1, op2, vl);
+ return __riscv_vsmul(op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i16> @test_vsmul_vx_i16mf4
// CHECK-RV64-SAME: (<vscale x 1 x i16> [[OP1:%.*]], i16 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsmul.nxv1i16.i16.i64(<vscale x 1 x i16> poison, <vscale x 1 x i16> [[OP1]], i16 [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsmul.nxv1i16.i16.i64(<vscale x 1 x i16> poison, <vscale x 1 x i16> [[OP1]], i16 [[OP2]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vsmul_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) {
- return __riscv_vsmul(op1, op2, vl);
+ return __riscv_vsmul(op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i16> @test_vsmul_vv_i16mf2
// CHECK-RV64-SAME: (<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsmul.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> poison, <vscale x 2 x i16> [[OP1]], <vscale x 2 x i16> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsmul.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> poison, <vscale x 2 x i16> [[OP1]], <vscale x 2 x i16> [[OP2]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vsmul_vv_i16mf2(vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
- return __riscv_vsmul(op1, op2, vl);
+ return __riscv_vsmul(op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i16> @test_vsmul_vx_i16mf2
// CHECK-RV64-SAME: (<vscale x 2 x i16> [[OP1:%.*]], i16 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsmul.nxv2i16.i16.i64(<vscale x 2 x i16> poison, <vscale x 2 x i16> [[OP1]], i16 [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsmul.nxv2i16.i16.i64(<vscale x 2 x i16> poison, <vscale x 2 x i16> [[OP1]], i16 [[OP2]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vsmul_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) {
- return __riscv_vsmul(op1, op2, vl);
+ return __riscv_vsmul(op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vsmul_vv_i16m1
// CHECK-RV64-SAME: (<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsmul.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> poison, <vscale x 4 x i16> [[OP1]], <vscale x 4 x i16> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsmul.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> poison, <vscale x 4 x i16> [[OP1]], <vscale x 4 x i16> [[OP2]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vsmul_vv_i16m1(vint16m1_t op1, vint16m1_t op2, size_t vl) {
- return __riscv_vsmul(op1, op2, vl);
+ return __riscv_vsmul(op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vsmul_vx_i16m1
// CHECK-RV64-SAME: (<vscale x 4 x i16> [[OP1:%.*]], i16 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsmul.nxv4i16.i16.i64(<vscale x 4 x i16> poison, <vscale x 4 x i16> [[OP1]], i16 [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsmul.nxv4i16.i16.i64(<vscale x 4 x i16> poison, <vscale x 4 x i16> [[OP1]], i16 [[OP2]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vsmul_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) {
- return __riscv_vsmul(op1, op2, vl);
+ return __riscv_vsmul(op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i16> @test_vsmul_vv_i16m2
// CHECK-RV64-SAME: (<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsmul.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> poison, <vscale x 8 x i16> [[OP1]], <vscale x 8 x i16> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsmul.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> poison, <vscale x 8 x i16> [[OP1]], <vscale x 8 x i16> [[OP2]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vsmul_vv_i16m2(vint16m2_t op1, vint16m2_t op2, size_t vl) {
- return __riscv_vsmul(op1, op2, vl);
+ return __riscv_vsmul(op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i16> @test_vsmul_vx_i16m2
// CHECK-RV64-SAME: (<vscale x 8 x i16> [[OP1:%.*]], i16 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsmul.nxv8i16.i16.i64(<vscale x 8 x i16> poison, <vscale x 8 x i16> [[OP1]], i16 [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsmul.nxv8i16.i16.i64(<vscale x 8 x i16> poison, <vscale x 8 x i16> [[OP1]], i16 [[OP2]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vsmul_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) {
- return __riscv_vsmul(op1, op2, vl);
+ return __riscv_vsmul(op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i16> @test_vsmul_vv_i16m4
// CHECK-RV64-SAME: (<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsmul.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> poison, <vscale x 16 x i16> [[OP1]], <vscale x 16 x i16> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsmul.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> poison, <vscale x 16 x i16> [[OP1]], <vscale x 16 x i16> [[OP2]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vsmul_vv_i16m4(vint16m4_t op1, vint16m4_t op2, size_t vl) {
- return __riscv_vsmul(op1, op2, vl);
+ return __riscv_vsmul(op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i16> @test_vsmul_vx_i16m4
// CHECK-RV64-SAME: (<vscale x 16 x i16> [[OP1:%.*]], i16 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsmul.nxv16i16.i16.i64(<vscale x 16 x i16> poison, <vscale x 16 x i16> [[OP1]], i16 [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsmul.nxv16i16.i16.i64(<vscale x 16 x i16> poison, <vscale x 16 x i16> [[OP1]], i16 [[OP2]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vsmul_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) {
- return __riscv_vsmul(op1, op2, vl);
+ return __riscv_vsmul(op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i16> @test_vsmul_vv_i16m8
// CHECK-RV64-SAME: (<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsmul.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> poison, <vscale x 32 x i16> [[OP1]], <vscale x 32 x i16> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsmul.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> poison, <vscale x 32 x i16> [[OP1]], <vscale x 32 x i16> [[OP2]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vsmul_vv_i16m8(vint16m8_t op1, vint16m8_t op2, size_t vl) {
- return __riscv_vsmul(op1, op2, vl);
+ return __riscv_vsmul(op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i16> @test_vsmul_vx_i16m8
// CHECK-RV64-SAME: (<vscale x 32 x i16> [[OP1:%.*]], i16 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsmul.nxv32i16.i16.i64(<vscale x 32 x i16> poison, <vscale x 32 x i16> [[OP1]], i16 [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsmul.nxv32i16.i16.i64(<vscale x 32 x i16> poison, <vscale x 32 x i16> [[OP1]], i16 [[OP2]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vsmul_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) {
- return __riscv_vsmul(op1, op2, vl);
+ return __riscv_vsmul(op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i32> @test_vsmul_vv_i32mf2
// CHECK-RV64-SAME: (<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsmul.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1]], <vscale x 1 x i32> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsmul.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1]], <vscale x 1 x i32> [[OP2]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vsmul_vv_i32mf2(vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
- return __riscv_vsmul(op1, op2, vl);
+ return __riscv_vsmul(op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i32> @test_vsmul_vx_i32mf2
// CHECK-RV64-SAME: (<vscale x 1 x i32> [[OP1:%.*]], i32 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsmul.nxv1i32.i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1]], i32 [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsmul.nxv1i32.i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1]], i32 [[OP2]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vsmul_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) {
- return __riscv_vsmul(op1, op2, vl);
+ return __riscv_vsmul(op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i32> @test_vsmul_vv_i32m1
// CHECK-RV64-SAME: (<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsmul.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> poison, <vscale x 2 x i32> [[OP1]], <vscale x 2 x i32> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsmul.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> poison, <vscale x 2 x i32> [[OP1]], <vscale x 2 x i32> [[OP2]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vsmul_vv_i32m1(vint32m1_t op1, vint32m1_t op2, size_t vl) {
- return __riscv_vsmul(op1, op2, vl);
+ return __riscv_vsmul(op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i32> @test_vsmul_vx_i32m1
// CHECK-RV64-SAME: (<vscale x 2 x i32> [[OP1:%.*]], i32 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsmul.nxv2i32.i32.i64(<vscale x 2 x i32> poison, <vscale x 2 x i32> [[OP1]], i32 [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsmul.nxv2i32.i32.i64(<vscale x 2 x i32> poison, <vscale x 2 x i32> [[OP1]], i32 [[OP2]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vsmul_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) {
- return __riscv_vsmul(op1, op2, vl);
+ return __riscv_vsmul(op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i32> @test_vsmul_vv_i32m2
// CHECK-RV64-SAME: (<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsmul.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> poison, <vscale x 4 x i32> [[OP1]], <vscale x 4 x i32> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsmul.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> poison, <vscale x 4 x i32> [[OP1]], <vscale x 4 x i32> [[OP2]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vsmul_vv_i32m2(vint32m2_t op1, vint32m2_t op2, size_t vl) {
- return __riscv_vsmul(op1, op2, vl);
+ return __riscv_vsmul(op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i32> @test_vsmul_vx_i32m2
// CHECK-RV64-SAME: (<vscale x 4 x i32> [[OP1:%.*]], i32 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsmul.nxv4i32.i32.i64(<vscale x 4 x i32> poison, <vscale x 4 x i32> [[OP1]], i32 [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsmul.nxv4i32.i32.i64(<vscale x 4 x i32> poison, <vscale x 4 x i32> [[OP1]], i32 [[OP2]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vsmul_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) {
- return __riscv_vsmul(op1, op2, vl);
+ return __riscv_vsmul(op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i32> @test_vsmul_vv_i32m4
// CHECK-RV64-SAME: (<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsmul.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> poison, <vscale x 8 x i32> [[OP1]], <vscale x 8 x i32> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsmul.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> poison, <vscale x 8 x i32> [[OP1]], <vscale x 8 x i32> [[OP2]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vsmul_vv_i32m4(vint32m4_t op1, vint32m4_t op2, size_t vl) {
- return __riscv_vsmul(op1, op2, vl);
+ return __riscv_vsmul(op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i32> @test_vsmul_vx_i32m4
// CHECK-RV64-SAME: (<vscale x 8 x i32> [[OP1:%.*]], i32 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsmul.nxv8i32.i32.i64(<vscale x 8 x i32> poison, <vscale x 8 x i32> [[OP1]], i32 [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsmul.nxv8i32.i32.i64(<vscale x 8 x i32> poison, <vscale x 8 x i32> [[OP1]], i32 [[OP2]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vsmul_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) {
- return __riscv_vsmul(op1, op2, vl);
+ return __riscv_vsmul(op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i32> @test_vsmul_vv_i32m8
// CHECK-RV64-SAME: (<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsmul.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> poison, <vscale x 16 x i32> [[OP1]], <vscale x 16 x i32> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsmul.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> poison, <vscale x 16 x i32> [[OP1]], <vscale x 16 x i32> [[OP2]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vsmul_vv_i32m8(vint32m8_t op1, vint32m8_t op2, size_t vl) {
- return __riscv_vsmul(op1, op2, vl);
+ return __riscv_vsmul(op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i32> @test_vsmul_vx_i32m8
// CHECK-RV64-SAME: (<vscale x 16 x i32> [[OP1:%.*]], i32 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsmul.nxv16i32.i32.i64(<vscale x 16 x i32> poison, <vscale x 16 x i32> [[OP1]], i32 [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsmul.nxv16i32.i32.i64(<vscale x 16 x i32> poison, <vscale x 16 x i32> [[OP1]], i32 [[OP2]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vsmul_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) {
- return __riscv_vsmul(op1, op2, vl);
+ return __riscv_vsmul(op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i64> @test_vsmul_vv_i64m1
// CHECK-RV64-SAME: (<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsmul.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> poison, <vscale x 1 x i64> [[OP1]], <vscale x 1 x i64> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsmul.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> poison, <vscale x 1 x i64> [[OP1]], <vscale x 1 x i64> [[OP2]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vsmul_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) {
- return __riscv_vsmul(op1, op2, vl);
+ return __riscv_vsmul(op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i64> @test_vsmul_vx_i64m1
// CHECK-RV64-SAME: (<vscale x 1 x i64> [[OP1:%.*]], i64 noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsmul.nxv1i64.i64.i64(<vscale x 1 x i64> poison, <vscale x 1 x i64> [[OP1]], i64 [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsmul.nxv1i64.i64.i64(<vscale x 1 x i64> poison, <vscale x 1 x i64> [[OP1]], i64 [[OP2]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vsmul_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) {
- return __riscv_vsmul(op1, op2, vl);
+ return __riscv_vsmul(op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i64> @test_vsmul_vv_i64m2
// CHECK-RV64-SAME: (<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsmul.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> poison, <vscale x 2 x i64> [[OP1]], <vscale x 2 x i64> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsmul.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> poison, <vscale x 2 x i64> [[OP1]], <vscale x 2 x i64> [[OP2]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vsmul_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) {
- return __riscv_vsmul(op1, op2, vl);
+ return __riscv_vsmul(op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i64> @test_vsmul_vx_i64m2
// CHECK-RV64-SAME: (<vscale x 2 x i64> [[OP1:%.*]], i64 noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsmul.nxv2i64.i64.i64(<vscale x 2 x i64> poison, <vscale x 2 x i64> [[OP1]], i64 [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsmul.nxv2i64.i64.i64(<vscale x 2 x i64> poison, <vscale x 2 x i64> [[OP1]], i64 [[OP2]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vsmul_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) {
- return __riscv_vsmul(op1, op2, vl);
+ return __riscv_vsmul(op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i64> @test_vsmul_vv_i64m4
// CHECK-RV64-SAME: (<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsmul.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> poison, <vscale x 4 x i64> [[OP1]], <vscale x 4 x i64> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsmul.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> poison, <vscale x 4 x i64> [[OP1]], <vscale x 4 x i64> [[OP2]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vsmul_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) {
- return __riscv_vsmul(op1, op2, vl);
+ return __riscv_vsmul(op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i64> @test_vsmul_vx_i64m4
// CHECK-RV64-SAME: (<vscale x 4 x i64> [[OP1:%.*]], i64 noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsmul.nxv4i64.i64.i64(<vscale x 4 x i64> poison, <vscale x 4 x i64> [[OP1]], i64 [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsmul.nxv4i64.i64.i64(<vscale x 4 x i64> poison, <vscale x 4 x i64> [[OP1]], i64 [[OP2]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vsmul_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) {
- return __riscv_vsmul(op1, op2, vl);
+ return __riscv_vsmul(op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i64> @test_vsmul_vv_i64m8
// CHECK-RV64-SAME: (<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsmul.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> poison, <vscale x 8 x i64> [[OP1]], <vscale x 8 x i64> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsmul.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> poison, <vscale x 8 x i64> [[OP1]], <vscale x 8 x i64> [[OP2]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vsmul_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) {
- return __riscv_vsmul(op1, op2, vl);
+ return __riscv_vsmul(op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i64> @test_vsmul_vx_i64m8
// CHECK-RV64-SAME: (<vscale x 8 x i64> [[OP1:%.*]], i64 noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsmul.nxv8i64.i64.i64(<vscale x 8 x i64> poison, <vscale x 8 x i64> [[OP1]], i64 [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsmul.nxv8i64.i64.i64(<vscale x 8 x i64> poison, <vscale x 8 x i64> [[OP1]], i64 [[OP2]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vsmul_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) {
- return __riscv_vsmul(op1, op2, vl);
+ return __riscv_vsmul(op1, op2, __RISCV_VXRM_RNU, vl);
}
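// The masked forms tested below keep the same shape: the mask leads the C
// argument list and the explicit rounding mode still sits right before vl,
// while the IR call gains the mask operand, the vxrm immediate (i64 0 for
// __RISCV_VXRM_RNU), and the trailing policy operand (i64 3, tail and mask
// agnostic). A minimal sketch of a masked call, mirroring the masked tests
// that follow; the helper name is hypothetical and not part of the generated
// tests:
static inline vint8m1_t smul_masked_sketch(vbool8_t m, vint8m1_t a,
                                            vint8m1_t b, size_t vl) {
  // Fixed-point multiply under mask, round-to-nearest-up rounding mode.
  return __riscv_vsmul(m, a, b, __RISCV_VXRM_RNU, vl);
}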
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vsmul_vv_i8mf8_m
// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsmul.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> poison, <vscale x 1 x i8> [[OP1]], <vscale x 1 x i8> [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsmul.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> poison, <vscale x 1 x i8> [[OP1]], <vscale x 1 x i8> [[OP2]], <vscale x 1 x i1> [[MASK]], i64 0, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vsmul_vv_i8mf8_m(vbool64_t mask, vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
- return __riscv_vsmul(mask, op1, op2, vl);
+ return __riscv_vsmul(mask, op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vsmul_vx_i8mf8_m
// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i8 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsmul.mask.nxv1i8.i8.i64(<vscale x 1 x i8> poison, <vscale x 1 x i8> [[OP1]], i8 [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsmul.mask.nxv1i8.i8.i64(<vscale x 1 x i8> poison, <vscale x 1 x i8> [[OP1]], i8 [[OP2]], <vscale x 1 x i1> [[MASK]], i64 0, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vsmul_vx_i8mf8_m(vbool64_t mask, vint8mf8_t op1, int8_t op2, size_t vl) {
- return __riscv_vsmul(mask, op1, op2, vl);
+ return __riscv_vsmul(mask, op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vsmul_vv_i8mf4_m
// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsmul.mask.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> poison, <vscale x 2 x i8> [[OP1]], <vscale x 2 x i8> [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsmul.mask.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> poison, <vscale x 2 x i8> [[OP1]], <vscale x 2 x i8> [[OP2]], <vscale x 2 x i1> [[MASK]], i64 0, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vsmul_vv_i8mf4_m(vbool32_t mask, vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
- return __riscv_vsmul(mask, op1, op2, vl);
+ return __riscv_vsmul(mask, op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vsmul_vx_i8mf4_m
// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i8> [[OP1:%.*]], i8 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsmul.mask.nxv2i8.i8.i64(<vscale x 2 x i8> poison, <vscale x 2 x i8> [[OP1]], i8 [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsmul.mask.nxv2i8.i8.i64(<vscale x 2 x i8> poison, <vscale x 2 x i8> [[OP1]], i8 [[OP2]], <vscale x 2 x i1> [[MASK]], i64 0, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vsmul_vx_i8mf4_m(vbool32_t mask, vint8mf4_t op1, int8_t op2, size_t vl) {
- return __riscv_vsmul(mask, op1, op2, vl);
+ return __riscv_vsmul(mask, op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vsmul_vv_i8mf2_m
// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsmul.mask.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> poison, <vscale x 4 x i8> [[OP1]], <vscale x 4 x i8> [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsmul.mask.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> poison, <vscale x 4 x i8> [[OP1]], <vscale x 4 x i8> [[OP2]], <vscale x 4 x i1> [[MASK]], i64 0, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vsmul_vv_i8mf2_m(vbool16_t mask, vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
- return __riscv_vsmul(mask, op1, op2, vl);
+ return __riscv_vsmul(mask, op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vsmul_vx_i8mf2_m
// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i8> [[OP1:%.*]], i8 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsmul.mask.nxv4i8.i8.i64(<vscale x 4 x i8> poison, <vscale x 4 x i8> [[OP1]], i8 [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsmul.mask.nxv4i8.i8.i64(<vscale x 4 x i8> poison, <vscale x 4 x i8> [[OP1]], i8 [[OP2]], <vscale x 4 x i1> [[MASK]], i64 0, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vsmul_vx_i8mf2_m(vbool16_t mask, vint8mf2_t op1, int8_t op2, size_t vl) {
- return __riscv_vsmul(mask, op1, op2, vl);
+ return __riscv_vsmul(mask, op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vsmul_vv_i8m1_m
// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vsmul.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> poison, <vscale x 8 x i8> [[OP1]], <vscale x 8 x i8> [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vsmul.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> poison, <vscale x 8 x i8> [[OP1]], <vscale x 8 x i8> [[OP2]], <vscale x 8 x i1> [[MASK]], i64 0, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vsmul_vv_i8m1_m(vbool8_t mask, vint8m1_t op1, vint8m1_t op2, size_t vl) {
- return __riscv_vsmul(mask, op1, op2, vl);
+ return __riscv_vsmul(mask, op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vsmul_vx_i8m1_m
// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i8> [[OP1:%.*]], i8 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vsmul.mask.nxv8i8.i8.i64(<vscale x 8 x i8> poison, <vscale x 8 x i8> [[OP1]], i8 [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vsmul.mask.nxv8i8.i8.i64(<vscale x 8 x i8> poison, <vscale x 8 x i8> [[OP1]], i8 [[OP2]], <vscale x 8 x i1> [[MASK]], i64 0, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vsmul_vx_i8m1_m(vbool8_t mask, vint8m1_t op1, int8_t op2, size_t vl) {
- return __riscv_vsmul(mask, op1, op2, vl);
+ return __riscv_vsmul(mask, op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vsmul_vv_i8m2_m
// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vsmul.mask.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> poison, <vscale x 16 x i8> [[OP1]], <vscale x 16 x i8> [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vsmul.mask.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> poison, <vscale x 16 x i8> [[OP1]], <vscale x 16 x i8> [[OP2]], <vscale x 16 x i1> [[MASK]], i64 0, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vsmul_vv_i8m2_m(vbool4_t mask, vint8m2_t op1, vint8m2_t op2, size_t vl) {
- return __riscv_vsmul(mask, op1, op2, vl);
+ return __riscv_vsmul(mask, op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vsmul_vx_i8m2_m
// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x i8> [[OP1:%.*]], i8 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vsmul.mask.nxv16i8.i8.i64(<vscale x 16 x i8> poison, <vscale x 16 x i8> [[OP1]], i8 [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vsmul.mask.nxv16i8.i8.i64(<vscale x 16 x i8> poison, <vscale x 16 x i8> [[OP1]], i8 [[OP2]], <vscale x 16 x i1> [[MASK]], i64 0, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vsmul_vx_i8m2_m(vbool4_t mask, vint8m2_t op1, int8_t op2, size_t vl) {
- return __riscv_vsmul(mask, op1, op2, vl);
+ return __riscv_vsmul(mask, op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vsmul_vv_i8m4_m
// CHECK-RV64-SAME: (<vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsmul.mask.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> poison, <vscale x 32 x i8> [[OP1]], <vscale x 32 x i8> [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsmul.mask.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> poison, <vscale x 32 x i8> [[OP1]], <vscale x 32 x i8> [[OP2]], <vscale x 32 x i1> [[MASK]], i64 0, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vsmul_vv_i8m4_m(vbool2_t mask, vint8m4_t op1, vint8m4_t op2, size_t vl) {
- return __riscv_vsmul(mask, op1, op2, vl);
+ return __riscv_vsmul(mask, op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vsmul_vx_i8m4_m
// CHECK-RV64-SAME: (<vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x i8> [[OP1:%.*]], i8 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsmul.mask.nxv32i8.i8.i64(<vscale x 32 x i8> poison, <vscale x 32 x i8> [[OP1]], i8 [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsmul.mask.nxv32i8.i8.i64(<vscale x 32 x i8> poison, <vscale x 32 x i8> [[OP1]], i8 [[OP2]], <vscale x 32 x i1> [[MASK]], i64 0, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vsmul_vx_i8m4_m(vbool2_t mask, vint8m4_t op1, int8_t op2, size_t vl) {
- return __riscv_vsmul(mask, op1, op2, vl);
+ return __riscv_vsmul(mask, op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 64 x i8> @test_vsmul_vv_i8m8_m
// CHECK-RV64-SAME: (<vscale x 64 x i1> [[MASK:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsmul.mask.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> poison, <vscale x 64 x i8> [[OP1]], <vscale x 64 x i8> [[OP2]], <vscale x 64 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsmul.mask.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> poison, <vscale x 64 x i8> [[OP1]], <vscale x 64 x i8> [[OP2]], <vscale x 64 x i1> [[MASK]], i64 0, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vsmul_vv_i8m8_m(vbool1_t mask, vint8m8_t op1, vint8m8_t op2, size_t vl) {
- return __riscv_vsmul(mask, op1, op2, vl);
+ return __riscv_vsmul(mask, op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 64 x i8> @test_vsmul_vx_i8m8_m
// CHECK-RV64-SAME: (<vscale x 64 x i1> [[MASK:%.*]], <vscale x 64 x i8> [[OP1:%.*]], i8 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsmul.mask.nxv64i8.i8.i64(<vscale x 64 x i8> poison, <vscale x 64 x i8> [[OP1]], i8 [[OP2]], <vscale x 64 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsmul.mask.nxv64i8.i8.i64(<vscale x 64 x i8> poison, <vscale x 64 x i8> [[OP1]], i8 [[OP2]], <vscale x 64 x i1> [[MASK]], i64 0, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vsmul_vx_i8m8_m(vbool1_t mask, vint8m8_t op1, int8_t op2, size_t vl) {
- return __riscv_vsmul(mask, op1, op2, vl);
+ return __riscv_vsmul(mask, op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i16> @test_vsmul_vv_i16mf4_m
// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsmul.mask.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> poison, <vscale x 1 x i16> [[OP1]], <vscale x 1 x i16> [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsmul.mask.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> poison, <vscale x 1 x i16> [[OP1]], <vscale x 1 x i16> [[OP2]], <vscale x 1 x i1> [[MASK]], i64 0, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vsmul_vv_i16mf4_m(vbool64_t mask, vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
- return __riscv_vsmul(mask, op1, op2, vl);
+ return __riscv_vsmul(mask, op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i16> @test_vsmul_vx_i16mf4_m
// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i16 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsmul.mask.nxv1i16.i16.i64(<vscale x 1 x i16> poison, <vscale x 1 x i16> [[OP1]], i16 [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsmul.mask.nxv1i16.i16.i64(<vscale x 1 x i16> poison, <vscale x 1 x i16> [[OP1]], i16 [[OP2]], <vscale x 1 x i1> [[MASK]], i64 0, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vsmul_vx_i16mf4_m(vbool64_t mask, vint16mf4_t op1, int16_t op2, size_t vl) {
- return __riscv_vsmul(mask, op1, op2, vl);
+ return __riscv_vsmul(mask, op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i16> @test_vsmul_vv_i16mf2_m
// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsmul.mask.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> poison, <vscale x 2 x i16> [[OP1]], <vscale x 2 x i16> [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsmul.mask.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> poison, <vscale x 2 x i16> [[OP1]], <vscale x 2 x i16> [[OP2]], <vscale x 2 x i1> [[MASK]], i64 0, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vsmul_vv_i16mf2_m(vbool32_t mask, vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
- return __riscv_vsmul(mask, op1, op2, vl);
+ return __riscv_vsmul(mask, op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i16> @test_vsmul_vx_i16mf2_m
// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i16 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsmul.mask.nxv2i16.i16.i64(<vscale x 2 x i16> poison, <vscale x 2 x i16> [[OP1]], i16 [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsmul.mask.nxv2i16.i16.i64(<vscale x 2 x i16> poison, <vscale x 2 x i16> [[OP1]], i16 [[OP2]], <vscale x 2 x i1> [[MASK]], i64 0, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vsmul_vx_i16mf2_m(vbool32_t mask, vint16mf2_t op1, int16_t op2, size_t vl) {
- return __riscv_vsmul(mask, op1, op2, vl);
+ return __riscv_vsmul(mask, op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vsmul_vv_i16m1_m
// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsmul.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> poison, <vscale x 4 x i16> [[OP1]], <vscale x 4 x i16> [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsmul.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> poison, <vscale x 4 x i16> [[OP1]], <vscale x 4 x i16> [[OP2]], <vscale x 4 x i1> [[MASK]], i64 0, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vsmul_vv_i16m1_m(vbool16_t mask, vint16m1_t op1, vint16m1_t op2, size_t vl) {
- return __riscv_vsmul(mask, op1, op2, vl);
+ return __riscv_vsmul(mask, op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vsmul_vx_i16m1_m
// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i16 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsmul.mask.nxv4i16.i16.i64(<vscale x 4 x i16> poison, <vscale x 4 x i16> [[OP1]], i16 [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsmul.mask.nxv4i16.i16.i64(<vscale x 4 x i16> poison, <vscale x 4 x i16> [[OP1]], i16 [[OP2]], <vscale x 4 x i1> [[MASK]], i64 0, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vsmul_vx_i16m1_m(vbool16_t mask, vint16m1_t op1, int16_t op2, size_t vl) {
- return __riscv_vsmul(mask, op1, op2, vl);
+ return __riscv_vsmul(mask, op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i16> @test_vsmul_vv_i16m2_m
// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsmul.mask.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> poison, <vscale x 8 x i16> [[OP1]], <vscale x 8 x i16> [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsmul.mask.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> poison, <vscale x 8 x i16> [[OP1]], <vscale x 8 x i16> [[OP2]], <vscale x 8 x i1> [[MASK]], i64 0, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vsmul_vv_i16m2_m(vbool8_t mask, vint16m2_t op1, vint16m2_t op2, size_t vl) {
- return __riscv_vsmul(mask, op1, op2, vl);
+ return __riscv_vsmul(mask, op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i16> @test_vsmul_vx_i16m2_m
// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i16 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsmul.mask.nxv8i16.i16.i64(<vscale x 8 x i16> poison, <vscale x 8 x i16> [[OP1]], i16 [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsmul.mask.nxv8i16.i16.i64(<vscale x 8 x i16> poison, <vscale x 8 x i16> [[OP1]], i16 [[OP2]], <vscale x 8 x i1> [[MASK]], i64 0, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vsmul_vx_i16m2_m(vbool8_t mask, vint16m2_t op1, int16_t op2, size_t vl) {
- return __riscv_vsmul(mask, op1, op2, vl);
+ return __riscv_vsmul(mask, op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i16> @test_vsmul_vv_i16m4_m
// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsmul.mask.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> poison, <vscale x 16 x i16> [[OP1]], <vscale x 16 x i16> [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsmul.mask.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> poison, <vscale x 16 x i16> [[OP1]], <vscale x 16 x i16> [[OP2]], <vscale x 16 x i1> [[MASK]], i64 0, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vsmul_vv_i16m4_m(vbool4_t mask, vint16m4_t op1, vint16m4_t op2, size_t vl) {
- return __riscv_vsmul(mask, op1, op2, vl);
+ return __riscv_vsmul(mask, op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i16> @test_vsmul_vx_i16m4_m
// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i16 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsmul.mask.nxv16i16.i16.i64(<vscale x 16 x i16> poison, <vscale x 16 x i16> [[OP1]], i16 [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsmul.mask.nxv16i16.i16.i64(<vscale x 16 x i16> poison, <vscale x 16 x i16> [[OP1]], i16 [[OP2]], <vscale x 16 x i1> [[MASK]], i64 0, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vsmul_vx_i16m4_m(vbool4_t mask, vint16m4_t op1, int16_t op2, size_t vl) {
- return __riscv_vsmul(mask, op1, op2, vl);
+ return __riscv_vsmul(mask, op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i16> @test_vsmul_vv_i16m8_m
// CHECK-RV64-SAME: (<vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsmul.mask.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> poison, <vscale x 32 x i16> [[OP1]], <vscale x 32 x i16> [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsmul.mask.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> poison, <vscale x 32 x i16> [[OP1]], <vscale x 32 x i16> [[OP2]], <vscale x 32 x i1> [[MASK]], i64 0, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vsmul_vv_i16m8_m(vbool2_t mask, vint16m8_t op1, vint16m8_t op2, size_t vl) {
- return __riscv_vsmul(mask, op1, op2, vl);
+ return __riscv_vsmul(mask, op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i16> @test_vsmul_vx_i16m8_m
// CHECK-RV64-SAME: (<vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i16 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsmul.mask.nxv32i16.i16.i64(<vscale x 32 x i16> poison, <vscale x 32 x i16> [[OP1]], i16 [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsmul.mask.nxv32i16.i16.i64(<vscale x 32 x i16> poison, <vscale x 32 x i16> [[OP1]], i16 [[OP2]], <vscale x 32 x i1> [[MASK]], i64 0, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vsmul_vx_i16m8_m(vbool2_t mask, vint16m8_t op1, int16_t op2, size_t vl) {
- return __riscv_vsmul(mask, op1, op2, vl);
+ return __riscv_vsmul(mask, op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i32> @test_vsmul_vv_i32mf2_m
// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsmul.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1]], <vscale x 1 x i32> [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsmul.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1]], <vscale x 1 x i32> [[OP2]], <vscale x 1 x i1> [[MASK]], i64 0, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vsmul_vv_i32mf2_m(vbool64_t mask, vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
- return __riscv_vsmul(mask, op1, op2, vl);
+ return __riscv_vsmul(mask, op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i32> @test_vsmul_vx_i32mf2_m
// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsmul.mask.nxv1i32.i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1]], i32 [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsmul.mask.nxv1i32.i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1]], i32 [[OP2]], <vscale x 1 x i1> [[MASK]], i64 0, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vsmul_vx_i32mf2_m(vbool64_t mask, vint32mf2_t op1, int32_t op2, size_t vl) {
- return __riscv_vsmul(mask, op1, op2, vl);
+ return __riscv_vsmul(mask, op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i32> @test_vsmul_vv_i32m1_m
// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsmul.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> poison, <vscale x 2 x i32> [[OP1]], <vscale x 2 x i32> [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsmul.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> poison, <vscale x 2 x i32> [[OP1]], <vscale x 2 x i32> [[OP2]], <vscale x 2 x i1> [[MASK]], i64 0, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vsmul_vv_i32m1_m(vbool32_t mask, vint32m1_t op1, vint32m1_t op2, size_t vl) {
- return __riscv_vsmul(mask, op1, op2, vl);
+ return __riscv_vsmul(mask, op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i32> @test_vsmul_vx_i32m1_m
// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i32 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsmul.mask.nxv2i32.i32.i64(<vscale x 2 x i32> poison, <vscale x 2 x i32> [[OP1]], i32 [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsmul.mask.nxv2i32.i32.i64(<vscale x 2 x i32> poison, <vscale x 2 x i32> [[OP1]], i32 [[OP2]], <vscale x 2 x i1> [[MASK]], i64 0, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vsmul_vx_i32m1_m(vbool32_t mask, vint32m1_t op1, int32_t op2, size_t vl) {
- return __riscv_vsmul(mask, op1, op2, vl);
+ return __riscv_vsmul(mask, op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i32> @test_vsmul_vv_i32m2_m
// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsmul.mask.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> poison, <vscale x 4 x i32> [[OP1]], <vscale x 4 x i32> [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsmul.mask.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> poison, <vscale x 4 x i32> [[OP1]], <vscale x 4 x i32> [[OP2]], <vscale x 4 x i1> [[MASK]], i64 0, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vsmul_vv_i32m2_m(vbool16_t mask, vint32m2_t op1, vint32m2_t op2, size_t vl) {
- return __riscv_vsmul(mask, op1, op2, vl);
+ return __riscv_vsmul(mask, op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i32> @test_vsmul_vx_i32m2_m
// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i32 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsmul.mask.nxv4i32.i32.i64(<vscale x 4 x i32> poison, <vscale x 4 x i32> [[OP1]], i32 [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsmul.mask.nxv4i32.i32.i64(<vscale x 4 x i32> poison, <vscale x 4 x i32> [[OP1]], i32 [[OP2]], <vscale x 4 x i1> [[MASK]], i64 0, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vsmul_vx_i32m2_m(vbool16_t mask, vint32m2_t op1, int32_t op2, size_t vl) {
- return __riscv_vsmul(mask, op1, op2, vl);
+ return __riscv_vsmul(mask, op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i32> @test_vsmul_vv_i32m4_m
// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsmul.mask.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> poison, <vscale x 8 x i32> [[OP1]], <vscale x 8 x i32> [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsmul.mask.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> poison, <vscale x 8 x i32> [[OP1]], <vscale x 8 x i32> [[OP2]], <vscale x 8 x i1> [[MASK]], i64 0, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vsmul_vv_i32m4_m(vbool8_t mask, vint32m4_t op1, vint32m4_t op2, size_t vl) {
- return __riscv_vsmul(mask, op1, op2, vl);
+ return __riscv_vsmul(mask, op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i32> @test_vsmul_vx_i32m4_m
// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i32 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsmul.mask.nxv8i32.i32.i64(<vscale x 8 x i32> poison, <vscale x 8 x i32> [[OP1]], i32 [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsmul.mask.nxv8i32.i32.i64(<vscale x 8 x i32> poison, <vscale x 8 x i32> [[OP1]], i32 [[OP2]], <vscale x 8 x i1> [[MASK]], i64 0, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vsmul_vx_i32m4_m(vbool8_t mask, vint32m4_t op1, int32_t op2, size_t vl) {
- return __riscv_vsmul(mask, op1, op2, vl);
+ return __riscv_vsmul(mask, op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i32> @test_vsmul_vv_i32m8_m
// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsmul.mask.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> poison, <vscale x 16 x i32> [[OP1]], <vscale x 16 x i32> [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsmul.mask.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> poison, <vscale x 16 x i32> [[OP1]], <vscale x 16 x i32> [[OP2]], <vscale x 16 x i1> [[MASK]], i64 0, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vsmul_vv_i32m8_m(vbool4_t mask, vint32m8_t op1, vint32m8_t op2, size_t vl) {
- return __riscv_vsmul(mask, op1, op2, vl);
+ return __riscv_vsmul(mask, op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i32> @test_vsmul_vx_i32m8_m
// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i32 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsmul.mask.nxv16i32.i32.i64(<vscale x 16 x i32> poison, <vscale x 16 x i32> [[OP1]], i32 [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsmul.mask.nxv16i32.i32.i64(<vscale x 16 x i32> poison, <vscale x 16 x i32> [[OP1]], i32 [[OP2]], <vscale x 16 x i1> [[MASK]], i64 0, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vsmul_vx_i32m8_m(vbool4_t mask, vint32m8_t op1, int32_t op2, size_t vl) {
- return __riscv_vsmul(mask, op1, op2, vl);
+ return __riscv_vsmul(mask, op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i64> @test_vsmul_vv_i64m1_m
// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsmul.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> poison, <vscale x 1 x i64> [[OP1]], <vscale x 1 x i64> [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsmul.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> poison, <vscale x 1 x i64> [[OP1]], <vscale x 1 x i64> [[OP2]], <vscale x 1 x i1> [[MASK]], i64 0, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vsmul_vv_i64m1_m(vbool64_t mask, vint64m1_t op1, vint64m1_t op2, size_t vl) {
- return __riscv_vsmul(mask, op1, op2, vl);
+ return __riscv_vsmul(mask, op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i64> @test_vsmul_vx_i64m1_m
// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i64 noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsmul.mask.nxv1i64.i64.i64(<vscale x 1 x i64> poison, <vscale x 1 x i64> [[OP1]], i64 [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsmul.mask.nxv1i64.i64.i64(<vscale x 1 x i64> poison, <vscale x 1 x i64> [[OP1]], i64 [[OP2]], <vscale x 1 x i1> [[MASK]], i64 0, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vsmul_vx_i64m1_m(vbool64_t mask, vint64m1_t op1, int64_t op2, size_t vl) {
- return __riscv_vsmul(mask, op1, op2, vl);
+ return __riscv_vsmul(mask, op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i64> @test_vsmul_vv_i64m2_m
// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsmul.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> poison, <vscale x 2 x i64> [[OP1]], <vscale x 2 x i64> [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsmul.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> poison, <vscale x 2 x i64> [[OP1]], <vscale x 2 x i64> [[OP2]], <vscale x 2 x i1> [[MASK]], i64 0, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vsmul_vv_i64m2_m(vbool32_t mask, vint64m2_t op1, vint64m2_t op2, size_t vl) {
- return __riscv_vsmul(mask, op1, op2, vl);
+ return __riscv_vsmul(mask, op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i64> @test_vsmul_vx_i64m2_m
// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i64 noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsmul.mask.nxv2i64.i64.i64(<vscale x 2 x i64> poison, <vscale x 2 x i64> [[OP1]], i64 [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsmul.mask.nxv2i64.i64.i64(<vscale x 2 x i64> poison, <vscale x 2 x i64> [[OP1]], i64 [[OP2]], <vscale x 2 x i1> [[MASK]], i64 0, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vsmul_vx_i64m2_m(vbool32_t mask, vint64m2_t op1, int64_t op2, size_t vl) {
- return __riscv_vsmul(mask, op1, op2, vl);
+ return __riscv_vsmul(mask, op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i64> @test_vsmul_vv_i64m4_m
// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsmul.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> poison, <vscale x 4 x i64> [[OP1]], <vscale x 4 x i64> [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsmul.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> poison, <vscale x 4 x i64> [[OP1]], <vscale x 4 x i64> [[OP2]], <vscale x 4 x i1> [[MASK]], i64 0, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vsmul_vv_i64m4_m(vbool16_t mask, vint64m4_t op1, vint64m4_t op2, size_t vl) {
- return __riscv_vsmul(mask, op1, op2, vl);
+ return __riscv_vsmul(mask, op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i64> @test_vsmul_vx_i64m4_m
// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i64 noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsmul.mask.nxv4i64.i64.i64(<vscale x 4 x i64> poison, <vscale x 4 x i64> [[OP1]], i64 [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsmul.mask.nxv4i64.i64.i64(<vscale x 4 x i64> poison, <vscale x 4 x i64> [[OP1]], i64 [[OP2]], <vscale x 4 x i1> [[MASK]], i64 0, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vsmul_vx_i64m4_m(vbool16_t mask, vint64m4_t op1, int64_t op2, size_t vl) {
- return __riscv_vsmul(mask, op1, op2, vl);
+ return __riscv_vsmul(mask, op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i64> @test_vsmul_vv_i64m8_m
// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsmul.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> poison, <vscale x 8 x i64> [[OP1]], <vscale x 8 x i64> [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsmul.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> poison, <vscale x 8 x i64> [[OP1]], <vscale x 8 x i64> [[OP2]], <vscale x 8 x i1> [[MASK]], i64 0, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vsmul_vv_i64m8_m(vbool8_t mask, vint64m8_t op1, vint64m8_t op2, size_t vl) {
- return __riscv_vsmul(mask, op1, op2, vl);
+ return __riscv_vsmul(mask, op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i64> @test_vsmul_vx_i64m8_m
// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsmul.mask.nxv8i64.i64.i64(<vscale x 8 x i64> poison, <vscale x 8 x i64> [[OP1]], i64 [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsmul.mask.nxv8i64.i64.i64(<vscale x 8 x i64> poison, <vscale x 8 x i64> [[OP1]], i64 [[OP2]], <vscale x 8 x i1> [[MASK]], i64 0, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vsmul_vx_i64m8_m(vbool8_t mask, vint64m8_t op1, int64_t op2, size_t vl) {
- return __riscv_vsmul(mask, op1, op2, vl);
+ return __riscv_vsmul(mask, op1, op2, __RISCV_VXRM_RNU, vl);
}
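// The vssra tests below follow the same convention as vsmul above: the
// intrinsic now takes an explicit vxrm operand immediately before vl, and
// __RISCV_VXRM_RNU shows up as the i64 0 immediate in the IR call. A minimal
// sketch of the updated signature; the helper name is hypothetical and not
// part of the generated tests:
static inline vint8m1_t ssra_sketch(vint8m1_t a, vuint8m1_t shift, size_t vl) {
  // Scaling right shift with round-to-nearest-up rounding mode.
  return __riscv_vssra(a, shift, __RISCV_VXRM_RNU, vl);
}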
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vssra_vv_i8mf8
// CHECK-RV64-SAME: (<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vssra.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> poison, <vscale x 1 x i8> [[OP1]], <vscale x 1 x i8> [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vssra.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> poison, <vscale x 1 x i8> [[OP1]], <vscale x 1 x i8> [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vssra_vv_i8mf8(vint8mf8_t op1, vuint8mf8_t shift, size_t vl) {
- return __riscv_vssra(op1, shift, vl);
+ return __riscv_vssra(op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vssra_vx_i8mf8
// CHECK-RV64-SAME: (<vscale x 1 x i8> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vssra.nxv1i8.i64.i64(<vscale x 1 x i8> poison, <vscale x 1 x i8> [[OP1]], i64 [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vssra.nxv1i8.i64.i64(<vscale x 1 x i8> poison, <vscale x 1 x i8> [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vssra_vx_i8mf8(vint8mf8_t op1, size_t shift, size_t vl) {
- return __riscv_vssra(op1, shift, vl);
+ return __riscv_vssra(op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vssra_vv_i8mf4
// CHECK-RV64-SAME: (<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vssra.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> poison, <vscale x 2 x i8> [[OP1]], <vscale x 2 x i8> [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vssra.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> poison, <vscale x 2 x i8> [[OP1]], <vscale x 2 x i8> [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vssra_vv_i8mf4(vint8mf4_t op1, vuint8mf4_t shift, size_t vl) {
- return __riscv_vssra(op1, shift, vl);
+ return __riscv_vssra(op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vssra_vx_i8mf4
// CHECK-RV64-SAME: (<vscale x 2 x i8> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vssra.nxv2i8.i64.i64(<vscale x 2 x i8> poison, <vscale x 2 x i8> [[OP1]], i64 [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vssra.nxv2i8.i64.i64(<vscale x 2 x i8> poison, <vscale x 2 x i8> [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vssra_vx_i8mf4(vint8mf4_t op1, size_t shift, size_t vl) {
- return __riscv_vssra(op1, shift, vl);
+ return __riscv_vssra(op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vssra_vv_i8mf2
// CHECK-RV64-SAME: (<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vssra.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> poison, <vscale x 4 x i8> [[OP1]], <vscale x 4 x i8> [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vssra.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> poison, <vscale x 4 x i8> [[OP1]], <vscale x 4 x i8> [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vssra_vv_i8mf2(vint8mf2_t op1, vuint8mf2_t shift, size_t vl) {
- return __riscv_vssra(op1, shift, vl);
+ return __riscv_vssra(op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vssra_vx_i8mf2
// CHECK-RV64-SAME: (<vscale x 4 x i8> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vssra.nxv4i8.i64.i64(<vscale x 4 x i8> poison, <vscale x 4 x i8> [[OP1]], i64 [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vssra.nxv4i8.i64.i64(<vscale x 4 x i8> poison, <vscale x 4 x i8> [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vssra_vx_i8mf2(vint8mf2_t op1, size_t shift, size_t vl) {
- return __riscv_vssra(op1, shift, vl);
+ return __riscv_vssra(op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vssra_vv_i8m1
// CHECK-RV64-SAME: (<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vssra.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> poison, <vscale x 8 x i8> [[OP1]], <vscale x 8 x i8> [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vssra.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> poison, <vscale x 8 x i8> [[OP1]], <vscale x 8 x i8> [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vssra_vv_i8m1(vint8m1_t op1, vuint8m1_t shift, size_t vl) {
- return __riscv_vssra(op1, shift, vl);
+ return __riscv_vssra(op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vssra_vx_i8m1
// CHECK-RV64-SAME: (<vscale x 8 x i8> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vssra.nxv8i8.i64.i64(<vscale x 8 x i8> poison, <vscale x 8 x i8> [[OP1]], i64 [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vssra.nxv8i8.i64.i64(<vscale x 8 x i8> poison, <vscale x 8 x i8> [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vssra_vx_i8m1(vint8m1_t op1, size_t shift, size_t vl) {
- return __riscv_vssra(op1, shift, vl);
+ return __riscv_vssra(op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vssra_vv_i8m2
// CHECK-RV64-SAME: (<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vssra.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> poison, <vscale x 16 x i8> [[OP1]], <vscale x 16 x i8> [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vssra.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> poison, <vscale x 16 x i8> [[OP1]], <vscale x 16 x i8> [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vssra_vv_i8m2(vint8m2_t op1, vuint8m2_t shift, size_t vl) {
- return __riscv_vssra(op1, shift, vl);
+ return __riscv_vssra(op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vssra_vx_i8m2
// CHECK-RV64-SAME: (<vscale x 16 x i8> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vssra.nxv16i8.i64.i64(<vscale x 16 x i8> poison, <vscale x 16 x i8> [[OP1]], i64 [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vssra.nxv16i8.i64.i64(<vscale x 16 x i8> poison, <vscale x 16 x i8> [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vssra_vx_i8m2(vint8m2_t op1, size_t shift, size_t vl) {
- return __riscv_vssra(op1, shift, vl);
+ return __riscv_vssra(op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vssra_vv_i8m4
// CHECK-RV64-SAME: (<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vssra.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> poison, <vscale x 32 x i8> [[OP1]], <vscale x 32 x i8> [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vssra.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> poison, <vscale x 32 x i8> [[OP1]], <vscale x 32 x i8> [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vssra_vv_i8m4(vint8m4_t op1, vuint8m4_t shift, size_t vl) {
- return __riscv_vssra(op1, shift, vl);
+ return __riscv_vssra(op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vssra_vx_i8m4
// CHECK-RV64-SAME: (<vscale x 32 x i8> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vssra.nxv32i8.i64.i64(<vscale x 32 x i8> poison, <vscale x 32 x i8> [[OP1]], i64 [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vssra.nxv32i8.i64.i64(<vscale x 32 x i8> poison, <vscale x 32 x i8> [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vssra_vx_i8m4(vint8m4_t op1, size_t shift, size_t vl) {
- return __riscv_vssra(op1, shift, vl);
+ return __riscv_vssra(op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 64 x i8> @test_vssra_vv_i8m8
// CHECK-RV64-SAME: (<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vssra.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> poison, <vscale x 64 x i8> [[OP1]], <vscale x 64 x i8> [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vssra.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> poison, <vscale x 64 x i8> [[OP1]], <vscale x 64 x i8> [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vssra_vv_i8m8(vint8m8_t op1, vuint8m8_t shift, size_t vl) {
- return __riscv_vssra(op1, shift, vl);
+ return __riscv_vssra(op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 64 x i8> @test_vssra_vx_i8m8
// CHECK-RV64-SAME: (<vscale x 64 x i8> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vssra.nxv64i8.i64.i64(<vscale x 64 x i8> poison, <vscale x 64 x i8> [[OP1]], i64 [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vssra.nxv64i8.i64.i64(<vscale x 64 x i8> poison, <vscale x 64 x i8> [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vssra_vx_i8m8(vint8m8_t op1, size_t shift, size_t vl) {
- return __riscv_vssra(op1, shift, vl);
+ return __riscv_vssra(op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i16> @test_vssra_vv_i16mf4
// CHECK-RV64-SAME: (<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vssra.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> poison, <vscale x 1 x i16> [[OP1]], <vscale x 1 x i16> [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vssra.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> poison, <vscale x 1 x i16> [[OP1]], <vscale x 1 x i16> [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vssra_vv_i16mf4(vint16mf4_t op1, vuint16mf4_t shift, size_t vl) {
- return __riscv_vssra(op1, shift, vl);
+ return __riscv_vssra(op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i16> @test_vssra_vx_i16mf4
// CHECK-RV64-SAME: (<vscale x 1 x i16> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vssra.nxv1i16.i64.i64(<vscale x 1 x i16> poison, <vscale x 1 x i16> [[OP1]], i64 [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vssra.nxv1i16.i64.i64(<vscale x 1 x i16> poison, <vscale x 1 x i16> [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vssra_vx_i16mf4(vint16mf4_t op1, size_t shift, size_t vl) {
- return __riscv_vssra(op1, shift, vl);
+ return __riscv_vssra(op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i16> @test_vssra_vv_i16mf2
// CHECK-RV64-SAME: (<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vssra.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> poison, <vscale x 2 x i16> [[OP1]], <vscale x 2 x i16> [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vssra.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> poison, <vscale x 2 x i16> [[OP1]], <vscale x 2 x i16> [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vssra_vv_i16mf2(vint16mf2_t op1, vuint16mf2_t shift, size_t vl) {
- return __riscv_vssra(op1, shift, vl);
+ return __riscv_vssra(op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i16> @test_vssra_vx_i16mf2
// CHECK-RV64-SAME: (<vscale x 2 x i16> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vssra.nxv2i16.i64.i64(<vscale x 2 x i16> poison, <vscale x 2 x i16> [[OP1]], i64 [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vssra.nxv2i16.i64.i64(<vscale x 2 x i16> poison, <vscale x 2 x i16> [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vssra_vx_i16mf2(vint16mf2_t op1, size_t shift, size_t vl) {
- return __riscv_vssra(op1, shift, vl);
+ return __riscv_vssra(op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vssra_vv_i16m1
// CHECK-RV64-SAME: (<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vssra.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> poison, <vscale x 4 x i16> [[OP1]], <vscale x 4 x i16> [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vssra.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> poison, <vscale x 4 x i16> [[OP1]], <vscale x 4 x i16> [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vssra_vv_i16m1(vint16m1_t op1, vuint16m1_t shift, size_t vl) {
- return __riscv_vssra(op1, shift, vl);
+ return __riscv_vssra(op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vssra_vx_i16m1
// CHECK-RV64-SAME: (<vscale x 4 x i16> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vssra.nxv4i16.i64.i64(<vscale x 4 x i16> poison, <vscale x 4 x i16> [[OP1]], i64 [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vssra.nxv4i16.i64.i64(<vscale x 4 x i16> poison, <vscale x 4 x i16> [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vssra_vx_i16m1(vint16m1_t op1, size_t shift, size_t vl) {
- return __riscv_vssra(op1, shift, vl);
+ return __riscv_vssra(op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i16> @test_vssra_vv_i16m2
// CHECK-RV64-SAME: (<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vssra.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> poison, <vscale x 8 x i16> [[OP1]], <vscale x 8 x i16> [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vssra.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> poison, <vscale x 8 x i16> [[OP1]], <vscale x 8 x i16> [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vssra_vv_i16m2(vint16m2_t op1, vuint16m2_t shift, size_t vl) {
- return __riscv_vssra(op1, shift, vl);
+ return __riscv_vssra(op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i16> @test_vssra_vx_i16m2
// CHECK-RV64-SAME: (<vscale x 8 x i16> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vssra.nxv8i16.i64.i64(<vscale x 8 x i16> poison, <vscale x 8 x i16> [[OP1]], i64 [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vssra.nxv8i16.i64.i64(<vscale x 8 x i16> poison, <vscale x 8 x i16> [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vssra_vx_i16m2(vint16m2_t op1, size_t shift, size_t vl) {
- return __riscv_vssra(op1, shift, vl);
+ return __riscv_vssra(op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i16> @test_vssra_vv_i16m4
// CHECK-RV64-SAME: (<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vssra.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> poison, <vscale x 16 x i16> [[OP1]], <vscale x 16 x i16> [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vssra.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> poison, <vscale x 16 x i16> [[OP1]], <vscale x 16 x i16> [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vssra_vv_i16m4(vint16m4_t op1, vuint16m4_t shift, size_t vl) {
- return __riscv_vssra(op1, shift, vl);
+ return __riscv_vssra(op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i16> @test_vssra_vx_i16m4
// CHECK-RV64-SAME: (<vscale x 16 x i16> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vssra.nxv16i16.i64.i64(<vscale x 16 x i16> poison, <vscale x 16 x i16> [[OP1]], i64 [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vssra.nxv16i16.i64.i64(<vscale x 16 x i16> poison, <vscale x 16 x i16> [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vssra_vx_i16m4(vint16m4_t op1, size_t shift, size_t vl) {
- return __riscv_vssra(op1, shift, vl);
+ return __riscv_vssra(op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i16> @test_vssra_vv_i16m8
// CHECK-RV64-SAME: (<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vssra.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> poison, <vscale x 32 x i16> [[OP1]], <vscale x 32 x i16> [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vssra.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> poison, <vscale x 32 x i16> [[OP1]], <vscale x 32 x i16> [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vssra_vv_i16m8(vint16m8_t op1, vuint16m8_t shift, size_t vl) {
- return __riscv_vssra(op1, shift, vl);
+ return __riscv_vssra(op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i16> @test_vssra_vx_i16m8
// CHECK-RV64-SAME: (<vscale x 32 x i16> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vssra.nxv32i16.i64.i64(<vscale x 32 x i16> poison, <vscale x 32 x i16> [[OP1]], i64 [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vssra.nxv32i16.i64.i64(<vscale x 32 x i16> poison, <vscale x 32 x i16> [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vssra_vx_i16m8(vint16m8_t op1, size_t shift, size_t vl) {
- return __riscv_vssra(op1, shift, vl);
+ return __riscv_vssra(op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i32> @test_vssra_vv_i32mf2
// CHECK-RV64-SAME: (<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vssra.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1]], <vscale x 1 x i32> [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vssra.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1]], <vscale x 1 x i32> [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vssra_vv_i32mf2(vint32mf2_t op1, vuint32mf2_t shift, size_t vl) {
- return __riscv_vssra(op1, shift, vl);
+ return __riscv_vssra(op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i32> @test_vssra_vx_i32mf2
// CHECK-RV64-SAME: (<vscale x 1 x i32> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vssra.nxv1i32.i64.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1]], i64 [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vssra.nxv1i32.i64.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vssra_vx_i32mf2(vint32mf2_t op1, size_t shift, size_t vl) {
- return __riscv_vssra(op1, shift, vl);
+ return __riscv_vssra(op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i32> @test_vssra_vv_i32m1
// CHECK-RV64-SAME: (<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vssra.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> poison, <vscale x 2 x i32> [[OP1]], <vscale x 2 x i32> [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vssra.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> poison, <vscale x 2 x i32> [[OP1]], <vscale x 2 x i32> [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vssra_vv_i32m1(vint32m1_t op1, vuint32m1_t shift, size_t vl) {
- return __riscv_vssra(op1, shift, vl);
+ return __riscv_vssra(op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i32> @test_vssra_vx_i32m1
// CHECK-RV64-SAME: (<vscale x 2 x i32> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vssra.nxv2i32.i64.i64(<vscale x 2 x i32> poison, <vscale x 2 x i32> [[OP1]], i64 [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vssra.nxv2i32.i64.i64(<vscale x 2 x i32> poison, <vscale x 2 x i32> [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vssra_vx_i32m1(vint32m1_t op1, size_t shift, size_t vl) {
- return __riscv_vssra(op1, shift, vl);
+ return __riscv_vssra(op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i32> @test_vssra_vv_i32m2
// CHECK-RV64-SAME: (<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vssra.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> poison, <vscale x 4 x i32> [[OP1]], <vscale x 4 x i32> [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vssra.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> poison, <vscale x 4 x i32> [[OP1]], <vscale x 4 x i32> [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vssra_vv_i32m2(vint32m2_t op1, vuint32m2_t shift, size_t vl) {
- return __riscv_vssra(op1, shift, vl);
+ return __riscv_vssra(op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i32> @test_vssra_vx_i32m2
// CHECK-RV64-SAME: (<vscale x 4 x i32> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vssra.nxv4i32.i64.i64(<vscale x 4 x i32> poison, <vscale x 4 x i32> [[OP1]], i64 [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vssra.nxv4i32.i64.i64(<vscale x 4 x i32> poison, <vscale x 4 x i32> [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vssra_vx_i32m2(vint32m2_t op1, size_t shift, size_t vl) {
- return __riscv_vssra(op1, shift, vl);
+ return __riscv_vssra(op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i32> @test_vssra_vv_i32m4
// CHECK-RV64-SAME: (<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vssra.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> poison, <vscale x 8 x i32> [[OP1]], <vscale x 8 x i32> [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vssra.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> poison, <vscale x 8 x i32> [[OP1]], <vscale x 8 x i32> [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vssra_vv_i32m4(vint32m4_t op1, vuint32m4_t shift, size_t vl) {
- return __riscv_vssra(op1, shift, vl);
+ return __riscv_vssra(op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i32> @test_vssra_vx_i32m4
// CHECK-RV64-SAME: (<vscale x 8 x i32> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vssra.nxv8i32.i64.i64(<vscale x 8 x i32> poison, <vscale x 8 x i32> [[OP1]], i64 [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vssra.nxv8i32.i64.i64(<vscale x 8 x i32> poison, <vscale x 8 x i32> [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vssra_vx_i32m4(vint32m4_t op1, size_t shift, size_t vl) {
- return __riscv_vssra(op1, shift, vl);
+ return __riscv_vssra(op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i32> @test_vssra_vv_i32m8
// CHECK-RV64-SAME: (<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vssra.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> poison, <vscale x 16 x i32> [[OP1]], <vscale x 16 x i32> [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vssra.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> poison, <vscale x 16 x i32> [[OP1]], <vscale x 16 x i32> [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vssra_vv_i32m8(vint32m8_t op1, vuint32m8_t shift, size_t vl) {
- return __riscv_vssra(op1, shift, vl);
+ return __riscv_vssra(op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i32> @test_vssra_vx_i32m8
// CHECK-RV64-SAME: (<vscale x 16 x i32> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vssra.nxv16i32.i64.i64(<vscale x 16 x i32> poison, <vscale x 16 x i32> [[OP1]], i64 [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vssra.nxv16i32.i64.i64(<vscale x 16 x i32> poison, <vscale x 16 x i32> [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vssra_vx_i32m8(vint32m8_t op1, size_t shift, size_t vl) {
- return __riscv_vssra(op1, shift, vl);
+ return __riscv_vssra(op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i64> @test_vssra_vv_i64m1
// CHECK-RV64-SAME: (<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vssra.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> poison, <vscale x 1 x i64> [[OP1]], <vscale x 1 x i64> [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vssra.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> poison, <vscale x 1 x i64> [[OP1]], <vscale x 1 x i64> [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vssra_vv_i64m1(vint64m1_t op1, vuint64m1_t shift, size_t vl) {
- return __riscv_vssra(op1, shift, vl);
+ return __riscv_vssra(op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i64> @test_vssra_vx_i64m1
// CHECK-RV64-SAME: (<vscale x 1 x i64> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vssra.nxv1i64.i64.i64(<vscale x 1 x i64> poison, <vscale x 1 x i64> [[OP1]], i64 [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vssra.nxv1i64.i64.i64(<vscale x 1 x i64> poison, <vscale x 1 x i64> [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vssra_vx_i64m1(vint64m1_t op1, size_t shift, size_t vl) {
- return __riscv_vssra(op1, shift, vl);
+ return __riscv_vssra(op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i64> @test_vssra_vv_i64m2
// CHECK-RV64-SAME: (<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vssra.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> poison, <vscale x 2 x i64> [[OP1]], <vscale x 2 x i64> [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vssra.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> poison, <vscale x 2 x i64> [[OP1]], <vscale x 2 x i64> [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vssra_vv_i64m2(vint64m2_t op1, vuint64m2_t shift, size_t vl) {
- return __riscv_vssra(op1, shift, vl);
+ return __riscv_vssra(op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i64> @test_vssra_vx_i64m2
// CHECK-RV64-SAME: (<vscale x 2 x i64> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vssra.nxv2i64.i64.i64(<vscale x 2 x i64> poison, <vscale x 2 x i64> [[OP1]], i64 [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vssra.nxv2i64.i64.i64(<vscale x 2 x i64> poison, <vscale x 2 x i64> [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vssra_vx_i64m2(vint64m2_t op1, size_t shift, size_t vl) {
- return __riscv_vssra(op1, shift, vl);
+ return __riscv_vssra(op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i64> @test_vssra_vv_i64m4
// CHECK-RV64-SAME: (<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vssra.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> poison, <vscale x 4 x i64> [[OP1]], <vscale x 4 x i64> [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vssra.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> poison, <vscale x 4 x i64> [[OP1]], <vscale x 4 x i64> [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vssra_vv_i64m4(vint64m4_t op1, vuint64m4_t shift, size_t vl) {
- return __riscv_vssra(op1, shift, vl);
+ return __riscv_vssra(op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i64> @test_vssra_vx_i64m4
// CHECK-RV64-SAME: (<vscale x 4 x i64> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vssra.nxv4i64.i64.i64(<vscale x 4 x i64> poison, <vscale x 4 x i64> [[OP1]], i64 [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vssra.nxv4i64.i64.i64(<vscale x 4 x i64> poison, <vscale x 4 x i64> [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vssra_vx_i64m4(vint64m4_t op1, size_t shift, size_t vl) {
- return __riscv_vssra(op1, shift, vl);
+ return __riscv_vssra(op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i64> @test_vssra_vv_i64m8
// CHECK-RV64-SAME: (<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vssra.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> poison, <vscale x 8 x i64> [[OP1]], <vscale x 8 x i64> [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vssra.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> poison, <vscale x 8 x i64> [[OP1]], <vscale x 8 x i64> [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vssra_vv_i64m8(vint64m8_t op1, vuint64m8_t shift, size_t vl) {
- return __riscv_vssra(op1, shift, vl);
+ return __riscv_vssra(op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i64> @test_vssra_vx_i64m8
// CHECK-RV64-SAME: (<vscale x 8 x i64> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vssra.nxv8i64.i64.i64(<vscale x 8 x i64> poison, <vscale x 8 x i64> [[OP1]], i64 [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vssra.nxv8i64.i64.i64(<vscale x 8 x i64> poison, <vscale x 8 x i64> [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vssra_vx_i64m8(vint64m8_t op1, size_t shift, size_t vl) {
- return __riscv_vssra(op1, shift, vl);
+ return __riscv_vssra(op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vssra_vv_i8mf8_m
// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vssra.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> poison, <vscale x 1 x i8> [[OP1]], <vscale x 1 x i8> [[SHIFT]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vssra.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> poison, <vscale x 1 x i8> [[OP1]], <vscale x 1 x i8> [[SHIFT]], <vscale x 1 x i1> [[MASK]], i64 0, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vssra_vv_i8mf8_m(vbool64_t mask, vint8mf8_t op1, vuint8mf8_t shift, size_t vl) {
- return __riscv_vssra(mask, op1, shift, vl);
+ return __riscv_vssra(mask, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vssra_vx_i8mf8_m
// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vssra.mask.nxv1i8.i64.i64(<vscale x 1 x i8> poison, <vscale x 1 x i8> [[OP1]], i64 [[SHIFT]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vssra.mask.nxv1i8.i64.i64(<vscale x 1 x i8> poison, <vscale x 1 x i8> [[OP1]], i64 [[SHIFT]], <vscale x 1 x i1> [[MASK]], i64 0, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vssra_vx_i8mf8_m(vbool64_t mask, vint8mf8_t op1, size_t shift, size_t vl) {
- return __riscv_vssra(mask, op1, shift, vl);
+ return __riscv_vssra(mask, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vssra_vv_i8mf4_m
// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vssra.mask.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> poison, <vscale x 2 x i8> [[OP1]], <vscale x 2 x i8> [[SHIFT]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vssra.mask.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> poison, <vscale x 2 x i8> [[OP1]], <vscale x 2 x i8> [[SHIFT]], <vscale x 2 x i1> [[MASK]], i64 0, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vssra_vv_i8mf4_m(vbool32_t mask, vint8mf4_t op1, vuint8mf4_t shift, size_t vl) {
- return __riscv_vssra(mask, op1, shift, vl);
+ return __riscv_vssra(mask, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vssra_vx_i8mf4_m
// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i8> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vssra.mask.nxv2i8.i64.i64(<vscale x 2 x i8> poison, <vscale x 2 x i8> [[OP1]], i64 [[SHIFT]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vssra.mask.nxv2i8.i64.i64(<vscale x 2 x i8> poison, <vscale x 2 x i8> [[OP1]], i64 [[SHIFT]], <vscale x 2 x i1> [[MASK]], i64 0, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vssra_vx_i8mf4_m(vbool32_t mask, vint8mf4_t op1, size_t shift, size_t vl) {
- return __riscv_vssra(mask, op1, shift, vl);
+ return __riscv_vssra(mask, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vssra_vv_i8mf2_m
// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vssra.mask.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> poison, <vscale x 4 x i8> [[OP1]], <vscale x 4 x i8> [[SHIFT]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vssra.mask.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> poison, <vscale x 4 x i8> [[OP1]], <vscale x 4 x i8> [[SHIFT]], <vscale x 4 x i1> [[MASK]], i64 0, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vssra_vv_i8mf2_m(vbool16_t mask, vint8mf2_t op1, vuint8mf2_t shift, size_t vl) {
- return __riscv_vssra(mask, op1, shift, vl);
+ return __riscv_vssra(mask, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vssra_vx_i8mf2_m
// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i8> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vssra.mask.nxv4i8.i64.i64(<vscale x 4 x i8> poison, <vscale x 4 x i8> [[OP1]], i64 [[SHIFT]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vssra.mask.nxv4i8.i64.i64(<vscale x 4 x i8> poison, <vscale x 4 x i8> [[OP1]], i64 [[SHIFT]], <vscale x 4 x i1> [[MASK]], i64 0, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vssra_vx_i8mf2_m(vbool16_t mask, vint8mf2_t op1, size_t shift, size_t vl) {
- return __riscv_vssra(mask, op1, shift, vl);
+ return __riscv_vssra(mask, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vssra_vv_i8m1_m
// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vssra.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> poison, <vscale x 8 x i8> [[OP1]], <vscale x 8 x i8> [[SHIFT]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vssra.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> poison, <vscale x 8 x i8> [[OP1]], <vscale x 8 x i8> [[SHIFT]], <vscale x 8 x i1> [[MASK]], i64 0, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vssra_vv_i8m1_m(vbool8_t mask, vint8m1_t op1, vuint8m1_t shift, size_t vl) {
- return __riscv_vssra(mask, op1, shift, vl);
+ return __riscv_vssra(mask, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vssra_vx_i8m1_m
// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i8> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vssra.mask.nxv8i8.i64.i64(<vscale x 8 x i8> poison, <vscale x 8 x i8> [[OP1]], i64 [[SHIFT]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vssra.mask.nxv8i8.i64.i64(<vscale x 8 x i8> poison, <vscale x 8 x i8> [[OP1]], i64 [[SHIFT]], <vscale x 8 x i1> [[MASK]], i64 0, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vssra_vx_i8m1_m(vbool8_t mask, vint8m1_t op1, size_t shift, size_t vl) {
- return __riscv_vssra(mask, op1, shift, vl);
+ return __riscv_vssra(mask, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vssra_vv_i8m2_m
// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vssra.mask.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> poison, <vscale x 16 x i8> [[OP1]], <vscale x 16 x i8> [[SHIFT]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vssra.mask.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> poison, <vscale x 16 x i8> [[OP1]], <vscale x 16 x i8> [[SHIFT]], <vscale x 16 x i1> [[MASK]], i64 0, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vssra_vv_i8m2_m(vbool4_t mask, vint8m2_t op1, vuint8m2_t shift, size_t vl) {
- return __riscv_vssra(mask, op1, shift, vl);
+ return __riscv_vssra(mask, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vssra_vx_i8m2_m
// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x i8> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vssra.mask.nxv16i8.i64.i64(<vscale x 16 x i8> poison, <vscale x 16 x i8> [[OP1]], i64 [[SHIFT]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vssra.mask.nxv16i8.i64.i64(<vscale x 16 x i8> poison, <vscale x 16 x i8> [[OP1]], i64 [[SHIFT]], <vscale x 16 x i1> [[MASK]], i64 0, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vssra_vx_i8m2_m(vbool4_t mask, vint8m2_t op1, size_t shift, size_t vl) {
- return __riscv_vssra(mask, op1, shift, vl);
+ return __riscv_vssra(mask, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vssra_vv_i8m4_m
// CHECK-RV64-SAME: (<vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vssra.mask.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> poison, <vscale x 32 x i8> [[OP1]], <vscale x 32 x i8> [[SHIFT]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vssra.mask.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> poison, <vscale x 32 x i8> [[OP1]], <vscale x 32 x i8> [[SHIFT]], <vscale x 32 x i1> [[MASK]], i64 0, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vssra_vv_i8m4_m(vbool2_t mask, vint8m4_t op1, vuint8m4_t shift, size_t vl) {
- return __riscv_vssra(mask, op1, shift, vl);
+ return __riscv_vssra(mask, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vssra_vx_i8m4_m
// CHECK-RV64-SAME: (<vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x i8> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vssra.mask.nxv32i8.i64.i64(<vscale x 32 x i8> poison, <vscale x 32 x i8> [[OP1]], i64 [[SHIFT]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vssra.mask.nxv32i8.i64.i64(<vscale x 32 x i8> poison, <vscale x 32 x i8> [[OP1]], i64 [[SHIFT]], <vscale x 32 x i1> [[MASK]], i64 0, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vssra_vx_i8m4_m(vbool2_t mask, vint8m4_t op1, size_t shift, size_t vl) {
- return __riscv_vssra(mask, op1, shift, vl);
+ return __riscv_vssra(mask, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 64 x i8> @test_vssra_vv_i8m8_m
// CHECK-RV64-SAME: (<vscale x 64 x i1> [[MASK:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vssra.mask.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> poison, <vscale x 64 x i8> [[OP1]], <vscale x 64 x i8> [[SHIFT]], <vscale x 64 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vssra.mask.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> poison, <vscale x 64 x i8> [[OP1]], <vscale x 64 x i8> [[SHIFT]], <vscale x 64 x i1> [[MASK]], i64 0, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vssra_vv_i8m8_m(vbool1_t mask, vint8m8_t op1, vuint8m8_t shift, size_t vl) {
- return __riscv_vssra(mask, op1, shift, vl);
+ return __riscv_vssra(mask, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 64 x i8> @test_vssra_vx_i8m8_m
// CHECK-RV64-SAME: (<vscale x 64 x i1> [[MASK:%.*]], <vscale x 64 x i8> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vssra.mask.nxv64i8.i64.i64(<vscale x 64 x i8> poison, <vscale x 64 x i8> [[OP1]], i64 [[SHIFT]], <vscale x 64 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vssra.mask.nxv64i8.i64.i64(<vscale x 64 x i8> poison, <vscale x 64 x i8> [[OP1]], i64 [[SHIFT]], <vscale x 64 x i1> [[MASK]], i64 0, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vssra_vx_i8m8_m(vbool1_t mask, vint8m8_t op1, size_t shift, size_t vl) {
- return __riscv_vssra(mask, op1, shift, vl);
+ return __riscv_vssra(mask, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i16> @test_vssra_vv_i16mf4_m
// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vssra.mask.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> poison, <vscale x 1 x i16> [[OP1]], <vscale x 1 x i16> [[SHIFT]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vssra.mask.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> poison, <vscale x 1 x i16> [[OP1]], <vscale x 1 x i16> [[SHIFT]], <vscale x 1 x i1> [[MASK]], i64 0, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vssra_vv_i16mf4_m(vbool64_t mask, vint16mf4_t op1, vuint16mf4_t shift, size_t vl) {
- return __riscv_vssra(mask, op1, shift, vl);
+ return __riscv_vssra(mask, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i16> @test_vssra_vx_i16mf4_m
// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vssra.mask.nxv1i16.i64.i64(<vscale x 1 x i16> poison, <vscale x 1 x i16> [[OP1]], i64 [[SHIFT]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vssra.mask.nxv1i16.i64.i64(<vscale x 1 x i16> poison, <vscale x 1 x i16> [[OP1]], i64 [[SHIFT]], <vscale x 1 x i1> [[MASK]], i64 0, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vssra_vx_i16mf4_m(vbool64_t mask, vint16mf4_t op1, size_t shift, size_t vl) {
- return __riscv_vssra(mask, op1, shift, vl);
+ return __riscv_vssra(mask, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i16> @test_vssra_vv_i16mf2_m
// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vssra.mask.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> poison, <vscale x 2 x i16> [[OP1]], <vscale x 2 x i16> [[SHIFT]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vssra.mask.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> poison, <vscale x 2 x i16> [[OP1]], <vscale x 2 x i16> [[SHIFT]], <vscale x 2 x i1> [[MASK]], i64 0, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vssra_vv_i16mf2_m(vbool32_t mask, vint16mf2_t op1, vuint16mf2_t shift, size_t vl) {
- return __riscv_vssra(mask, op1, shift, vl);
+ return __riscv_vssra(mask, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i16> @test_vssra_vx_i16mf2_m
// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vssra.mask.nxv2i16.i64.i64(<vscale x 2 x i16> poison, <vscale x 2 x i16> [[OP1]], i64 [[SHIFT]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vssra.mask.nxv2i16.i64.i64(<vscale x 2 x i16> poison, <vscale x 2 x i16> [[OP1]], i64 [[SHIFT]], <vscale x 2 x i1> [[MASK]], i64 0, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vssra_vx_i16mf2_m(vbool32_t mask, vint16mf2_t op1, size_t shift, size_t vl) {
- return __riscv_vssra(mask, op1, shift, vl);
+ return __riscv_vssra(mask, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vssra_vv_i16m1_m
// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vssra.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> poison, <vscale x 4 x i16> [[OP1]], <vscale x 4 x i16> [[SHIFT]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vssra.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> poison, <vscale x 4 x i16> [[OP1]], <vscale x 4 x i16> [[SHIFT]], <vscale x 4 x i1> [[MASK]], i64 0, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vssra_vv_i16m1_m(vbool16_t mask, vint16m1_t op1, vuint16m1_t shift, size_t vl) {
- return __riscv_vssra(mask, op1, shift, vl);
+ return __riscv_vssra(mask, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vssra_vx_i16m1_m
// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vssra.mask.nxv4i16.i64.i64(<vscale x 4 x i16> poison, <vscale x 4 x i16> [[OP1]], i64 [[SHIFT]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vssra.mask.nxv4i16.i64.i64(<vscale x 4 x i16> poison, <vscale x 4 x i16> [[OP1]], i64 [[SHIFT]], <vscale x 4 x i1> [[MASK]], i64 0, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vssra_vx_i16m1_m(vbool16_t mask, vint16m1_t op1, size_t shift, size_t vl) {
- return __riscv_vssra(mask, op1, shift, vl);
+ return __riscv_vssra(mask, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i16> @test_vssra_vv_i16m2_m
// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vssra.mask.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> poison, <vscale x 8 x i16> [[OP1]], <vscale x 8 x i16> [[SHIFT]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vssra.mask.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> poison, <vscale x 8 x i16> [[OP1]], <vscale x 8 x i16> [[SHIFT]], <vscale x 8 x i1> [[MASK]], i64 0, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vssra_vv_i16m2_m(vbool8_t mask, vint16m2_t op1, vuint16m2_t shift, size_t vl) {
- return __riscv_vssra(mask, op1, shift, vl);
+ return __riscv_vssra(mask, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i16> @test_vssra_vx_i16m2_m
// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vssra.mask.nxv8i16.i64.i64(<vscale x 8 x i16> poison, <vscale x 8 x i16> [[OP1]], i64 [[SHIFT]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vssra.mask.nxv8i16.i64.i64(<vscale x 8 x i16> poison, <vscale x 8 x i16> [[OP1]], i64 [[SHIFT]], <vscale x 8 x i1> [[MASK]], i64 0, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vssra_vx_i16m2_m(vbool8_t mask, vint16m2_t op1, size_t shift, size_t vl) {
- return __riscv_vssra(mask, op1, shift, vl);
+ return __riscv_vssra(mask, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i16> @test_vssra_vv_i16m4_m
// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vssra.mask.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> poison, <vscale x 16 x i16> [[OP1]], <vscale x 16 x i16> [[SHIFT]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vssra.mask.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> poison, <vscale x 16 x i16> [[OP1]], <vscale x 16 x i16> [[SHIFT]], <vscale x 16 x i1> [[MASK]], i64 0, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vssra_vv_i16m4_m(vbool4_t mask, vint16m4_t op1, vuint16m4_t shift, size_t vl) {
- return __riscv_vssra(mask, op1, shift, vl);
+ return __riscv_vssra(mask, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i16> @test_vssra_vx_i16m4_m
// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vssra.mask.nxv16i16.i64.i64(<vscale x 16 x i16> poison, <vscale x 16 x i16> [[OP1]], i64 [[SHIFT]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vssra.mask.nxv16i16.i64.i64(<vscale x 16 x i16> poison, <vscale x 16 x i16> [[OP1]], i64 [[SHIFT]], <vscale x 16 x i1> [[MASK]], i64 0, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vssra_vx_i16m4_m(vbool4_t mask, vint16m4_t op1, size_t shift, size_t vl) {
- return __riscv_vssra(mask, op1, shift, vl);
+ return __riscv_vssra(mask, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i16> @test_vssra_vv_i16m8_m
// CHECK-RV64-SAME: (<vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vssra.mask.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> poison, <vscale x 32 x i16> [[OP1]], <vscale x 32 x i16> [[SHIFT]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vssra.mask.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> poison, <vscale x 32 x i16> [[OP1]], <vscale x 32 x i16> [[SHIFT]], <vscale x 32 x i1> [[MASK]], i64 0, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vssra_vv_i16m8_m(vbool2_t mask, vint16m8_t op1, vuint16m8_t shift, size_t vl) {
- return __riscv_vssra(mask, op1, shift, vl);
+ return __riscv_vssra(mask, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i16> @test_vssra_vx_i16m8_m
// CHECK-RV64-SAME: (<vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vssra.mask.nxv32i16.i64.i64(<vscale x 32 x i16> poison, <vscale x 32 x i16> [[OP1]], i64 [[SHIFT]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vssra.mask.nxv32i16.i64.i64(<vscale x 32 x i16> poison, <vscale x 32 x i16> [[OP1]], i64 [[SHIFT]], <vscale x 32 x i1> [[MASK]], i64 0, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vssra_vx_i16m8_m(vbool2_t mask, vint16m8_t op1, size_t shift, size_t vl) {
- return __riscv_vssra(mask, op1, shift, vl);
+ return __riscv_vssra(mask, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i32> @test_vssra_vv_i32mf2_m
// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vssra.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1]], <vscale x 1 x i32> [[SHIFT]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vssra.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1]], <vscale x 1 x i32> [[SHIFT]], <vscale x 1 x i1> [[MASK]], i64 0, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vssra_vv_i32mf2_m(vbool64_t mask, vint32mf2_t op1, vuint32mf2_t shift, size_t vl) {
- return __riscv_vssra(mask, op1, shift, vl);
+ return __riscv_vssra(mask, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i32> @test_vssra_vx_i32mf2_m
// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vssra.mask.nxv1i32.i64.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1]], i64 [[SHIFT]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vssra.mask.nxv1i32.i64.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1]], i64 [[SHIFT]], <vscale x 1 x i1> [[MASK]], i64 0, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vssra_vx_i32mf2_m(vbool64_t mask, vint32mf2_t op1, size_t shift, size_t vl) {
- return __riscv_vssra(mask, op1, shift, vl);
+ return __riscv_vssra(mask, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i32> @test_vssra_vv_i32m1_m
// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vssra.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> poison, <vscale x 2 x i32> [[OP1]], <vscale x 2 x i32> [[SHIFT]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vssra.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> poison, <vscale x 2 x i32> [[OP1]], <vscale x 2 x i32> [[SHIFT]], <vscale x 2 x i1> [[MASK]], i64 0, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vssra_vv_i32m1_m(vbool32_t mask, vint32m1_t op1, vuint32m1_t shift, size_t vl) {
- return __riscv_vssra(mask, op1, shift, vl);
+ return __riscv_vssra(mask, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i32> @test_vssra_vx_i32m1_m
// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vssra.mask.nxv2i32.i64.i64(<vscale x 2 x i32> poison, <vscale x 2 x i32> [[OP1]], i64 [[SHIFT]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vssra.mask.nxv2i32.i64.i64(<vscale x 2 x i32> poison, <vscale x 2 x i32> [[OP1]], i64 [[SHIFT]], <vscale x 2 x i1> [[MASK]], i64 0, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vssra_vx_i32m1_m(vbool32_t mask, vint32m1_t op1, size_t shift, size_t vl) {
- return __riscv_vssra(mask, op1, shift, vl);
+ return __riscv_vssra(mask, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i32> @test_vssra_vv_i32m2_m
// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vssra.mask.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> poison, <vscale x 4 x i32> [[OP1]], <vscale x 4 x i32> [[SHIFT]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vssra.mask.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> poison, <vscale x 4 x i32> [[OP1]], <vscale x 4 x i32> [[SHIFT]], <vscale x 4 x i1> [[MASK]], i64 0, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vssra_vv_i32m2_m(vbool16_t mask, vint32m2_t op1, vuint32m2_t shift, size_t vl) {
- return __riscv_vssra(mask, op1, shift, vl);
+ return __riscv_vssra(mask, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i32> @test_vssra_vx_i32m2_m
// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vssra.mask.nxv4i32.i64.i64(<vscale x 4 x i32> poison, <vscale x 4 x i32> [[OP1]], i64 [[SHIFT]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vssra.mask.nxv4i32.i64.i64(<vscale x 4 x i32> poison, <vscale x 4 x i32> [[OP1]], i64 [[SHIFT]], <vscale x 4 x i1> [[MASK]], i64 0, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vssra_vx_i32m2_m(vbool16_t mask, vint32m2_t op1, size_t shift, size_t vl) {
- return __riscv_vssra(mask, op1, shift, vl);
+ return __riscv_vssra(mask, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i32> @test_vssra_vv_i32m4_m
// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vssra.mask.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> poison, <vscale x 8 x i32> [[OP1]], <vscale x 8 x i32> [[SHIFT]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vssra.mask.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> poison, <vscale x 8 x i32> [[OP1]], <vscale x 8 x i32> [[SHIFT]], <vscale x 8 x i1> [[MASK]], i64 0, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vssra_vv_i32m4_m(vbool8_t mask, vint32m4_t op1, vuint32m4_t shift, size_t vl) {
- return __riscv_vssra(mask, op1, shift, vl);
+ return __riscv_vssra(mask, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i32> @test_vssra_vx_i32m4_m
// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vssra.mask.nxv8i32.i64.i64(<vscale x 8 x i32> poison, <vscale x 8 x i32> [[OP1]], i64 [[SHIFT]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vssra.mask.nxv8i32.i64.i64(<vscale x 8 x i32> poison, <vscale x 8 x i32> [[OP1]], i64 [[SHIFT]], <vscale x 8 x i1> [[MASK]], i64 0, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vssra_vx_i32m4_m(vbool8_t mask, vint32m4_t op1, size_t shift, size_t vl) {
- return __riscv_vssra(mask, op1, shift, vl);
+ return __riscv_vssra(mask, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i32> @test_vssra_vv_i32m8_m
// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vssra.mask.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> poison, <vscale x 16 x i32> [[OP1]], <vscale x 16 x i32> [[SHIFT]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vssra.mask.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> poison, <vscale x 16 x i32> [[OP1]], <vscale x 16 x i32> [[SHIFT]], <vscale x 16 x i1> [[MASK]], i64 0, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vssra_vv_i32m8_m(vbool4_t mask, vint32m8_t op1, vuint32m8_t shift, size_t vl) {
- return __riscv_vssra(mask, op1, shift, vl);
+ return __riscv_vssra(mask, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i32> @test_vssra_vx_i32m8_m
// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vssra.mask.nxv16i32.i64.i64(<vscale x 16 x i32> poison, <vscale x 16 x i32> [[OP1]], i64 [[SHIFT]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vssra.mask.nxv16i32.i64.i64(<vscale x 16 x i32> poison, <vscale x 16 x i32> [[OP1]], i64 [[SHIFT]], <vscale x 16 x i1> [[MASK]], i64 0, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vssra_vx_i32m8_m(vbool4_t mask, vint32m8_t op1, size_t shift, size_t vl) {
- return __riscv_vssra(mask, op1, shift, vl);
+ return __riscv_vssra(mask, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i64> @test_vssra_vv_i64m1_m
// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vssra.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> poison, <vscale x 1 x i64> [[OP1]], <vscale x 1 x i64> [[SHIFT]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vssra.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> poison, <vscale x 1 x i64> [[OP1]], <vscale x 1 x i64> [[SHIFT]], <vscale x 1 x i1> [[MASK]], i64 0, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vssra_vv_i64m1_m(vbool64_t mask, vint64m1_t op1, vuint64m1_t shift, size_t vl) {
- return __riscv_vssra(mask, op1, shift, vl);
+ return __riscv_vssra(mask, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i64> @test_vssra_vx_i64m1_m
// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vssra.mask.nxv1i64.i64.i64(<vscale x 1 x i64> poison, <vscale x 1 x i64> [[OP1]], i64 [[SHIFT]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vssra.mask.nxv1i64.i64.i64(<vscale x 1 x i64> poison, <vscale x 1 x i64> [[OP1]], i64 [[SHIFT]], <vscale x 1 x i1> [[MASK]], i64 0, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vssra_vx_i64m1_m(vbool64_t mask, vint64m1_t op1, size_t shift, size_t vl) {
- return __riscv_vssra(mask, op1, shift, vl);
+ return __riscv_vssra(mask, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i64> @test_vssra_vv_i64m2_m
// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vssra.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> poison, <vscale x 2 x i64> [[OP1]], <vscale x 2 x i64> [[SHIFT]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vssra.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> poison, <vscale x 2 x i64> [[OP1]], <vscale x 2 x i64> [[SHIFT]], <vscale x 2 x i1> [[MASK]], i64 0, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vssra_vv_i64m2_m(vbool32_t mask, vint64m2_t op1, vuint64m2_t shift, size_t vl) {
- return __riscv_vssra(mask, op1, shift, vl);
+ return __riscv_vssra(mask, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i64> @test_vssra_vx_i64m2_m
// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vssra.mask.nxv2i64.i64.i64(<vscale x 2 x i64> poison, <vscale x 2 x i64> [[OP1]], i64 [[SHIFT]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vssra.mask.nxv2i64.i64.i64(<vscale x 2 x i64> poison, <vscale x 2 x i64> [[OP1]], i64 [[SHIFT]], <vscale x 2 x i1> [[MASK]], i64 0, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vssra_vx_i64m2_m(vbool32_t mask, vint64m2_t op1, size_t shift, size_t vl) {
- return __riscv_vssra(mask, op1, shift, vl);
+ return __riscv_vssra(mask, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i64> @test_vssra_vv_i64m4_m
// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vssra.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> poison, <vscale x 4 x i64> [[OP1]], <vscale x 4 x i64> [[SHIFT]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vssra.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> poison, <vscale x 4 x i64> [[OP1]], <vscale x 4 x i64> [[SHIFT]], <vscale x 4 x i1> [[MASK]], i64 0, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vssra_vv_i64m4_m(vbool16_t mask, vint64m4_t op1, vuint64m4_t shift, size_t vl) {
- return __riscv_vssra(mask, op1, shift, vl);
+ return __riscv_vssra(mask, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i64> @test_vssra_vx_i64m4_m
// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vssra.mask.nxv4i64.i64.i64(<vscale x 4 x i64> poison, <vscale x 4 x i64> [[OP1]], i64 [[SHIFT]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vssra.mask.nxv4i64.i64.i64(<vscale x 4 x i64> poison, <vscale x 4 x i64> [[OP1]], i64 [[SHIFT]], <vscale x 4 x i1> [[MASK]], i64 0, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vssra_vx_i64m4_m(vbool16_t mask, vint64m4_t op1, size_t shift, size_t vl) {
- return __riscv_vssra(mask, op1, shift, vl);
+ return __riscv_vssra(mask, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i64> @test_vssra_vv_i64m8_m
// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vssra.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> poison, <vscale x 8 x i64> [[OP1]], <vscale x 8 x i64> [[SHIFT]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vssra.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> poison, <vscale x 8 x i64> [[OP1]], <vscale x 8 x i64> [[SHIFT]], <vscale x 8 x i1> [[MASK]], i64 0, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vssra_vv_i64m8_m(vbool8_t mask, vint64m8_t op1, vuint64m8_t shift, size_t vl) {
- return __riscv_vssra(mask, op1, shift, vl);
+ return __riscv_vssra(mask, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i64> @test_vssra_vx_i64m8_m
// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vssra.mask.nxv8i64.i64.i64(<vscale x 8 x i64> poison, <vscale x 8 x i64> [[OP1]], i64 [[SHIFT]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vssra.mask.nxv8i64.i64.i64(<vscale x 8 x i64> poison, <vscale x 8 x i64> [[OP1]], i64 [[SHIFT]], <vscale x 8 x i1> [[MASK]], i64 0, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vssra_vx_i64m8_m(vbool8_t mask, vint64m8_t op1, size_t shift, size_t vl) {
- return __riscv_vssra(mask, op1, shift, vl);
+ return __riscv_vssra(mask, op1, shift, __RISCV_VXRM_RNU, vl);
}
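The vssrl tests that follow apply the same update as the vssra tests above: every call site gains an explicit vxrm argument placed immediately before vl, and the CHECK lines gain a matching i64 0 operand, which is how __RISCV_VXRM_RNU (round-to-nearest-up) is encoded. A minimal standalone sketch of the updated unmasked call shape, using a hypothetical helper name not taken from the generated tests:

#include <riscv_vector.h>

// Hypothetical example: with the updated overloads, the rounding mode is a
// required argument right before vl. Passing __RISCV_VXRM_RNU lowers to the
// i64 0 operand visible in the CHECK lines of these tests.
vuint32m1_t scale_down_example(vuint32m1_t acc, size_t shift, size_t vl) {
  // Pre-patch this was __riscv_vssrl(acc, shift, vl); the vxrm operand is now
  // explicit at the call site instead of being left to ambient CSR state.
  return __riscv_vssrl(acc, shift, __RISCV_VXRM_RNU, vl);
}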
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vssrl_vv_u8mf8
// CHECK-RV64-SAME: (<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vssrl.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> poison, <vscale x 1 x i8> [[OP1]], <vscale x 1 x i8> [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vssrl.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> poison, <vscale x 1 x i8> [[OP1]], <vscale x 1 x i8> [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vssrl_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t shift, size_t vl) {
- return __riscv_vssrl(op1, shift, vl);
+ return __riscv_vssrl(op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vssrl_vx_u8mf8
// CHECK-RV64-SAME: (<vscale x 1 x i8> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vssrl.nxv1i8.i64.i64(<vscale x 1 x i8> poison, <vscale x 1 x i8> [[OP1]], i64 [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vssrl.nxv1i8.i64.i64(<vscale x 1 x i8> poison, <vscale x 1 x i8> [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vssrl_vx_u8mf8(vuint8mf8_t op1, size_t shift, size_t vl) {
- return __riscv_vssrl(op1, shift, vl);
+ return __riscv_vssrl(op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vssrl_vv_u8mf4
// CHECK-RV64-SAME: (<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vssrl.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> poison, <vscale x 2 x i8> [[OP1]], <vscale x 2 x i8> [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vssrl.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> poison, <vscale x 2 x i8> [[OP1]], <vscale x 2 x i8> [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vssrl_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t shift, size_t vl) {
- return __riscv_vssrl(op1, shift, vl);
+ return __riscv_vssrl(op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vssrl_vx_u8mf4
// CHECK-RV64-SAME: (<vscale x 2 x i8> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vssrl.nxv2i8.i64.i64(<vscale x 2 x i8> poison, <vscale x 2 x i8> [[OP1]], i64 [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vssrl.nxv2i8.i64.i64(<vscale x 2 x i8> poison, <vscale x 2 x i8> [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vssrl_vx_u8mf4(vuint8mf4_t op1, size_t shift, size_t vl) {
- return __riscv_vssrl(op1, shift, vl);
+ return __riscv_vssrl(op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vssrl_vv_u8mf2
// CHECK-RV64-SAME: (<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vssrl.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> poison, <vscale x 4 x i8> [[OP1]], <vscale x 4 x i8> [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vssrl.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> poison, <vscale x 4 x i8> [[OP1]], <vscale x 4 x i8> [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vssrl_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t shift, size_t vl) {
- return __riscv_vssrl(op1, shift, vl);
+ return __riscv_vssrl(op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vssrl_vx_u8mf2
// CHECK-RV64-SAME: (<vscale x 4 x i8> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vssrl.nxv4i8.i64.i64(<vscale x 4 x i8> poison, <vscale x 4 x i8> [[OP1]], i64 [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vssrl.nxv4i8.i64.i64(<vscale x 4 x i8> poison, <vscale x 4 x i8> [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vssrl_vx_u8mf2(vuint8mf2_t op1, size_t shift, size_t vl) {
- return __riscv_vssrl(op1, shift, vl);
+ return __riscv_vssrl(op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vssrl_vv_u8m1
// CHECK-RV64-SAME: (<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vssrl.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> poison, <vscale x 8 x i8> [[OP1]], <vscale x 8 x i8> [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vssrl.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> poison, <vscale x 8 x i8> [[OP1]], <vscale x 8 x i8> [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vssrl_vv_u8m1(vuint8m1_t op1, vuint8m1_t shift, size_t vl) {
- return __riscv_vssrl(op1, shift, vl);
+ return __riscv_vssrl(op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vssrl_vx_u8m1
// CHECK-RV64-SAME: (<vscale x 8 x i8> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vssrl.nxv8i8.i64.i64(<vscale x 8 x i8> poison, <vscale x 8 x i8> [[OP1]], i64 [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vssrl.nxv8i8.i64.i64(<vscale x 8 x i8> poison, <vscale x 8 x i8> [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vssrl_vx_u8m1(vuint8m1_t op1, size_t shift, size_t vl) {
- return __riscv_vssrl(op1, shift, vl);
+ return __riscv_vssrl(op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vssrl_vv_u8m2
// CHECK-RV64-SAME: (<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vssrl.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> poison, <vscale x 16 x i8> [[OP1]], <vscale x 16 x i8> [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vssrl.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> poison, <vscale x 16 x i8> [[OP1]], <vscale x 16 x i8> [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vssrl_vv_u8m2(vuint8m2_t op1, vuint8m2_t shift, size_t vl) {
- return __riscv_vssrl(op1, shift, vl);
+ return __riscv_vssrl(op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vssrl_vx_u8m2
// CHECK-RV64-SAME: (<vscale x 16 x i8> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vssrl.nxv16i8.i64.i64(<vscale x 16 x i8> poison, <vscale x 16 x i8> [[OP1]], i64 [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vssrl.nxv16i8.i64.i64(<vscale x 16 x i8> poison, <vscale x 16 x i8> [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vssrl_vx_u8m2(vuint8m2_t op1, size_t shift, size_t vl) {
- return __riscv_vssrl(op1, shift, vl);
+ return __riscv_vssrl(op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vssrl_vv_u8m4
// CHECK-RV64-SAME: (<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vssrl.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> poison, <vscale x 32 x i8> [[OP1]], <vscale x 32 x i8> [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vssrl.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> poison, <vscale x 32 x i8> [[OP1]], <vscale x 32 x i8> [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vssrl_vv_u8m4(vuint8m4_t op1, vuint8m4_t shift, size_t vl) {
- return __riscv_vssrl(op1, shift, vl);
+ return __riscv_vssrl(op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vssrl_vx_u8m4
// CHECK-RV64-SAME: (<vscale x 32 x i8> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vssrl.nxv32i8.i64.i64(<vscale x 32 x i8> poison, <vscale x 32 x i8> [[OP1]], i64 [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vssrl.nxv32i8.i64.i64(<vscale x 32 x i8> poison, <vscale x 32 x i8> [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vssrl_vx_u8m4(vuint8m4_t op1, size_t shift, size_t vl) {
- return __riscv_vssrl(op1, shift, vl);
+ return __riscv_vssrl(op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 64 x i8> @test_vssrl_vv_u8m8
// CHECK-RV64-SAME: (<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vssrl.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> poison, <vscale x 64 x i8> [[OP1]], <vscale x 64 x i8> [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vssrl.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> poison, <vscale x 64 x i8> [[OP1]], <vscale x 64 x i8> [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vssrl_vv_u8m8(vuint8m8_t op1, vuint8m8_t shift, size_t vl) {
- return __riscv_vssrl(op1, shift, vl);
+ return __riscv_vssrl(op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 64 x i8> @test_vssrl_vx_u8m8
// CHECK-RV64-SAME: (<vscale x 64 x i8> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vssrl.nxv64i8.i64.i64(<vscale x 64 x i8> poison, <vscale x 64 x i8> [[OP1]], i64 [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vssrl.nxv64i8.i64.i64(<vscale x 64 x i8> poison, <vscale x 64 x i8> [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vssrl_vx_u8m8(vuint8m8_t op1, size_t shift, size_t vl) {
- return __riscv_vssrl(op1, shift, vl);
+ return __riscv_vssrl(op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i16> @test_vssrl_vv_u16mf4
// CHECK-RV64-SAME: (<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vssrl.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> poison, <vscale x 1 x i16> [[OP1]], <vscale x 1 x i16> [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vssrl.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> poison, <vscale x 1 x i16> [[OP1]], <vscale x 1 x i16> [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vssrl_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t shift, size_t vl) {
- return __riscv_vssrl(op1, shift, vl);
+ return __riscv_vssrl(op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i16> @test_vssrl_vx_u16mf4
// CHECK-RV64-SAME: (<vscale x 1 x i16> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vssrl.nxv1i16.i64.i64(<vscale x 1 x i16> poison, <vscale x 1 x i16> [[OP1]], i64 [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vssrl.nxv1i16.i64.i64(<vscale x 1 x i16> poison, <vscale x 1 x i16> [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vssrl_vx_u16mf4(vuint16mf4_t op1, size_t shift, size_t vl) {
- return __riscv_vssrl(op1, shift, vl);
+ return __riscv_vssrl(op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i16> @test_vssrl_vv_u16mf2
// CHECK-RV64-SAME: (<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vssrl.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> poison, <vscale x 2 x i16> [[OP1]], <vscale x 2 x i16> [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vssrl.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> poison, <vscale x 2 x i16> [[OP1]], <vscale x 2 x i16> [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vssrl_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t shift, size_t vl) {
- return __riscv_vssrl(op1, shift, vl);
+ return __riscv_vssrl(op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i16> @test_vssrl_vx_u16mf2
// CHECK-RV64-SAME: (<vscale x 2 x i16> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vssrl.nxv2i16.i64.i64(<vscale x 2 x i16> poison, <vscale x 2 x i16> [[OP1]], i64 [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vssrl.nxv2i16.i64.i64(<vscale x 2 x i16> poison, <vscale x 2 x i16> [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vssrl_vx_u16mf2(vuint16mf2_t op1, size_t shift, size_t vl) {
- return __riscv_vssrl(op1, shift, vl);
+ return __riscv_vssrl(op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vssrl_vv_u16m1
// CHECK-RV64-SAME: (<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vssrl.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> poison, <vscale x 4 x i16> [[OP1]], <vscale x 4 x i16> [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vssrl.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> poison, <vscale x 4 x i16> [[OP1]], <vscale x 4 x i16> [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vssrl_vv_u16m1(vuint16m1_t op1, vuint16m1_t shift, size_t vl) {
- return __riscv_vssrl(op1, shift, vl);
+ return __riscv_vssrl(op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vssrl_vx_u16m1
// CHECK-RV64-SAME: (<vscale x 4 x i16> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vssrl.nxv4i16.i64.i64(<vscale x 4 x i16> poison, <vscale x 4 x i16> [[OP1]], i64 [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vssrl.nxv4i16.i64.i64(<vscale x 4 x i16> poison, <vscale x 4 x i16> [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vssrl_vx_u16m1(vuint16m1_t op1, size_t shift, size_t vl) {
- return __riscv_vssrl(op1, shift, vl);
+ return __riscv_vssrl(op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i16> @test_vssrl_vv_u16m2
// CHECK-RV64-SAME: (<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vssrl.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> poison, <vscale x 8 x i16> [[OP1]], <vscale x 8 x i16> [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vssrl.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> poison, <vscale x 8 x i16> [[OP1]], <vscale x 8 x i16> [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vssrl_vv_u16m2(vuint16m2_t op1, vuint16m2_t shift, size_t vl) {
- return __riscv_vssrl(op1, shift, vl);
+ return __riscv_vssrl(op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i16> @test_vssrl_vx_u16m2
// CHECK-RV64-SAME: (<vscale x 8 x i16> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vssrl.nxv8i16.i64.i64(<vscale x 8 x i16> poison, <vscale x 8 x i16> [[OP1]], i64 [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vssrl.nxv8i16.i64.i64(<vscale x 8 x i16> poison, <vscale x 8 x i16> [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vssrl_vx_u16m2(vuint16m2_t op1, size_t shift, size_t vl) {
- return __riscv_vssrl(op1, shift, vl);
+ return __riscv_vssrl(op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i16> @test_vssrl_vv_u16m4
// CHECK-RV64-SAME: (<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vssrl.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> poison, <vscale x 16 x i16> [[OP1]], <vscale x 16 x i16> [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vssrl.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> poison, <vscale x 16 x i16> [[OP1]], <vscale x 16 x i16> [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vssrl_vv_u16m4(vuint16m4_t op1, vuint16m4_t shift, size_t vl) {
- return __riscv_vssrl(op1, shift, vl);
+ return __riscv_vssrl(op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i16> @test_vssrl_vx_u16m4
// CHECK-RV64-SAME: (<vscale x 16 x i16> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vssrl.nxv16i16.i64.i64(<vscale x 16 x i16> poison, <vscale x 16 x i16> [[OP1]], i64 [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vssrl.nxv16i16.i64.i64(<vscale x 16 x i16> poison, <vscale x 16 x i16> [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vssrl_vx_u16m4(vuint16m4_t op1, size_t shift, size_t vl) {
- return __riscv_vssrl(op1, shift, vl);
+ return __riscv_vssrl(op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i16> @test_vssrl_vv_u16m8
// CHECK-RV64-SAME: (<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vssrl.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> poison, <vscale x 32 x i16> [[OP1]], <vscale x 32 x i16> [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vssrl.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> poison, <vscale x 32 x i16> [[OP1]], <vscale x 32 x i16> [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vssrl_vv_u16m8(vuint16m8_t op1, vuint16m8_t shift, size_t vl) {
- return __riscv_vssrl(op1, shift, vl);
+ return __riscv_vssrl(op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i16> @test_vssrl_vx_u16m8
// CHECK-RV64-SAME: (<vscale x 32 x i16> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vssrl.nxv32i16.i64.i64(<vscale x 32 x i16> poison, <vscale x 32 x i16> [[OP1]], i64 [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vssrl.nxv32i16.i64.i64(<vscale x 32 x i16> poison, <vscale x 32 x i16> [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vssrl_vx_u16m8(vuint16m8_t op1, size_t shift, size_t vl) {
- return __riscv_vssrl(op1, shift, vl);
+ return __riscv_vssrl(op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i32> @test_vssrl_vv_u32mf2
// CHECK-RV64-SAME: (<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vssrl.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1]], <vscale x 1 x i32> [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vssrl.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1]], <vscale x 1 x i32> [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vssrl_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t shift, size_t vl) {
- return __riscv_vssrl(op1, shift, vl);
+ return __riscv_vssrl(op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i32> @test_vssrl_vx_u32mf2
// CHECK-RV64-SAME: (<vscale x 1 x i32> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vssrl.nxv1i32.i64.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1]], i64 [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vssrl.nxv1i32.i64.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vssrl_vx_u32mf2(vuint32mf2_t op1, size_t shift, size_t vl) {
- return __riscv_vssrl(op1, shift, vl);
+ return __riscv_vssrl(op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i32> @test_vssrl_vv_u32m1
// CHECK-RV64-SAME: (<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vssrl.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> poison, <vscale x 2 x i32> [[OP1]], <vscale x 2 x i32> [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vssrl.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> poison, <vscale x 2 x i32> [[OP1]], <vscale x 2 x i32> [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vssrl_vv_u32m1(vuint32m1_t op1, vuint32m1_t shift, size_t vl) {
- return __riscv_vssrl(op1, shift, vl);
+ return __riscv_vssrl(op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i32> @test_vssrl_vx_u32m1
// CHECK-RV64-SAME: (<vscale x 2 x i32> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vssrl.nxv2i32.i64.i64(<vscale x 2 x i32> poison, <vscale x 2 x i32> [[OP1]], i64 [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vssrl.nxv2i32.i64.i64(<vscale x 2 x i32> poison, <vscale x 2 x i32> [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vssrl_vx_u32m1(vuint32m1_t op1, size_t shift, size_t vl) {
- return __riscv_vssrl(op1, shift, vl);
+ return __riscv_vssrl(op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i32> @test_vssrl_vv_u32m2
// CHECK-RV64-SAME: (<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vssrl.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> poison, <vscale x 4 x i32> [[OP1]], <vscale x 4 x i32> [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vssrl.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> poison, <vscale x 4 x i32> [[OP1]], <vscale x 4 x i32> [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vssrl_vv_u32m2(vuint32m2_t op1, vuint32m2_t shift, size_t vl) {
- return __riscv_vssrl(op1, shift, vl);
+ return __riscv_vssrl(op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i32> @test_vssrl_vx_u32m2
// CHECK-RV64-SAME: (<vscale x 4 x i32> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vssrl.nxv4i32.i64.i64(<vscale x 4 x i32> poison, <vscale x 4 x i32> [[OP1]], i64 [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vssrl.nxv4i32.i64.i64(<vscale x 4 x i32> poison, <vscale x 4 x i32> [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vssrl_vx_u32m2(vuint32m2_t op1, size_t shift, size_t vl) {
- return __riscv_vssrl(op1, shift, vl);
+ return __riscv_vssrl(op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i32> @test_vssrl_vv_u32m4
// CHECK-RV64-SAME: (<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vssrl.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> poison, <vscale x 8 x i32> [[OP1]], <vscale x 8 x i32> [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vssrl.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> poison, <vscale x 8 x i32> [[OP1]], <vscale x 8 x i32> [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vssrl_vv_u32m4(vuint32m4_t op1, vuint32m4_t shift, size_t vl) {
- return __riscv_vssrl(op1, shift, vl);
+ return __riscv_vssrl(op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i32> @test_vssrl_vx_u32m4
// CHECK-RV64-SAME: (<vscale x 8 x i32> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vssrl.nxv8i32.i64.i64(<vscale x 8 x i32> poison, <vscale x 8 x i32> [[OP1]], i64 [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vssrl.nxv8i32.i64.i64(<vscale x 8 x i32> poison, <vscale x 8 x i32> [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vssrl_vx_u32m4(vuint32m4_t op1, size_t shift, size_t vl) {
- return __riscv_vssrl(op1, shift, vl);
+ return __riscv_vssrl(op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i32> @test_vssrl_vv_u32m8
// CHECK-RV64-SAME: (<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vssrl.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> poison, <vscale x 16 x i32> [[OP1]], <vscale x 16 x i32> [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vssrl.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> poison, <vscale x 16 x i32> [[OP1]], <vscale x 16 x i32> [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vssrl_vv_u32m8(vuint32m8_t op1, vuint32m8_t shift, size_t vl) {
- return __riscv_vssrl(op1, shift, vl);
+ return __riscv_vssrl(op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i32> @test_vssrl_vx_u32m8
// CHECK-RV64-SAME: (<vscale x 16 x i32> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vssrl.nxv16i32.i64.i64(<vscale x 16 x i32> poison, <vscale x 16 x i32> [[OP1]], i64 [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vssrl.nxv16i32.i64.i64(<vscale x 16 x i32> poison, <vscale x 16 x i32> [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vssrl_vx_u32m8(vuint32m8_t op1, size_t shift, size_t vl) {
- return __riscv_vssrl(op1, shift, vl);
+ return __riscv_vssrl(op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i64> @test_vssrl_vv_u64m1
// CHECK-RV64-SAME: (<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vssrl.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> poison, <vscale x 1 x i64> [[OP1]], <vscale x 1 x i64> [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vssrl.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> poison, <vscale x 1 x i64> [[OP1]], <vscale x 1 x i64> [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vssrl_vv_u64m1(vuint64m1_t op1, vuint64m1_t shift, size_t vl) {
- return __riscv_vssrl(op1, shift, vl);
+ return __riscv_vssrl(op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i64> @test_vssrl_vx_u64m1
// CHECK-RV64-SAME: (<vscale x 1 x i64> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vssrl.nxv1i64.i64.i64(<vscale x 1 x i64> poison, <vscale x 1 x i64> [[OP1]], i64 [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vssrl.nxv1i64.i64.i64(<vscale x 1 x i64> poison, <vscale x 1 x i64> [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vssrl_vx_u64m1(vuint64m1_t op1, size_t shift, size_t vl) {
- return __riscv_vssrl(op1, shift, vl);
+ return __riscv_vssrl(op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i64> @test_vssrl_vv_u64m2
// CHECK-RV64-SAME: (<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vssrl.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> poison, <vscale x 2 x i64> [[OP1]], <vscale x 2 x i64> [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vssrl.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> poison, <vscale x 2 x i64> [[OP1]], <vscale x 2 x i64> [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vssrl_vv_u64m2(vuint64m2_t op1, vuint64m2_t shift, size_t vl) {
- return __riscv_vssrl(op1, shift, vl);
+ return __riscv_vssrl(op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i64> @test_vssrl_vx_u64m2
// CHECK-RV64-SAME: (<vscale x 2 x i64> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vssrl.nxv2i64.i64.i64(<vscale x 2 x i64> poison, <vscale x 2 x i64> [[OP1]], i64 [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vssrl.nxv2i64.i64.i64(<vscale x 2 x i64> poison, <vscale x 2 x i64> [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vssrl_vx_u64m2(vuint64m2_t op1, size_t shift, size_t vl) {
- return __riscv_vssrl(op1, shift, vl);
+ return __riscv_vssrl(op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i64> @test_vssrl_vv_u64m4
// CHECK-RV64-SAME: (<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vssrl.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> poison, <vscale x 4 x i64> [[OP1]], <vscale x 4 x i64> [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vssrl.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> poison, <vscale x 4 x i64> [[OP1]], <vscale x 4 x i64> [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vssrl_vv_u64m4(vuint64m4_t op1, vuint64m4_t shift, size_t vl) {
- return __riscv_vssrl(op1, shift, vl);
+ return __riscv_vssrl(op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i64> @test_vssrl_vx_u64m4
// CHECK-RV64-SAME: (<vscale x 4 x i64> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vssrl.nxv4i64.i64.i64(<vscale x 4 x i64> poison, <vscale x 4 x i64> [[OP1]], i64 [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vssrl.nxv4i64.i64.i64(<vscale x 4 x i64> poison, <vscale x 4 x i64> [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vssrl_vx_u64m4(vuint64m4_t op1, size_t shift, size_t vl) {
- return __riscv_vssrl(op1, shift, vl);
+ return __riscv_vssrl(op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i64> @test_vssrl_vv_u64m8
// CHECK-RV64-SAME: (<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vssrl.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> poison, <vscale x 8 x i64> [[OP1]], <vscale x 8 x i64> [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vssrl.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> poison, <vscale x 8 x i64> [[OP1]], <vscale x 8 x i64> [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vssrl_vv_u64m8(vuint64m8_t op1, vuint64m8_t shift, size_t vl) {
- return __riscv_vssrl(op1, shift, vl);
+ return __riscv_vssrl(op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i64> @test_vssrl_vx_u64m8
// CHECK-RV64-SAME: (<vscale x 8 x i64> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vssrl.nxv8i64.i64.i64(<vscale x 8 x i64> poison, <vscale x 8 x i64> [[OP1]], i64 [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vssrl.nxv8i64.i64.i64(<vscale x 8 x i64> poison, <vscale x 8 x i64> [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vssrl_vx_u64m8(vuint64m8_t op1, size_t shift, size_t vl) {
- return __riscv_vssrl(op1, shift, vl);
+ return __riscv_vssrl(op1, shift, __RISCV_VXRM_RNU, vl);
}
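The masked vssrl tests below keep the same argument order as the masked vssra tests earlier in the file: the mask comes first, then the source operands, then vxrm, then vl. A small sketch of that shape, again with a hypothetical helper name rather than one of the generated test functions:

#include <riscv_vector.h>

// Hypothetical example of the masked overload: the mask precedes the operands
// and __RISCV_VXRM_RNU is passed immediately before vl, matching the masked
// test call sites that follow.
vuint8m1_t masked_scale_example(vbool8_t mask, vuint8m1_t op1, size_t shift,
                                size_t vl) {
  return __riscv_vssrl(mask, op1, shift, __RISCV_VXRM_RNU, vl);
}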
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vssrl_vv_u8mf8_m
// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vssrl.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> poison, <vscale x 1 x i8> [[OP1]], <vscale x 1 x i8> [[SHIFT]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vssrl.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> poison, <vscale x 1 x i8> [[OP1]], <vscale x 1 x i8> [[SHIFT]], <vscale x 1 x i1> [[MASK]], i64 0, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vssrl_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t op1, vuint8mf8_t shift, size_t vl) {
- return __riscv_vssrl(mask, op1, shift, vl);
+ return __riscv_vssrl(mask, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vssrl_vx_u8mf8_m
// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vssrl.mask.nxv1i8.i64.i64(<vscale x 1 x i8> poison, <vscale x 1 x i8> [[OP1]], i64 [[SHIFT]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vssrl.mask.nxv1i8.i64.i64(<vscale x 1 x i8> poison, <vscale x 1 x i8> [[OP1]], i64 [[SHIFT]], <vscale x 1 x i1> [[MASK]], i64 0, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vssrl_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t op1, size_t shift, size_t vl) {
- return __riscv_vssrl(mask, op1, shift, vl);
+ return __riscv_vssrl(mask, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vssrl_vv_u8mf4_m
// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vssrl.mask.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> poison, <vscale x 2 x i8> [[OP1]], <vscale x 2 x i8> [[SHIFT]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vssrl.mask.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> poison, <vscale x 2 x i8> [[OP1]], <vscale x 2 x i8> [[SHIFT]], <vscale x 2 x i1> [[MASK]], i64 0, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vssrl_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t op1, vuint8mf4_t shift, size_t vl) {
- return __riscv_vssrl(mask, op1, shift, vl);
+ return __riscv_vssrl(mask, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vssrl_vx_u8mf4_m
// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i8> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vssrl.mask.nxv2i8.i64.i64(<vscale x 2 x i8> poison, <vscale x 2 x i8> [[OP1]], i64 [[SHIFT]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vssrl.mask.nxv2i8.i64.i64(<vscale x 2 x i8> poison, <vscale x 2 x i8> [[OP1]], i64 [[SHIFT]], <vscale x 2 x i1> [[MASK]], i64 0, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vssrl_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t op1, size_t shift, size_t vl) {
- return __riscv_vssrl(mask, op1, shift, vl);
+ return __riscv_vssrl(mask, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vssrl_vv_u8mf2_m
// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vssrl.mask.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> poison, <vscale x 4 x i8> [[OP1]], <vscale x 4 x i8> [[SHIFT]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vssrl.mask.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> poison, <vscale x 4 x i8> [[OP1]], <vscale x 4 x i8> [[SHIFT]], <vscale x 4 x i1> [[MASK]], i64 0, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vssrl_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t op1, vuint8mf2_t shift, size_t vl) {
- return __riscv_vssrl(mask, op1, shift, vl);
+ return __riscv_vssrl(mask, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vssrl_vx_u8mf2_m
// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i8> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vssrl.mask.nxv4i8.i64.i64(<vscale x 4 x i8> poison, <vscale x 4 x i8> [[OP1]], i64 [[SHIFT]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vssrl.mask.nxv4i8.i64.i64(<vscale x 4 x i8> poison, <vscale x 4 x i8> [[OP1]], i64 [[SHIFT]], <vscale x 4 x i1> [[MASK]], i64 0, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vssrl_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t op1, size_t shift, size_t vl) {
- return __riscv_vssrl(mask, op1, shift, vl);
+ return __riscv_vssrl(mask, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vssrl_vv_u8m1_m
// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vssrl.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> poison, <vscale x 8 x i8> [[OP1]], <vscale x 8 x i8> [[SHIFT]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vssrl.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> poison, <vscale x 8 x i8> [[OP1]], <vscale x 8 x i8> [[SHIFT]], <vscale x 8 x i1> [[MASK]], i64 0, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vssrl_vv_u8m1_m(vbool8_t mask, vuint8m1_t op1, vuint8m1_t shift, size_t vl) {
- return __riscv_vssrl(mask, op1, shift, vl);
+ return __riscv_vssrl(mask, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vssrl_vx_u8m1_m
// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i8> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vssrl.mask.nxv8i8.i64.i64(<vscale x 8 x i8> poison, <vscale x 8 x i8> [[OP1]], i64 [[SHIFT]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vssrl.mask.nxv8i8.i64.i64(<vscale x 8 x i8> poison, <vscale x 8 x i8> [[OP1]], i64 [[SHIFT]], <vscale x 8 x i1> [[MASK]], i64 0, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vssrl_vx_u8m1_m(vbool8_t mask, vuint8m1_t op1, size_t shift, size_t vl) {
- return __riscv_vssrl(mask, op1, shift, vl);
+ return __riscv_vssrl(mask, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vssrl_vv_u8m2_m
// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vssrl.mask.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> poison, <vscale x 16 x i8> [[OP1]], <vscale x 16 x i8> [[SHIFT]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vssrl.mask.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> poison, <vscale x 16 x i8> [[OP1]], <vscale x 16 x i8> [[SHIFT]], <vscale x 16 x i1> [[MASK]], i64 0, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vssrl_vv_u8m2_m(vbool4_t mask, vuint8m2_t op1, vuint8m2_t shift, size_t vl) {
- return __riscv_vssrl(mask, op1, shift, vl);
+ return __riscv_vssrl(mask, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vssrl_vx_u8m2_m
// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x i8> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vssrl.mask.nxv16i8.i64.i64(<vscale x 16 x i8> poison, <vscale x 16 x i8> [[OP1]], i64 [[SHIFT]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vssrl.mask.nxv16i8.i64.i64(<vscale x 16 x i8> poison, <vscale x 16 x i8> [[OP1]], i64 [[SHIFT]], <vscale x 16 x i1> [[MASK]], i64 0, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vssrl_vx_u8m2_m(vbool4_t mask, vuint8m2_t op1, size_t shift, size_t vl) {
- return __riscv_vssrl(mask, op1, shift, vl);
+ return __riscv_vssrl(mask, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vssrl_vv_u8m4_m
// CHECK-RV64-SAME: (<vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vssrl.mask.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> poison, <vscale x 32 x i8> [[OP1]], <vscale x 32 x i8> [[SHIFT]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vssrl.mask.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> poison, <vscale x 32 x i8> [[OP1]], <vscale x 32 x i8> [[SHIFT]], <vscale x 32 x i1> [[MASK]], i64 0, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vssrl_vv_u8m4_m(vbool2_t mask, vuint8m4_t op1, vuint8m4_t shift, size_t vl) {
- return __riscv_vssrl(mask, op1, shift, vl);
+ return __riscv_vssrl(mask, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vssrl_vx_u8m4_m
// CHECK-RV64-SAME: (<vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x i8> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vssrl.mask.nxv32i8.i64.i64(<vscale x 32 x i8> poison, <vscale x 32 x i8> [[OP1]], i64 [[SHIFT]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vssrl.mask.nxv32i8.i64.i64(<vscale x 32 x i8> poison, <vscale x 32 x i8> [[OP1]], i64 [[SHIFT]], <vscale x 32 x i1> [[MASK]], i64 0, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vssrl_vx_u8m4_m(vbool2_t mask, vuint8m4_t op1, size_t shift, size_t vl) {
- return __riscv_vssrl(mask, op1, shift, vl);
+ return __riscv_vssrl(mask, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 64 x i8> @test_vssrl_vv_u8m8_m
// CHECK-RV64-SAME: (<vscale x 64 x i1> [[MASK:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vssrl.mask.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> poison, <vscale x 64 x i8> [[OP1]], <vscale x 64 x i8> [[SHIFT]], <vscale x 64 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vssrl.mask.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> poison, <vscale x 64 x i8> [[OP1]], <vscale x 64 x i8> [[SHIFT]], <vscale x 64 x i1> [[MASK]], i64 0, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vssrl_vv_u8m8_m(vbool1_t mask, vuint8m8_t op1, vuint8m8_t shift, size_t vl) {
- return __riscv_vssrl(mask, op1, shift, vl);
+ return __riscv_vssrl(mask, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 64 x i8> @test_vssrl_vx_u8m8_m
// CHECK-RV64-SAME: (<vscale x 64 x i1> [[MASK:%.*]], <vscale x 64 x i8> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vssrl.mask.nxv64i8.i64.i64(<vscale x 64 x i8> poison, <vscale x 64 x i8> [[OP1]], i64 [[SHIFT]], <vscale x 64 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vssrl.mask.nxv64i8.i64.i64(<vscale x 64 x i8> poison, <vscale x 64 x i8> [[OP1]], i64 [[SHIFT]], <vscale x 64 x i1> [[MASK]], i64 0, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vssrl_vx_u8m8_m(vbool1_t mask, vuint8m8_t op1, size_t shift, size_t vl) {
- return __riscv_vssrl(mask, op1, shift, vl);
+ return __riscv_vssrl(mask, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i16> @test_vssrl_vv_u16mf4_m
// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vssrl.mask.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> poison, <vscale x 1 x i16> [[OP1]], <vscale x 1 x i16> [[SHIFT]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vssrl.mask.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> poison, <vscale x 1 x i16> [[OP1]], <vscale x 1 x i16> [[SHIFT]], <vscale x 1 x i1> [[MASK]], i64 0, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vssrl_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t op1, vuint16mf4_t shift, size_t vl) {
- return __riscv_vssrl(mask, op1, shift, vl);
+ return __riscv_vssrl(mask, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i16> @test_vssrl_vx_u16mf4_m
// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vssrl.mask.nxv1i16.i64.i64(<vscale x 1 x i16> poison, <vscale x 1 x i16> [[OP1]], i64 [[SHIFT]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vssrl.mask.nxv1i16.i64.i64(<vscale x 1 x i16> poison, <vscale x 1 x i16> [[OP1]], i64 [[SHIFT]], <vscale x 1 x i1> [[MASK]], i64 0, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vssrl_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t op1, size_t shift, size_t vl) {
- return __riscv_vssrl(mask, op1, shift, vl);
+ return __riscv_vssrl(mask, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i16> @test_vssrl_vv_u16mf2_m
// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vssrl.mask.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> poison, <vscale x 2 x i16> [[OP1]], <vscale x 2 x i16> [[SHIFT]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vssrl.mask.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> poison, <vscale x 2 x i16> [[OP1]], <vscale x 2 x i16> [[SHIFT]], <vscale x 2 x i1> [[MASK]], i64 0, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vssrl_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t op1, vuint16mf2_t shift, size_t vl) {
- return __riscv_vssrl(mask, op1, shift, vl);
+ return __riscv_vssrl(mask, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i16> @test_vssrl_vx_u16mf2_m
// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vssrl.mask.nxv2i16.i64.i64(<vscale x 2 x i16> poison, <vscale x 2 x i16> [[OP1]], i64 [[SHIFT]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vssrl.mask.nxv2i16.i64.i64(<vscale x 2 x i16> poison, <vscale x 2 x i16> [[OP1]], i64 [[SHIFT]], <vscale x 2 x i1> [[MASK]], i64 0, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vssrl_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t op1, size_t shift, size_t vl) {
- return __riscv_vssrl(mask, op1, shift, vl);
+ return __riscv_vssrl(mask, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vssrl_vv_u16m1_m
// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vssrl.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> poison, <vscale x 4 x i16> [[OP1]], <vscale x 4 x i16> [[SHIFT]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vssrl.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> poison, <vscale x 4 x i16> [[OP1]], <vscale x 4 x i16> [[SHIFT]], <vscale x 4 x i1> [[MASK]], i64 0, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vssrl_vv_u16m1_m(vbool16_t mask, vuint16m1_t op1, vuint16m1_t shift, size_t vl) {
- return __riscv_vssrl(mask, op1, shift, vl);
+ return __riscv_vssrl(mask, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vssrl_vx_u16m1_m
// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vssrl.mask.nxv4i16.i64.i64(<vscale x 4 x i16> poison, <vscale x 4 x i16> [[OP1]], i64 [[SHIFT]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vssrl.mask.nxv4i16.i64.i64(<vscale x 4 x i16> poison, <vscale x 4 x i16> [[OP1]], i64 [[SHIFT]], <vscale x 4 x i1> [[MASK]], i64 0, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vssrl_vx_u16m1_m(vbool16_t mask, vuint16m1_t op1, size_t shift, size_t vl) {
- return __riscv_vssrl(mask, op1, shift, vl);
+ return __riscv_vssrl(mask, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i16> @test_vssrl_vv_u16m2_m
// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vssrl.mask.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> poison, <vscale x 8 x i16> [[OP1]], <vscale x 8 x i16> [[SHIFT]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vssrl.mask.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> poison, <vscale x 8 x i16> [[OP1]], <vscale x 8 x i16> [[SHIFT]], <vscale x 8 x i1> [[MASK]], i64 0, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vssrl_vv_u16m2_m(vbool8_t mask, vuint16m2_t op1, vuint16m2_t shift, size_t vl) {
- return __riscv_vssrl(mask, op1, shift, vl);
+ return __riscv_vssrl(mask, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i16> @test_vssrl_vx_u16m2_m
// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vssrl.mask.nxv8i16.i64.i64(<vscale x 8 x i16> poison, <vscale x 8 x i16> [[OP1]], i64 [[SHIFT]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vssrl.mask.nxv8i16.i64.i64(<vscale x 8 x i16> poison, <vscale x 8 x i16> [[OP1]], i64 [[SHIFT]], <vscale x 8 x i1> [[MASK]], i64 0, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vssrl_vx_u16m2_m(vbool8_t mask, vuint16m2_t op1, size_t shift, size_t vl) {
- return __riscv_vssrl(mask, op1, shift, vl);
+ return __riscv_vssrl(mask, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i16> @test_vssrl_vv_u16m4_m
// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vssrl.mask.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> poison, <vscale x 16 x i16> [[OP1]], <vscale x 16 x i16> [[SHIFT]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vssrl.mask.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> poison, <vscale x 16 x i16> [[OP1]], <vscale x 16 x i16> [[SHIFT]], <vscale x 16 x i1> [[MASK]], i64 0, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vssrl_vv_u16m4_m(vbool4_t mask, vuint16m4_t op1, vuint16m4_t shift, size_t vl) {
- return __riscv_vssrl(mask, op1, shift, vl);
+ return __riscv_vssrl(mask, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i16> @test_vssrl_vx_u16m4_m
// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vssrl.mask.nxv16i16.i64.i64(<vscale x 16 x i16> poison, <vscale x 16 x i16> [[OP1]], i64 [[SHIFT]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vssrl.mask.nxv16i16.i64.i64(<vscale x 16 x i16> poison, <vscale x 16 x i16> [[OP1]], i64 [[SHIFT]], <vscale x 16 x i1> [[MASK]], i64 0, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vssrl_vx_u16m4_m(vbool4_t mask, vuint16m4_t op1, size_t shift, size_t vl) {
- return __riscv_vssrl(mask, op1, shift, vl);
+ return __riscv_vssrl(mask, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i16> @test_vssrl_vv_u16m8_m
// CHECK-RV64-SAME: (<vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vssrl.mask.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> poison, <vscale x 32 x i16> [[OP1]], <vscale x 32 x i16> [[SHIFT]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vssrl.mask.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> poison, <vscale x 32 x i16> [[OP1]], <vscale x 32 x i16> [[SHIFT]], <vscale x 32 x i1> [[MASK]], i64 0, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vssrl_vv_u16m8_m(vbool2_t mask, vuint16m8_t op1, vuint16m8_t shift, size_t vl) {
- return __riscv_vssrl(mask, op1, shift, vl);
+ return __riscv_vssrl(mask, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i16> @test_vssrl_vx_u16m8_m
// CHECK-RV64-SAME: (<vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vssrl.mask.nxv32i16.i64.i64(<vscale x 32 x i16> poison, <vscale x 32 x i16> [[OP1]], i64 [[SHIFT]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vssrl.mask.nxv32i16.i64.i64(<vscale x 32 x i16> poison, <vscale x 32 x i16> [[OP1]], i64 [[SHIFT]], <vscale x 32 x i1> [[MASK]], i64 0, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vssrl_vx_u16m8_m(vbool2_t mask, vuint16m8_t op1, size_t shift, size_t vl) {
- return __riscv_vssrl(mask, op1, shift, vl);
+ return __riscv_vssrl(mask, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i32> @test_vssrl_vv_u32mf2_m
// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vssrl.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1]], <vscale x 1 x i32> [[SHIFT]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vssrl.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1]], <vscale x 1 x i32> [[SHIFT]], <vscale x 1 x i1> [[MASK]], i64 0, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vssrl_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t op1, vuint32mf2_t shift, size_t vl) {
- return __riscv_vssrl(mask, op1, shift, vl);
+ return __riscv_vssrl(mask, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i32> @test_vssrl_vx_u32mf2_m
// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vssrl.mask.nxv1i32.i64.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1]], i64 [[SHIFT]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vssrl.mask.nxv1i32.i64.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1]], i64 [[SHIFT]], <vscale x 1 x i1> [[MASK]], i64 0, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vssrl_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t op1, size_t shift, size_t vl) {
- return __riscv_vssrl(mask, op1, shift, vl);
+ return __riscv_vssrl(mask, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i32> @test_vssrl_vv_u32m1_m
// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vssrl.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> poison, <vscale x 2 x i32> [[OP1]], <vscale x 2 x i32> [[SHIFT]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vssrl.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> poison, <vscale x 2 x i32> [[OP1]], <vscale x 2 x i32> [[SHIFT]], <vscale x 2 x i1> [[MASK]], i64 0, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vssrl_vv_u32m1_m(vbool32_t mask, vuint32m1_t op1, vuint32m1_t shift, size_t vl) {
- return __riscv_vssrl(mask, op1, shift, vl);
+ return __riscv_vssrl(mask, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i32> @test_vssrl_vx_u32m1_m
// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vssrl.mask.nxv2i32.i64.i64(<vscale x 2 x i32> poison, <vscale x 2 x i32> [[OP1]], i64 [[SHIFT]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vssrl.mask.nxv2i32.i64.i64(<vscale x 2 x i32> poison, <vscale x 2 x i32> [[OP1]], i64 [[SHIFT]], <vscale x 2 x i1> [[MASK]], i64 0, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vssrl_vx_u32m1_m(vbool32_t mask, vuint32m1_t op1, size_t shift, size_t vl) {
- return __riscv_vssrl(mask, op1, shift, vl);
+ return __riscv_vssrl(mask, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i32> @test_vssrl_vv_u32m2_m
// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vssrl.mask.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> poison, <vscale x 4 x i32> [[OP1]], <vscale x 4 x i32> [[SHIFT]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vssrl.mask.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> poison, <vscale x 4 x i32> [[OP1]], <vscale x 4 x i32> [[SHIFT]], <vscale x 4 x i1> [[MASK]], i64 0, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vssrl_vv_u32m2_m(vbool16_t mask, vuint32m2_t op1, vuint32m2_t shift, size_t vl) {
- return __riscv_vssrl(mask, op1, shift, vl);
+ return __riscv_vssrl(mask, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i32> @test_vssrl_vx_u32m2_m
// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vssrl.mask.nxv4i32.i64.i64(<vscale x 4 x i32> poison, <vscale x 4 x i32> [[OP1]], i64 [[SHIFT]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vssrl.mask.nxv4i32.i64.i64(<vscale x 4 x i32> poison, <vscale x 4 x i32> [[OP1]], i64 [[SHIFT]], <vscale x 4 x i1> [[MASK]], i64 0, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vssrl_vx_u32m2_m(vbool16_t mask, vuint32m2_t op1, size_t shift, size_t vl) {
- return __riscv_vssrl(mask, op1, shift, vl);
+ return __riscv_vssrl(mask, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i32> @test_vssrl_vv_u32m4_m
// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vssrl.mask.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> poison, <vscale x 8 x i32> [[OP1]], <vscale x 8 x i32> [[SHIFT]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vssrl.mask.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> poison, <vscale x 8 x i32> [[OP1]], <vscale x 8 x i32> [[SHIFT]], <vscale x 8 x i1> [[MASK]], i64 0, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vssrl_vv_u32m4_m(vbool8_t mask, vuint32m4_t op1, vuint32m4_t shift, size_t vl) {
- return __riscv_vssrl(mask, op1, shift, vl);
+ return __riscv_vssrl(mask, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i32> @test_vssrl_vx_u32m4_m
// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vssrl.mask.nxv8i32.i64.i64(<vscale x 8 x i32> poison, <vscale x 8 x i32> [[OP1]], i64 [[SHIFT]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vssrl.mask.nxv8i32.i64.i64(<vscale x 8 x i32> poison, <vscale x 8 x i32> [[OP1]], i64 [[SHIFT]], <vscale x 8 x i1> [[MASK]], i64 0, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vssrl_vx_u32m4_m(vbool8_t mask, vuint32m4_t op1, size_t shift, size_t vl) {
- return __riscv_vssrl(mask, op1, shift, vl);
+ return __riscv_vssrl(mask, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i32> @test_vssrl_vv_u32m8_m
// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vssrl.mask.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> poison, <vscale x 16 x i32> [[OP1]], <vscale x 16 x i32> [[SHIFT]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vssrl.mask.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> poison, <vscale x 16 x i32> [[OP1]], <vscale x 16 x i32> [[SHIFT]], <vscale x 16 x i1> [[MASK]], i64 0, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vssrl_vv_u32m8_m(vbool4_t mask, vuint32m8_t op1, vuint32m8_t shift, size_t vl) {
- return __riscv_vssrl(mask, op1, shift, vl);
+ return __riscv_vssrl(mask, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i32> @test_vssrl_vx_u32m8_m
// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vssrl.mask.nxv16i32.i64.i64(<vscale x 16 x i32> poison, <vscale x 16 x i32> [[OP1]], i64 [[SHIFT]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vssrl.mask.nxv16i32.i64.i64(<vscale x 16 x i32> poison, <vscale x 16 x i32> [[OP1]], i64 [[SHIFT]], <vscale x 16 x i1> [[MASK]], i64 0, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vssrl_vx_u32m8_m(vbool4_t mask, vuint32m8_t op1, size_t shift, size_t vl) {
- return __riscv_vssrl(mask, op1, shift, vl);
+ return __riscv_vssrl(mask, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i64> @test_vssrl_vv_u64m1_m
// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vssrl.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> poison, <vscale x 1 x i64> [[OP1]], <vscale x 1 x i64> [[SHIFT]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vssrl.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> poison, <vscale x 1 x i64> [[OP1]], <vscale x 1 x i64> [[SHIFT]], <vscale x 1 x i1> [[MASK]], i64 0, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vssrl_vv_u64m1_m(vbool64_t mask, vuint64m1_t op1, vuint64m1_t shift, size_t vl) {
- return __riscv_vssrl(mask, op1, shift, vl);
+ return __riscv_vssrl(mask, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i64> @test_vssrl_vx_u64m1_m
// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vssrl.mask.nxv1i64.i64.i64(<vscale x 1 x i64> poison, <vscale x 1 x i64> [[OP1]], i64 [[SHIFT]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vssrl.mask.nxv1i64.i64.i64(<vscale x 1 x i64> poison, <vscale x 1 x i64> [[OP1]], i64 [[SHIFT]], <vscale x 1 x i1> [[MASK]], i64 0, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vssrl_vx_u64m1_m(vbool64_t mask, vuint64m1_t op1, size_t shift, size_t vl) {
- return __riscv_vssrl(mask, op1, shift, vl);
+ return __riscv_vssrl(mask, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i64> @test_vssrl_vv_u64m2_m
// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vssrl.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> poison, <vscale x 2 x i64> [[OP1]], <vscale x 2 x i64> [[SHIFT]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vssrl.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> poison, <vscale x 2 x i64> [[OP1]], <vscale x 2 x i64> [[SHIFT]], <vscale x 2 x i1> [[MASK]], i64 0, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vssrl_vv_u64m2_m(vbool32_t mask, vuint64m2_t op1, vuint64m2_t shift, size_t vl) {
- return __riscv_vssrl(mask, op1, shift, vl);
+ return __riscv_vssrl(mask, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i64> @test_vssrl_vx_u64m2_m
// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vssrl.mask.nxv2i64.i64.i64(<vscale x 2 x i64> poison, <vscale x 2 x i64> [[OP1]], i64 [[SHIFT]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vssrl.mask.nxv2i64.i64.i64(<vscale x 2 x i64> poison, <vscale x 2 x i64> [[OP1]], i64 [[SHIFT]], <vscale x 2 x i1> [[MASK]], i64 0, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vssrl_vx_u64m2_m(vbool32_t mask, vuint64m2_t op1, size_t shift, size_t vl) {
- return __riscv_vssrl(mask, op1, shift, vl);
+ return __riscv_vssrl(mask, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i64> @test_vssrl_vv_u64m4_m
// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vssrl.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> poison, <vscale x 4 x i64> [[OP1]], <vscale x 4 x i64> [[SHIFT]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vssrl.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> poison, <vscale x 4 x i64> [[OP1]], <vscale x 4 x i64> [[SHIFT]], <vscale x 4 x i1> [[MASK]], i64 0, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vssrl_vv_u64m4_m(vbool16_t mask, vuint64m4_t op1, vuint64m4_t shift, size_t vl) {
- return __riscv_vssrl(mask, op1, shift, vl);
+ return __riscv_vssrl(mask, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i64> @test_vssrl_vx_u64m4_m
// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vssrl.mask.nxv4i64.i64.i64(<vscale x 4 x i64> poison, <vscale x 4 x i64> [[OP1]], i64 [[SHIFT]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vssrl.mask.nxv4i64.i64.i64(<vscale x 4 x i64> poison, <vscale x 4 x i64> [[OP1]], i64 [[SHIFT]], <vscale x 4 x i1> [[MASK]], i64 0, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vssrl_vx_u64m4_m(vbool16_t mask, vuint64m4_t op1, size_t shift, size_t vl) {
- return __riscv_vssrl(mask, op1, shift, vl);
+ return __riscv_vssrl(mask, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i64> @test_vssrl_vv_u64m8_m
// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vssrl.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> poison, <vscale x 8 x i64> [[OP1]], <vscale x 8 x i64> [[SHIFT]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vssrl.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> poison, <vscale x 8 x i64> [[OP1]], <vscale x 8 x i64> [[SHIFT]], <vscale x 8 x i1> [[MASK]], i64 0, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vssrl_vv_u64m8_m(vbool8_t mask, vuint64m8_t op1, vuint64m8_t shift, size_t vl) {
- return __riscv_vssrl(mask, op1, shift, vl);
+ return __riscv_vssrl(mask, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i64> @test_vssrl_vx_u64m8_m
// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vssrl.mask.nxv8i64.i64.i64(<vscale x 8 x i64> poison, <vscale x 8 x i64> [[OP1]], i64 [[SHIFT]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vssrl.mask.nxv8i64.i64.i64(<vscale x 8 x i64> poison, <vscale x 8 x i64> [[OP1]], i64 [[SHIFT]], <vscale x 8 x i1> [[MASK]], i64 0, i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vssrl_vx_u64m8_m(vbool8_t mask, vuint64m8_t op1, size_t shift, size_t vl) {
- return __riscv_vssrl(mask, op1, shift, vl);
+ return __riscv_vssrl(mask, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vnclip_wv_i8mf8_tu
// CHECK-RV64-SAME: (<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i8> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vnclip.nxv1i8.nxv1i16.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF]], <vscale x 1 x i16> [[OP1]], <vscale x 1 x i8> [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vnclip.nxv1i8.nxv1i16.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF]], <vscale x 1 x i16> [[OP1]], <vscale x 1 x i8> [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vnclip_wv_i8mf8_tu(vint8mf8_t maskedoff, vint16mf4_t op1, vuint8mf8_t shift, size_t vl) {
- return __riscv_vnclip_wv_i8mf8_tu(maskedoff, op1, shift, vl);
+ return __riscv_vnclip_wv_i8mf8_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vnclip_wx_i8mf8_tu
// CHECK-RV64-SAME: (<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vnclip.nxv1i8.nxv1i16.i64.i64(<vscale x 1 x i8> [[MASKEDOFF]], <vscale x 1 x i16> [[OP1]], i64 [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vnclip.nxv1i8.nxv1i16.i64.i64(<vscale x 1 x i8> [[MASKEDOFF]], <vscale x 1 x i16> [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vnclip_wx_i8mf8_tu(vint8mf8_t maskedoff, vint16mf4_t op1, size_t shift, size_t vl) {
- return __riscv_vnclip_wx_i8mf8_tu(maskedoff, op1, shift, vl);
+ return __riscv_vnclip_wx_i8mf8_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vnclip_wv_i8mf4_tu
// CHECK-RV64-SAME: (<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i8> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vnclip.nxv2i8.nxv2i16.nxv2i8.i64(<vscale x 2 x i8> [[MASKEDOFF]], <vscale x 2 x i16> [[OP1]], <vscale x 2 x i8> [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vnclip.nxv2i8.nxv2i16.nxv2i8.i64(<vscale x 2 x i8> [[MASKEDOFF]], <vscale x 2 x i16> [[OP1]], <vscale x 2 x i8> [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vnclip_wv_i8mf4_tu(vint8mf4_t maskedoff, vint16mf2_t op1, vuint8mf4_t shift, size_t vl) {
- return __riscv_vnclip_wv_i8mf4_tu(maskedoff, op1, shift, vl);
+ return __riscv_vnclip_wv_i8mf4_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vnclip_wx_i8mf4_tu
// CHECK-RV64-SAME: (<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vnclip.nxv2i8.nxv2i16.i64.i64(<vscale x 2 x i8> [[MASKEDOFF]], <vscale x 2 x i16> [[OP1]], i64 [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vnclip.nxv2i8.nxv2i16.i64.i64(<vscale x 2 x i8> [[MASKEDOFF]], <vscale x 2 x i16> [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vnclip_wx_i8mf4_tu(vint8mf4_t maskedoff, vint16mf2_t op1, size_t shift, size_t vl) {
- return __riscv_vnclip_wx_i8mf4_tu(maskedoff, op1, shift, vl);
+ return __riscv_vnclip_wx_i8mf4_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vnclip_wv_i8mf2_tu
// CHECK-RV64-SAME: (<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i8> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vnclip.nxv4i8.nxv4i16.nxv4i8.i64(<vscale x 4 x i8> [[MASKEDOFF]], <vscale x 4 x i16> [[OP1]], <vscale x 4 x i8> [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vnclip.nxv4i8.nxv4i16.nxv4i8.i64(<vscale x 4 x i8> [[MASKEDOFF]], <vscale x 4 x i16> [[OP1]], <vscale x 4 x i8> [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vnclip_wv_i8mf2_tu(vint8mf2_t maskedoff, vint16m1_t op1, vuint8mf2_t shift, size_t vl) {
- return __riscv_vnclip_wv_i8mf2_tu(maskedoff, op1, shift, vl);
+ return __riscv_vnclip_wv_i8mf2_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vnclip_wx_i8mf2_tu
// CHECK-RV64-SAME: (<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vnclip.nxv4i8.nxv4i16.i64.i64(<vscale x 4 x i8> [[MASKEDOFF]], <vscale x 4 x i16> [[OP1]], i64 [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vnclip.nxv4i8.nxv4i16.i64.i64(<vscale x 4 x i8> [[MASKEDOFF]], <vscale x 4 x i16> [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vnclip_wx_i8mf2_tu(vint8mf2_t maskedoff, vint16m1_t op1, size_t shift, size_t vl) {
- return __riscv_vnclip_wx_i8mf2_tu(maskedoff, op1, shift, vl);
+ return __riscv_vnclip_wx_i8mf2_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vnclip_wv_i8m1_tu
// CHECK-RV64-SAME: (<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i8> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vnclip.nxv8i8.nxv8i16.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF]], <vscale x 8 x i16> [[OP1]], <vscale x 8 x i8> [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vnclip.nxv8i8.nxv8i16.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF]], <vscale x 8 x i16> [[OP1]], <vscale x 8 x i8> [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vnclip_wv_i8m1_tu(vint8m1_t maskedoff, vint16m2_t op1, vuint8m1_t shift, size_t vl) {
- return __riscv_vnclip_wv_i8m1_tu(maskedoff, op1, shift, vl);
+ return __riscv_vnclip_wv_i8m1_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vnclip_wx_i8m1_tu
// CHECK-RV64-SAME: (<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vnclip.nxv8i8.nxv8i16.i64.i64(<vscale x 8 x i8> [[MASKEDOFF]], <vscale x 8 x i16> [[OP1]], i64 [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vnclip.nxv8i8.nxv8i16.i64.i64(<vscale x 8 x i8> [[MASKEDOFF]], <vscale x 8 x i16> [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vnclip_wx_i8m1_tu(vint8m1_t maskedoff, vint16m2_t op1, size_t shift, size_t vl) {
- return __riscv_vnclip_wx_i8m1_tu(maskedoff, op1, shift, vl);
+ return __riscv_vnclip_wx_i8m1_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vnclip_wv_i8m2_tu
// CHECK-RV64-SAME: (<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i8> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vnclip.nxv16i8.nxv16i16.nxv16i8.i64(<vscale x 16 x i8> [[MASKEDOFF]], <vscale x 16 x i16> [[OP1]], <vscale x 16 x i8> [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vnclip.nxv16i8.nxv16i16.nxv16i8.i64(<vscale x 16 x i8> [[MASKEDOFF]], <vscale x 16 x i16> [[OP1]], <vscale x 16 x i8> [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vnclip_wv_i8m2_tu(vint8m2_t maskedoff, vint16m4_t op1, vuint8m2_t shift, size_t vl) {
- return __riscv_vnclip_wv_i8m2_tu(maskedoff, op1, shift, vl);
+ return __riscv_vnclip_wv_i8m2_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vnclip_wx_i8m2_tu
// CHECK-RV64-SAME: (<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vnclip.nxv16i8.nxv16i16.i64.i64(<vscale x 16 x i8> [[MASKEDOFF]], <vscale x 16 x i16> [[OP1]], i64 [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vnclip.nxv16i8.nxv16i16.i64.i64(<vscale x 16 x i8> [[MASKEDOFF]], <vscale x 16 x i16> [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vnclip_wx_i8m2_tu(vint8m2_t maskedoff, vint16m4_t op1, size_t shift, size_t vl) {
- return __riscv_vnclip_wx_i8m2_tu(maskedoff, op1, shift, vl);
+ return __riscv_vnclip_wx_i8m2_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vnclip_wv_i8m4_tu
// CHECK-RV64-SAME: (<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i8> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vnclip.nxv32i8.nxv32i16.nxv32i8.i64(<vscale x 32 x i8> [[MASKEDOFF]], <vscale x 32 x i16> [[OP1]], <vscale x 32 x i8> [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vnclip.nxv32i8.nxv32i16.nxv32i8.i64(<vscale x 32 x i8> [[MASKEDOFF]], <vscale x 32 x i16> [[OP1]], <vscale x 32 x i8> [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vnclip_wv_i8m4_tu(vint8m4_t maskedoff, vint16m8_t op1, vuint8m4_t shift, size_t vl) {
- return __riscv_vnclip_wv_i8m4_tu(maskedoff, op1, shift, vl);
+ return __riscv_vnclip_wv_i8m4_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vnclip_wx_i8m4_tu
// CHECK-RV64-SAME: (<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vnclip.nxv32i8.nxv32i16.i64.i64(<vscale x 32 x i8> [[MASKEDOFF]], <vscale x 32 x i16> [[OP1]], i64 [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vnclip.nxv32i8.nxv32i16.i64.i64(<vscale x 32 x i8> [[MASKEDOFF]], <vscale x 32 x i16> [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vnclip_wx_i8m4_tu(vint8m4_t maskedoff, vint16m8_t op1, size_t shift, size_t vl) {
- return __riscv_vnclip_wx_i8m4_tu(maskedoff, op1, shift, vl);
+ return __riscv_vnclip_wx_i8m4_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i16> @test_vnclip_wv_i16mf4_tu
// CHECK-RV64-SAME: (<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i16> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vnclip.nxv1i16.nxv1i32.nxv1i16.i64(<vscale x 1 x i16> [[MASKEDOFF]], <vscale x 1 x i32> [[OP1]], <vscale x 1 x i16> [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vnclip.nxv1i16.nxv1i32.nxv1i16.i64(<vscale x 1 x i16> [[MASKEDOFF]], <vscale x 1 x i32> [[OP1]], <vscale x 1 x i16> [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vnclip_wv_i16mf4_tu(vint16mf4_t maskedoff, vint32mf2_t op1, vuint16mf4_t shift, size_t vl) {
- return __riscv_vnclip_wv_i16mf4_tu(maskedoff, op1, shift, vl);
+ return __riscv_vnclip_wv_i16mf4_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i16> @test_vnclip_wx_i16mf4_tu
// CHECK-RV64-SAME: (<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vnclip.nxv1i16.nxv1i32.i64.i64(<vscale x 1 x i16> [[MASKEDOFF]], <vscale x 1 x i32> [[OP1]], i64 [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vnclip.nxv1i16.nxv1i32.i64.i64(<vscale x 1 x i16> [[MASKEDOFF]], <vscale x 1 x i32> [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vnclip_wx_i16mf4_tu(vint16mf4_t maskedoff, vint32mf2_t op1, size_t shift, size_t vl) {
- return __riscv_vnclip_wx_i16mf4_tu(maskedoff, op1, shift, vl);
+ return __riscv_vnclip_wx_i16mf4_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i16> @test_vnclip_wv_i16mf2_tu
// CHECK-RV64-SAME: (<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i16> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vnclip.nxv2i16.nxv2i32.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF]], <vscale x 2 x i32> [[OP1]], <vscale x 2 x i16> [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vnclip.nxv2i16.nxv2i32.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF]], <vscale x 2 x i32> [[OP1]], <vscale x 2 x i16> [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vnclip_wv_i16mf2_tu(vint16mf2_t maskedoff, vint32m1_t op1, vuint16mf2_t shift, size_t vl) {
- return __riscv_vnclip_wv_i16mf2_tu(maskedoff, op1, shift, vl);
+ return __riscv_vnclip_wv_i16mf2_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i16> @test_vnclip_wx_i16mf2_tu
// CHECK-RV64-SAME: (<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vnclip.nxv2i16.nxv2i32.i64.i64(<vscale x 2 x i16> [[MASKEDOFF]], <vscale x 2 x i32> [[OP1]], i64 [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vnclip.nxv2i16.nxv2i32.i64.i64(<vscale x 2 x i16> [[MASKEDOFF]], <vscale x 2 x i32> [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vnclip_wx_i16mf2_tu(vint16mf2_t maskedoff, vint32m1_t op1, size_t shift, size_t vl) {
- return __riscv_vnclip_wx_i16mf2_tu(maskedoff, op1, shift, vl);
+ return __riscv_vnclip_wx_i16mf2_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vnclip_wv_i16m1_tu
// CHECK-RV64-SAME: (<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i16> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vnclip.nxv4i16.nxv4i32.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF]], <vscale x 4 x i32> [[OP1]], <vscale x 4 x i16> [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vnclip.nxv4i16.nxv4i32.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF]], <vscale x 4 x i32> [[OP1]], <vscale x 4 x i16> [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vnclip_wv_i16m1_tu(vint16m1_t maskedoff, vint32m2_t op1, vuint16m1_t shift, size_t vl) {
- return __riscv_vnclip_wv_i16m1_tu(maskedoff, op1, shift, vl);
+ return __riscv_vnclip_wv_i16m1_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vnclip_wx_i16m1_tu
// CHECK-RV64-SAME: (<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vnclip.nxv4i16.nxv4i32.i64.i64(<vscale x 4 x i16> [[MASKEDOFF]], <vscale x 4 x i32> [[OP1]], i64 [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vnclip.nxv4i16.nxv4i32.i64.i64(<vscale x 4 x i16> [[MASKEDOFF]], <vscale x 4 x i32> [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vnclip_wx_i16m1_tu(vint16m1_t maskedoff, vint32m2_t op1, size_t shift, size_t vl) {
- return __riscv_vnclip_wx_i16m1_tu(maskedoff, op1, shift, vl);
+ return __riscv_vnclip_wx_i16m1_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i16> @test_vnclip_wv_i16m2_tu
// CHECK-RV64-SAME: (<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i16> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vnclip.nxv8i16.nxv8i32.nxv8i16.i64(<vscale x 8 x i16> [[MASKEDOFF]], <vscale x 8 x i32> [[OP1]], <vscale x 8 x i16> [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vnclip.nxv8i16.nxv8i32.nxv8i16.i64(<vscale x 8 x i16> [[MASKEDOFF]], <vscale x 8 x i32> [[OP1]], <vscale x 8 x i16> [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vnclip_wv_i16m2_tu(vint16m2_t maskedoff, vint32m4_t op1, vuint16m2_t shift, size_t vl) {
- return __riscv_vnclip_wv_i16m2_tu(maskedoff, op1, shift, vl);
+ return __riscv_vnclip_wv_i16m2_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i16> @test_vnclip_wx_i16m2_tu
// CHECK-RV64-SAME: (<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vnclip.nxv8i16.nxv8i32.i64.i64(<vscale x 8 x i16> [[MASKEDOFF]], <vscale x 8 x i32> [[OP1]], i64 [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vnclip.nxv8i16.nxv8i32.i64.i64(<vscale x 8 x i16> [[MASKEDOFF]], <vscale x 8 x i32> [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vnclip_wx_i16m2_tu(vint16m2_t maskedoff, vint32m4_t op1, size_t shift, size_t vl) {
- return __riscv_vnclip_wx_i16m2_tu(maskedoff, op1, shift, vl);
+ return __riscv_vnclip_wx_i16m2_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i16> @test_vnclip_wv_i16m4_tu
// CHECK-RV64-SAME: (<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i16> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vnclip.nxv16i16.nxv16i32.nxv16i16.i64(<vscale x 16 x i16> [[MASKEDOFF]], <vscale x 16 x i32> [[OP1]], <vscale x 16 x i16> [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vnclip.nxv16i16.nxv16i32.nxv16i16.i64(<vscale x 16 x i16> [[MASKEDOFF]], <vscale x 16 x i32> [[OP1]], <vscale x 16 x i16> [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vnclip_wv_i16m4_tu(vint16m4_t maskedoff, vint32m8_t op1, vuint16m4_t shift, size_t vl) {
- return __riscv_vnclip_wv_i16m4_tu(maskedoff, op1, shift, vl);
+ return __riscv_vnclip_wv_i16m4_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i16> @test_vnclip_wx_i16m4_tu
// CHECK-RV64-SAME: (<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vnclip.nxv16i16.nxv16i32.i64.i64(<vscale x 16 x i16> [[MASKEDOFF]], <vscale x 16 x i32> [[OP1]], i64 [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vnclip.nxv16i16.nxv16i32.i64.i64(<vscale x 16 x i16> [[MASKEDOFF]], <vscale x 16 x i32> [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vnclip_wx_i16m4_tu(vint16m4_t maskedoff, vint32m8_t op1, size_t shift, size_t vl) {
- return __riscv_vnclip_wx_i16m4_tu(maskedoff, op1, shift, vl);
+ return __riscv_vnclip_wx_i16m4_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i32> @test_vnclip_wv_i32mf2_tu
// CHECK-RV64-SAME: (<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i32> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnclip.nxv1i32.nxv1i64.nxv1i32.i64(<vscale x 1 x i32> [[MASKEDOFF]], <vscale x 1 x i64> [[OP1]], <vscale x 1 x i32> [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnclip.nxv1i32.nxv1i64.nxv1i32.i64(<vscale x 1 x i32> [[MASKEDOFF]], <vscale x 1 x i64> [[OP1]], <vscale x 1 x i32> [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vnclip_wv_i32mf2_tu(vint32mf2_t maskedoff, vint64m1_t op1, vuint32mf2_t shift, size_t vl) {
- return __riscv_vnclip_wv_i32mf2_tu(maskedoff, op1, shift, vl);
+ return __riscv_vnclip_wv_i32mf2_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i32> @test_vnclip_wx_i32mf2_tu
// CHECK-RV64-SAME: (<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnclip.nxv1i32.nxv1i64.i64.i64(<vscale x 1 x i32> [[MASKEDOFF]], <vscale x 1 x i64> [[OP1]], i64 [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnclip.nxv1i32.nxv1i64.i64.i64(<vscale x 1 x i32> [[MASKEDOFF]], <vscale x 1 x i64> [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vnclip_wx_i32mf2_tu(vint32mf2_t maskedoff, vint64m1_t op1, size_t shift, size_t vl) {
- return __riscv_vnclip_wx_i32mf2_tu(maskedoff, op1, shift, vl);
+ return __riscv_vnclip_wx_i32mf2_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i32> @test_vnclip_wv_i32m1_tu
// CHECK-RV64-SAME: (<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i32> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vnclip.nxv2i32.nxv2i64.nxv2i32.i64(<vscale x 2 x i32> [[MASKEDOFF]], <vscale x 2 x i64> [[OP1]], <vscale x 2 x i32> [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vnclip.nxv2i32.nxv2i64.nxv2i32.i64(<vscale x 2 x i32> [[MASKEDOFF]], <vscale x 2 x i64> [[OP1]], <vscale x 2 x i32> [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vnclip_wv_i32m1_tu(vint32m1_t maskedoff, vint64m2_t op1, vuint32m1_t shift, size_t vl) {
- return __riscv_vnclip_wv_i32m1_tu(maskedoff, op1, shift, vl);
+ return __riscv_vnclip_wv_i32m1_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i32> @test_vnclip_wx_i32m1_tu
// CHECK-RV64-SAME: (<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vnclip.nxv2i32.nxv2i64.i64.i64(<vscale x 2 x i32> [[MASKEDOFF]], <vscale x 2 x i64> [[OP1]], i64 [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vnclip.nxv2i32.nxv2i64.i64.i64(<vscale x 2 x i32> [[MASKEDOFF]], <vscale x 2 x i64> [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vnclip_wx_i32m1_tu(vint32m1_t maskedoff, vint64m2_t op1, size_t shift, size_t vl) {
- return __riscv_vnclip_wx_i32m1_tu(maskedoff, op1, shift, vl);
+ return __riscv_vnclip_wx_i32m1_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i32> @test_vnclip_wv_i32m2_tu
// CHECK-RV64-SAME: (<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i32> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vnclip.nxv4i32.nxv4i64.nxv4i32.i64(<vscale x 4 x i32> [[MASKEDOFF]], <vscale x 4 x i64> [[OP1]], <vscale x 4 x i32> [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vnclip.nxv4i32.nxv4i64.nxv4i32.i64(<vscale x 4 x i32> [[MASKEDOFF]], <vscale x 4 x i64> [[OP1]], <vscale x 4 x i32> [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vnclip_wv_i32m2_tu(vint32m2_t maskedoff, vint64m4_t op1, vuint32m2_t shift, size_t vl) {
- return __riscv_vnclip_wv_i32m2_tu(maskedoff, op1, shift, vl);
+ return __riscv_vnclip_wv_i32m2_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i32> @test_vnclip_wx_i32m2_tu
// CHECK-RV64-SAME: (<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vnclip.nxv4i32.nxv4i64.i64.i64(<vscale x 4 x i32> [[MASKEDOFF]], <vscale x 4 x i64> [[OP1]], i64 [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vnclip.nxv4i32.nxv4i64.i64.i64(<vscale x 4 x i32> [[MASKEDOFF]], <vscale x 4 x i64> [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vnclip_wx_i32m2_tu(vint32m2_t maskedoff, vint64m4_t op1, size_t shift, size_t vl) {
- return __riscv_vnclip_wx_i32m2_tu(maskedoff, op1, shift, vl);
+ return __riscv_vnclip_wx_i32m2_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i32> @test_vnclip_wv_i32m4_tu
// CHECK-RV64-SAME: (<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i32> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vnclip.nxv8i32.nxv8i64.nxv8i32.i64(<vscale x 8 x i32> [[MASKEDOFF]], <vscale x 8 x i64> [[OP1]], <vscale x 8 x i32> [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vnclip.nxv8i32.nxv8i64.nxv8i32.i64(<vscale x 8 x i32> [[MASKEDOFF]], <vscale x 8 x i64> [[OP1]], <vscale x 8 x i32> [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vnclip_wv_i32m4_tu(vint32m4_t maskedoff, vint64m8_t op1, vuint32m4_t shift, size_t vl) {
- return __riscv_vnclip_wv_i32m4_tu(maskedoff, op1, shift, vl);
+ return __riscv_vnclip_wv_i32m4_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i32> @test_vnclip_wx_i32m4_tu
// CHECK-RV64-SAME: (<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vnclip.nxv8i32.nxv8i64.i64.i64(<vscale x 8 x i32> [[MASKEDOFF]], <vscale x 8 x i64> [[OP1]], i64 [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vnclip.nxv8i32.nxv8i64.i64.i64(<vscale x 8 x i32> [[MASKEDOFF]], <vscale x 8 x i64> [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vnclip_wx_i32m4_tu(vint32m4_t maskedoff, vint64m8_t op1, size_t shift, size_t vl) {
- return __riscv_vnclip_wx_i32m4_tu(maskedoff, op1, shift, vl);
+ return __riscv_vnclip_wx_i32m4_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vnclip_wv_i8mf8_tum
// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i8> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vnclip.mask.nxv1i8.nxv1i16.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF]], <vscale x 1 x i16> [[OP1]], <vscale x 1 x i8> [[SHIFT]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vnclip.mask.nxv1i8.nxv1i16.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF]], <vscale x 1 x i16> [[OP1]], <vscale x 1 x i8> [[SHIFT]], <vscale x 1 x i1> [[MASK]], i64 0, i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vnclip_wv_i8mf8_tum(vbool64_t mask, vint8mf8_t maskedoff, vint16mf4_t op1, vuint8mf8_t shift, size_t vl) {
- return __riscv_vnclip_wv_i8mf8_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnclip_wv_i8mf8_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vnclip_wx_i8mf8_tum
// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vnclip.mask.nxv1i8.nxv1i16.i64.i64(<vscale x 1 x i8> [[MASKEDOFF]], <vscale x 1 x i16> [[OP1]], i64 [[SHIFT]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vnclip.mask.nxv1i8.nxv1i16.i64.i64(<vscale x 1 x i8> [[MASKEDOFF]], <vscale x 1 x i16> [[OP1]], i64 [[SHIFT]], <vscale x 1 x i1> [[MASK]], i64 0, i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vnclip_wx_i8mf8_tum(vbool64_t mask, vint8mf8_t maskedoff, vint16mf4_t op1, size_t shift, size_t vl) {
- return __riscv_vnclip_wx_i8mf8_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnclip_wx_i8mf8_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vnclip_wv_i8mf4_tum
// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i8> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vnclip.mask.nxv2i8.nxv2i16.nxv2i8.i64(<vscale x 2 x i8> [[MASKEDOFF]], <vscale x 2 x i16> [[OP1]], <vscale x 2 x i8> [[SHIFT]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vnclip.mask.nxv2i8.nxv2i16.nxv2i8.i64(<vscale x 2 x i8> [[MASKEDOFF]], <vscale x 2 x i16> [[OP1]], <vscale x 2 x i8> [[SHIFT]], <vscale x 2 x i1> [[MASK]], i64 0, i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vnclip_wv_i8mf4_tum(vbool32_t mask, vint8mf4_t maskedoff, vint16mf2_t op1, vuint8mf4_t shift, size_t vl) {
- return __riscv_vnclip_wv_i8mf4_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnclip_wv_i8mf4_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vnclip_wx_i8mf4_tum
// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vnclip.mask.nxv2i8.nxv2i16.i64.i64(<vscale x 2 x i8> [[MASKEDOFF]], <vscale x 2 x i16> [[OP1]], i64 [[SHIFT]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vnclip.mask.nxv2i8.nxv2i16.i64.i64(<vscale x 2 x i8> [[MASKEDOFF]], <vscale x 2 x i16> [[OP1]], i64 [[SHIFT]], <vscale x 2 x i1> [[MASK]], i64 0, i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vnclip_wx_i8mf4_tum(vbool32_t mask, vint8mf4_t maskedoff, vint16mf2_t op1, size_t shift, size_t vl) {
- return __riscv_vnclip_wx_i8mf4_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnclip_wx_i8mf4_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vnclip_wv_i8mf2_tum
// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i8> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vnclip.mask.nxv4i8.nxv4i16.nxv4i8.i64(<vscale x 4 x i8> [[MASKEDOFF]], <vscale x 4 x i16> [[OP1]], <vscale x 4 x i8> [[SHIFT]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vnclip.mask.nxv4i8.nxv4i16.nxv4i8.i64(<vscale x 4 x i8> [[MASKEDOFF]], <vscale x 4 x i16> [[OP1]], <vscale x 4 x i8> [[SHIFT]], <vscale x 4 x i1> [[MASK]], i64 0, i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vnclip_wv_i8mf2_tum(vbool16_t mask, vint8mf2_t maskedoff, vint16m1_t op1, vuint8mf2_t shift, size_t vl) {
- return __riscv_vnclip_wv_i8mf2_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnclip_wv_i8mf2_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vnclip_wx_i8mf2_tum
// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vnclip.mask.nxv4i8.nxv4i16.i64.i64(<vscale x 4 x i8> [[MASKEDOFF]], <vscale x 4 x i16> [[OP1]], i64 [[SHIFT]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vnclip.mask.nxv4i8.nxv4i16.i64.i64(<vscale x 4 x i8> [[MASKEDOFF]], <vscale x 4 x i16> [[OP1]], i64 [[SHIFT]], <vscale x 4 x i1> [[MASK]], i64 0, i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vnclip_wx_i8mf2_tum(vbool16_t mask, vint8mf2_t maskedoff, vint16m1_t op1, size_t shift, size_t vl) {
- return __riscv_vnclip_wx_i8mf2_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnclip_wx_i8mf2_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vnclip_wv_i8m1_tum
// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i8> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vnclip.mask.nxv8i8.nxv8i16.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF]], <vscale x 8 x i16> [[OP1]], <vscale x 8 x i8> [[SHIFT]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vnclip.mask.nxv8i8.nxv8i16.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF]], <vscale x 8 x i16> [[OP1]], <vscale x 8 x i8> [[SHIFT]], <vscale x 8 x i1> [[MASK]], i64 0, i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vnclip_wv_i8m1_tum(vbool8_t mask, vint8m1_t maskedoff, vint16m2_t op1, vuint8m1_t shift, size_t vl) {
- return __riscv_vnclip_wv_i8m1_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnclip_wv_i8m1_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vnclip_wx_i8m1_tum
// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vnclip.mask.nxv8i8.nxv8i16.i64.i64(<vscale x 8 x i8> [[MASKEDOFF]], <vscale x 8 x i16> [[OP1]], i64 [[SHIFT]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vnclip.mask.nxv8i8.nxv8i16.i64.i64(<vscale x 8 x i8> [[MASKEDOFF]], <vscale x 8 x i16> [[OP1]], i64 [[SHIFT]], <vscale x 8 x i1> [[MASK]], i64 0, i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vnclip_wx_i8m1_tum(vbool8_t mask, vint8m1_t maskedoff, vint16m2_t op1, size_t shift, size_t vl) {
- return __riscv_vnclip_wx_i8m1_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnclip_wx_i8m1_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vnclip_wv_i8m2_tum
// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i8> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vnclip.mask.nxv16i8.nxv16i16.nxv16i8.i64(<vscale x 16 x i8> [[MASKEDOFF]], <vscale x 16 x i16> [[OP1]], <vscale x 16 x i8> [[SHIFT]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vnclip.mask.nxv16i8.nxv16i16.nxv16i8.i64(<vscale x 16 x i8> [[MASKEDOFF]], <vscale x 16 x i16> [[OP1]], <vscale x 16 x i8> [[SHIFT]], <vscale x 16 x i1> [[MASK]], i64 0, i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vnclip_wv_i8m2_tum(vbool4_t mask, vint8m2_t maskedoff, vint16m4_t op1, vuint8m2_t shift, size_t vl) {
- return __riscv_vnclip_wv_i8m2_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnclip_wv_i8m2_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vnclip_wx_i8m2_tum
// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vnclip.mask.nxv16i8.nxv16i16.i64.i64(<vscale x 16 x i8> [[MASKEDOFF]], <vscale x 16 x i16> [[OP1]], i64 [[SHIFT]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vnclip.mask.nxv16i8.nxv16i16.i64.i64(<vscale x 16 x i8> [[MASKEDOFF]], <vscale x 16 x i16> [[OP1]], i64 [[SHIFT]], <vscale x 16 x i1> [[MASK]], i64 0, i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vnclip_wx_i8m2_tum(vbool4_t mask, vint8m2_t maskedoff, vint16m4_t op1, size_t shift, size_t vl) {
- return __riscv_vnclip_wx_i8m2_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnclip_wx_i8m2_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vnclip_wv_i8m4_tum
// CHECK-RV64-SAME: (<vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i8> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vnclip.mask.nxv32i8.nxv32i16.nxv32i8.i64(<vscale x 32 x i8> [[MASKEDOFF]], <vscale x 32 x i16> [[OP1]], <vscale x 32 x i8> [[SHIFT]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vnclip.mask.nxv32i8.nxv32i16.nxv32i8.i64(<vscale x 32 x i8> [[MASKEDOFF]], <vscale x 32 x i16> [[OP1]], <vscale x 32 x i8> [[SHIFT]], <vscale x 32 x i1> [[MASK]], i64 0, i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vnclip_wv_i8m4_tum(vbool2_t mask, vint8m4_t maskedoff, vint16m8_t op1, vuint8m4_t shift, size_t vl) {
- return __riscv_vnclip_wv_i8m4_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnclip_wv_i8m4_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vnclip_wx_i8m4_tum
// CHECK-RV64-SAME: (<vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vnclip.mask.nxv32i8.nxv32i16.i64.i64(<vscale x 32 x i8> [[MASKEDOFF]], <vscale x 32 x i16> [[OP1]], i64 [[SHIFT]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vnclip.mask.nxv32i8.nxv32i16.i64.i64(<vscale x 32 x i8> [[MASKEDOFF]], <vscale x 32 x i16> [[OP1]], i64 [[SHIFT]], <vscale x 32 x i1> [[MASK]], i64 0, i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vnclip_wx_i8m4_tum(vbool2_t mask, vint8m4_t maskedoff, vint16m8_t op1, size_t shift, size_t vl) {
- return __riscv_vnclip_wx_i8m4_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnclip_wx_i8m4_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i16> @test_vnclip_wv_i16mf4_tum
// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i16> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vnclip.mask.nxv1i16.nxv1i32.nxv1i16.i64(<vscale x 1 x i16> [[MASKEDOFF]], <vscale x 1 x i32> [[OP1]], <vscale x 1 x i16> [[SHIFT]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vnclip.mask.nxv1i16.nxv1i32.nxv1i16.i64(<vscale x 1 x i16> [[MASKEDOFF]], <vscale x 1 x i32> [[OP1]], <vscale x 1 x i16> [[SHIFT]], <vscale x 1 x i1> [[MASK]], i64 0, i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vnclip_wv_i16mf4_tum(vbool64_t mask, vint16mf4_t maskedoff, vint32mf2_t op1, vuint16mf4_t shift, size_t vl) {
- return __riscv_vnclip_wv_i16mf4_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnclip_wv_i16mf4_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i16> @test_vnclip_wx_i16mf4_tum
// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vnclip.mask.nxv1i16.nxv1i32.i64.i64(<vscale x 1 x i16> [[MASKEDOFF]], <vscale x 1 x i32> [[OP1]], i64 [[SHIFT]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vnclip.mask.nxv1i16.nxv1i32.i64.i64(<vscale x 1 x i16> [[MASKEDOFF]], <vscale x 1 x i32> [[OP1]], i64 [[SHIFT]], <vscale x 1 x i1> [[MASK]], i64 0, i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vnclip_wx_i16mf4_tum(vbool64_t mask, vint16mf4_t maskedoff, vint32mf2_t op1, size_t shift, size_t vl) {
- return __riscv_vnclip_wx_i16mf4_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnclip_wx_i16mf4_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i16> @test_vnclip_wv_i16mf2_tum
// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i16> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vnclip.mask.nxv2i16.nxv2i32.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF]], <vscale x 2 x i32> [[OP1]], <vscale x 2 x i16> [[SHIFT]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vnclip.mask.nxv2i16.nxv2i32.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF]], <vscale x 2 x i32> [[OP1]], <vscale x 2 x i16> [[SHIFT]], <vscale x 2 x i1> [[MASK]], i64 0, i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vnclip_wv_i16mf2_tum(vbool32_t mask, vint16mf2_t maskedoff, vint32m1_t op1, vuint16mf2_t shift, size_t vl) {
- return __riscv_vnclip_wv_i16mf2_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnclip_wv_i16mf2_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i16> @test_vnclip_wx_i16mf2_tum
// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vnclip.mask.nxv2i16.nxv2i32.i64.i64(<vscale x 2 x i16> [[MASKEDOFF]], <vscale x 2 x i32> [[OP1]], i64 [[SHIFT]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vnclip.mask.nxv2i16.nxv2i32.i64.i64(<vscale x 2 x i16> [[MASKEDOFF]], <vscale x 2 x i32> [[OP1]], i64 [[SHIFT]], <vscale x 2 x i1> [[MASK]], i64 0, i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vnclip_wx_i16mf2_tum(vbool32_t mask, vint16mf2_t maskedoff, vint32m1_t op1, size_t shift, size_t vl) {
- return __riscv_vnclip_wx_i16mf2_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnclip_wx_i16mf2_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vnclip_wv_i16m1_tum
// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i16> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vnclip.mask.nxv4i16.nxv4i32.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF]], <vscale x 4 x i32> [[OP1]], <vscale x 4 x i16> [[SHIFT]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vnclip.mask.nxv4i16.nxv4i32.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF]], <vscale x 4 x i32> [[OP1]], <vscale x 4 x i16> [[SHIFT]], <vscale x 4 x i1> [[MASK]], i64 0, i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vnclip_wv_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, vint32m2_t op1, vuint16m1_t shift, size_t vl) {
- return __riscv_vnclip_wv_i16m1_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnclip_wv_i16m1_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vnclip_wx_i16m1_tum
// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vnclip.mask.nxv4i16.nxv4i32.i64.i64(<vscale x 4 x i16> [[MASKEDOFF]], <vscale x 4 x i32> [[OP1]], i64 [[SHIFT]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vnclip.mask.nxv4i16.nxv4i32.i64.i64(<vscale x 4 x i16> [[MASKEDOFF]], <vscale x 4 x i32> [[OP1]], i64 [[SHIFT]], <vscale x 4 x i1> [[MASK]], i64 0, i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vnclip_wx_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, vint32m2_t op1, size_t shift, size_t vl) {
- return __riscv_vnclip_wx_i16m1_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnclip_wx_i16m1_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i16> @test_vnclip_wv_i16m2_tum
// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i16> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vnclip.mask.nxv8i16.nxv8i32.nxv8i16.i64(<vscale x 8 x i16> [[MASKEDOFF]], <vscale x 8 x i32> [[OP1]], <vscale x 8 x i16> [[SHIFT]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vnclip.mask.nxv8i16.nxv8i32.nxv8i16.i64(<vscale x 8 x i16> [[MASKEDOFF]], <vscale x 8 x i32> [[OP1]], <vscale x 8 x i16> [[SHIFT]], <vscale x 8 x i1> [[MASK]], i64 0, i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vnclip_wv_i16m2_tum(vbool8_t mask, vint16m2_t maskedoff, vint32m4_t op1, vuint16m2_t shift, size_t vl) {
- return __riscv_vnclip_wv_i16m2_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnclip_wv_i16m2_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i16> @test_vnclip_wx_i16m2_tum
// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vnclip.mask.nxv8i16.nxv8i32.i64.i64(<vscale x 8 x i16> [[MASKEDOFF]], <vscale x 8 x i32> [[OP1]], i64 [[SHIFT]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vnclip.mask.nxv8i16.nxv8i32.i64.i64(<vscale x 8 x i16> [[MASKEDOFF]], <vscale x 8 x i32> [[OP1]], i64 [[SHIFT]], <vscale x 8 x i1> [[MASK]], i64 0, i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vnclip_wx_i16m2_tum(vbool8_t mask, vint16m2_t maskedoff, vint32m4_t op1, size_t shift, size_t vl) {
- return __riscv_vnclip_wx_i16m2_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnclip_wx_i16m2_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i16> @test_vnclip_wv_i16m4_tum
// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i16> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vnclip.mask.nxv16i16.nxv16i32.nxv16i16.i64(<vscale x 16 x i16> [[MASKEDOFF]], <vscale x 16 x i32> [[OP1]], <vscale x 16 x i16> [[SHIFT]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vnclip.mask.nxv16i16.nxv16i32.nxv16i16.i64(<vscale x 16 x i16> [[MASKEDOFF]], <vscale x 16 x i32> [[OP1]], <vscale x 16 x i16> [[SHIFT]], <vscale x 16 x i1> [[MASK]], i64 0, i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vnclip_wv_i16m4_tum(vbool4_t mask, vint16m4_t maskedoff, vint32m8_t op1, vuint16m4_t shift, size_t vl) {
- return __riscv_vnclip_wv_i16m4_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnclip_wv_i16m4_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i16> @test_vnclip_wx_i16m4_tum
// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vnclip.mask.nxv16i16.nxv16i32.i64.i64(<vscale x 16 x i16> [[MASKEDOFF]], <vscale x 16 x i32> [[OP1]], i64 [[SHIFT]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vnclip.mask.nxv16i16.nxv16i32.i64.i64(<vscale x 16 x i16> [[MASKEDOFF]], <vscale x 16 x i32> [[OP1]], i64 [[SHIFT]], <vscale x 16 x i1> [[MASK]], i64 0, i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vnclip_wx_i16m4_tum(vbool4_t mask, vint16m4_t maskedoff, vint32m8_t op1, size_t shift, size_t vl) {
- return __riscv_vnclip_wx_i16m4_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnclip_wx_i16m4_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i32> @test_vnclip_wv_i32mf2_tum
// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i32> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnclip.mask.nxv1i32.nxv1i64.nxv1i32.i64(<vscale x 1 x i32> [[MASKEDOFF]], <vscale x 1 x i64> [[OP1]], <vscale x 1 x i32> [[SHIFT]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnclip.mask.nxv1i32.nxv1i64.nxv1i32.i64(<vscale x 1 x i32> [[MASKEDOFF]], <vscale x 1 x i64> [[OP1]], <vscale x 1 x i32> [[SHIFT]], <vscale x 1 x i1> [[MASK]], i64 0, i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vnclip_wv_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff, vint64m1_t op1, vuint32mf2_t shift, size_t vl) {
- return __riscv_vnclip_wv_i32mf2_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnclip_wv_i32mf2_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i32> @test_vnclip_wx_i32mf2_tum
// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnclip.mask.nxv1i32.nxv1i64.i64.i64(<vscale x 1 x i32> [[MASKEDOFF]], <vscale x 1 x i64> [[OP1]], i64 [[SHIFT]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnclip.mask.nxv1i32.nxv1i64.i64.i64(<vscale x 1 x i32> [[MASKEDOFF]], <vscale x 1 x i64> [[OP1]], i64 [[SHIFT]], <vscale x 1 x i1> [[MASK]], i64 0, i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vnclip_wx_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff, vint64m1_t op1, size_t shift, size_t vl) {
- return __riscv_vnclip_wx_i32mf2_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnclip_wx_i32mf2_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i32> @test_vnclip_wv_i32m1_tum
// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i32> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vnclip.mask.nxv2i32.nxv2i64.nxv2i32.i64(<vscale x 2 x i32> [[MASKEDOFF]], <vscale x 2 x i64> [[OP1]], <vscale x 2 x i32> [[SHIFT]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vnclip.mask.nxv2i32.nxv2i64.nxv2i32.i64(<vscale x 2 x i32> [[MASKEDOFF]], <vscale x 2 x i64> [[OP1]], <vscale x 2 x i32> [[SHIFT]], <vscale x 2 x i1> [[MASK]], i64 0, i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vnclip_wv_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, vint64m2_t op1, vuint32m1_t shift, size_t vl) {
- return __riscv_vnclip_wv_i32m1_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnclip_wv_i32m1_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i32> @test_vnclip_wx_i32m1_tum
// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vnclip.mask.nxv2i32.nxv2i64.i64.i64(<vscale x 2 x i32> [[MASKEDOFF]], <vscale x 2 x i64> [[OP1]], i64 [[SHIFT]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vnclip.mask.nxv2i32.nxv2i64.i64.i64(<vscale x 2 x i32> [[MASKEDOFF]], <vscale x 2 x i64> [[OP1]], i64 [[SHIFT]], <vscale x 2 x i1> [[MASK]], i64 0, i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vnclip_wx_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, vint64m2_t op1, size_t shift, size_t vl) {
- return __riscv_vnclip_wx_i32m1_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnclip_wx_i32m1_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i32> @test_vnclip_wv_i32m2_tum
// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i32> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vnclip.mask.nxv4i32.nxv4i64.nxv4i32.i64(<vscale x 4 x i32> [[MASKEDOFF]], <vscale x 4 x i64> [[OP1]], <vscale x 4 x i32> [[SHIFT]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vnclip.mask.nxv4i32.nxv4i64.nxv4i32.i64(<vscale x 4 x i32> [[MASKEDOFF]], <vscale x 4 x i64> [[OP1]], <vscale x 4 x i32> [[SHIFT]], <vscale x 4 x i1> [[MASK]], i64 0, i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vnclip_wv_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, vint64m4_t op1, vuint32m2_t shift, size_t vl) {
- return __riscv_vnclip_wv_i32m2_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnclip_wv_i32m2_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i32> @test_vnclip_wx_i32m2_tum
// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vnclip.mask.nxv4i32.nxv4i64.i64.i64(<vscale x 4 x i32> [[MASKEDOFF]], <vscale x 4 x i64> [[OP1]], i64 [[SHIFT]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vnclip.mask.nxv4i32.nxv4i64.i64.i64(<vscale x 4 x i32> [[MASKEDOFF]], <vscale x 4 x i64> [[OP1]], i64 [[SHIFT]], <vscale x 4 x i1> [[MASK]], i64 0, i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vnclip_wx_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, vint64m4_t op1, size_t shift, size_t vl) {
- return __riscv_vnclip_wx_i32m2_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnclip_wx_i32m2_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i32> @test_vnclip_wv_i32m4_tum
// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i32> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vnclip.mask.nxv8i32.nxv8i64.nxv8i32.i64(<vscale x 8 x i32> [[MASKEDOFF]], <vscale x 8 x i64> [[OP1]], <vscale x 8 x i32> [[SHIFT]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vnclip.mask.nxv8i32.nxv8i64.nxv8i32.i64(<vscale x 8 x i32> [[MASKEDOFF]], <vscale x 8 x i64> [[OP1]], <vscale x 8 x i32> [[SHIFT]], <vscale x 8 x i1> [[MASK]], i64 0, i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vnclip_wv_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, vint64m8_t op1, vuint32m4_t shift, size_t vl) {
- return __riscv_vnclip_wv_i32m4_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnclip_wv_i32m4_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i32> @test_vnclip_wx_i32m4_tum
// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vnclip.mask.nxv8i32.nxv8i64.i64.i64(<vscale x 8 x i32> [[MASKEDOFF]], <vscale x 8 x i64> [[OP1]], i64 [[SHIFT]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vnclip.mask.nxv8i32.nxv8i64.i64.i64(<vscale x 8 x i32> [[MASKEDOFF]], <vscale x 8 x i64> [[OP1]], i64 [[SHIFT]], <vscale x 8 x i1> [[MASK]], i64 0, i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vnclip_wx_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, vint64m8_t op1, size_t shift, size_t vl) {
- return __riscv_vnclip_wx_i32m4_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnclip_wx_i32m4_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vnclip_wv_i8mf8_tumu
// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i8> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vnclip.mask.nxv1i8.nxv1i16.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF]], <vscale x 1 x i16> [[OP1]], <vscale x 1 x i8> [[SHIFT]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vnclip.mask.nxv1i8.nxv1i16.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF]], <vscale x 1 x i16> [[OP1]], <vscale x 1 x i8> [[SHIFT]], <vscale x 1 x i1> [[MASK]], i64 0, i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vnclip_wv_i8mf8_tumu(vbool64_t mask, vint8mf8_t maskedoff, vint16mf4_t op1, vuint8mf8_t shift, size_t vl) {
- return __riscv_vnclip_wv_i8mf8_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnclip_wv_i8mf8_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vnclip_wx_i8mf8_tumu
// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vnclip.mask.nxv1i8.nxv1i16.i64.i64(<vscale x 1 x i8> [[MASKEDOFF]], <vscale x 1 x i16> [[OP1]], i64 [[SHIFT]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vnclip.mask.nxv1i8.nxv1i16.i64.i64(<vscale x 1 x i8> [[MASKEDOFF]], <vscale x 1 x i16> [[OP1]], i64 [[SHIFT]], <vscale x 1 x i1> [[MASK]], i64 0, i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vnclip_wx_i8mf8_tumu(vbool64_t mask, vint8mf8_t maskedoff, vint16mf4_t op1, size_t shift, size_t vl) {
- return __riscv_vnclip_wx_i8mf8_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnclip_wx_i8mf8_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vnclip_wv_i8mf4_tumu
// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i8> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vnclip.mask.nxv2i8.nxv2i16.nxv2i8.i64(<vscale x 2 x i8> [[MASKEDOFF]], <vscale x 2 x i16> [[OP1]], <vscale x 2 x i8> [[SHIFT]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vnclip.mask.nxv2i8.nxv2i16.nxv2i8.i64(<vscale x 2 x i8> [[MASKEDOFF]], <vscale x 2 x i16> [[OP1]], <vscale x 2 x i8> [[SHIFT]], <vscale x 2 x i1> [[MASK]], i64 0, i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vnclip_wv_i8mf4_tumu(vbool32_t mask, vint8mf4_t maskedoff, vint16mf2_t op1, vuint8mf4_t shift, size_t vl) {
- return __riscv_vnclip_wv_i8mf4_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnclip_wv_i8mf4_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vnclip_wx_i8mf4_tumu
// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vnclip.mask.nxv2i8.nxv2i16.i64.i64(<vscale x 2 x i8> [[MASKEDOFF]], <vscale x 2 x i16> [[OP1]], i64 [[SHIFT]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vnclip.mask.nxv2i8.nxv2i16.i64.i64(<vscale x 2 x i8> [[MASKEDOFF]], <vscale x 2 x i16> [[OP1]], i64 [[SHIFT]], <vscale x 2 x i1> [[MASK]], i64 0, i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vnclip_wx_i8mf4_tumu(vbool32_t mask, vint8mf4_t maskedoff, vint16mf2_t op1, size_t shift, size_t vl) {
- return __riscv_vnclip_wx_i8mf4_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnclip_wx_i8mf4_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vnclip_wv_i8mf2_tumu
// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i8> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vnclip.mask.nxv4i8.nxv4i16.nxv4i8.i64(<vscale x 4 x i8> [[MASKEDOFF]], <vscale x 4 x i16> [[OP1]], <vscale x 4 x i8> [[SHIFT]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vnclip.mask.nxv4i8.nxv4i16.nxv4i8.i64(<vscale x 4 x i8> [[MASKEDOFF]], <vscale x 4 x i16> [[OP1]], <vscale x 4 x i8> [[SHIFT]], <vscale x 4 x i1> [[MASK]], i64 0, i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vnclip_wv_i8mf2_tumu(vbool16_t mask, vint8mf2_t maskedoff, vint16m1_t op1, vuint8mf2_t shift, size_t vl) {
- return __riscv_vnclip_wv_i8mf2_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnclip_wv_i8mf2_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vnclip_wx_i8mf2_tumu
// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vnclip.mask.nxv4i8.nxv4i16.i64.i64(<vscale x 4 x i8> [[MASKEDOFF]], <vscale x 4 x i16> [[OP1]], i64 [[SHIFT]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vnclip.mask.nxv4i8.nxv4i16.i64.i64(<vscale x 4 x i8> [[MASKEDOFF]], <vscale x 4 x i16> [[OP1]], i64 [[SHIFT]], <vscale x 4 x i1> [[MASK]], i64 0, i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vnclip_wx_i8mf2_tumu(vbool16_t mask, vint8mf2_t maskedoff, vint16m1_t op1, size_t shift, size_t vl) {
- return __riscv_vnclip_wx_i8mf2_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnclip_wx_i8mf2_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vnclip_wv_i8m1_tumu
// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i8> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vnclip.mask.nxv8i8.nxv8i16.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF]], <vscale x 8 x i16> [[OP1]], <vscale x 8 x i8> [[SHIFT]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vnclip.mask.nxv8i8.nxv8i16.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF]], <vscale x 8 x i16> [[OP1]], <vscale x 8 x i8> [[SHIFT]], <vscale x 8 x i1> [[MASK]], i64 0, i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vnclip_wv_i8m1_tumu(vbool8_t mask, vint8m1_t maskedoff, vint16m2_t op1, vuint8m1_t shift, size_t vl) {
- return __riscv_vnclip_wv_i8m1_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnclip_wv_i8m1_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vnclip_wx_i8m1_tumu
// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vnclip.mask.nxv8i8.nxv8i16.i64.i64(<vscale x 8 x i8> [[MASKEDOFF]], <vscale x 8 x i16> [[OP1]], i64 [[SHIFT]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vnclip.mask.nxv8i8.nxv8i16.i64.i64(<vscale x 8 x i8> [[MASKEDOFF]], <vscale x 8 x i16> [[OP1]], i64 [[SHIFT]], <vscale x 8 x i1> [[MASK]], i64 0, i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vnclip_wx_i8m1_tumu(vbool8_t mask, vint8m1_t maskedoff, vint16m2_t op1, size_t shift, size_t vl) {
- return __riscv_vnclip_wx_i8m1_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnclip_wx_i8m1_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vnclip_wv_i8m2_tumu
// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i8> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vnclip.mask.nxv16i8.nxv16i16.nxv16i8.i64(<vscale x 16 x i8> [[MASKEDOFF]], <vscale x 16 x i16> [[OP1]], <vscale x 16 x i8> [[SHIFT]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vnclip.mask.nxv16i8.nxv16i16.nxv16i8.i64(<vscale x 16 x i8> [[MASKEDOFF]], <vscale x 16 x i16> [[OP1]], <vscale x 16 x i8> [[SHIFT]], <vscale x 16 x i1> [[MASK]], i64 0, i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vnclip_wv_i8m2_tumu(vbool4_t mask, vint8m2_t maskedoff, vint16m4_t op1, vuint8m2_t shift, size_t vl) {
- return __riscv_vnclip_wv_i8m2_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnclip_wv_i8m2_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vnclip_wx_i8m2_tumu
// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vnclip.mask.nxv16i8.nxv16i16.i64.i64(<vscale x 16 x i8> [[MASKEDOFF]], <vscale x 16 x i16> [[OP1]], i64 [[SHIFT]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vnclip.mask.nxv16i8.nxv16i16.i64.i64(<vscale x 16 x i8> [[MASKEDOFF]], <vscale x 16 x i16> [[OP1]], i64 [[SHIFT]], <vscale x 16 x i1> [[MASK]], i64 0, i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vnclip_wx_i8m2_tumu(vbool4_t mask, vint8m2_t maskedoff, vint16m4_t op1, size_t shift, size_t vl) {
- return __riscv_vnclip_wx_i8m2_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnclip_wx_i8m2_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vnclip_wv_i8m4_tumu
// CHECK-RV64-SAME: (<vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i8> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vnclip.mask.nxv32i8.nxv32i16.nxv32i8.i64(<vscale x 32 x i8> [[MASKEDOFF]], <vscale x 32 x i16> [[OP1]], <vscale x 32 x i8> [[SHIFT]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vnclip.mask.nxv32i8.nxv32i16.nxv32i8.i64(<vscale x 32 x i8> [[MASKEDOFF]], <vscale x 32 x i16> [[OP1]], <vscale x 32 x i8> [[SHIFT]], <vscale x 32 x i1> [[MASK]], i64 0, i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vnclip_wv_i8m4_tumu(vbool2_t mask, vint8m4_t maskedoff, vint16m8_t op1, vuint8m4_t shift, size_t vl) {
- return __riscv_vnclip_wv_i8m4_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnclip_wv_i8m4_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vnclip_wx_i8m4_tumu
// CHECK-RV64-SAME: (<vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vnclip.mask.nxv32i8.nxv32i16.i64.i64(<vscale x 32 x i8> [[MASKEDOFF]], <vscale x 32 x i16> [[OP1]], i64 [[SHIFT]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vnclip.mask.nxv32i8.nxv32i16.i64.i64(<vscale x 32 x i8> [[MASKEDOFF]], <vscale x 32 x i16> [[OP1]], i64 [[SHIFT]], <vscale x 32 x i1> [[MASK]], i64 0, i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vnclip_wx_i8m4_tumu(vbool2_t mask, vint8m4_t maskedoff, vint16m8_t op1, size_t shift, size_t vl) {
- return __riscv_vnclip_wx_i8m4_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnclip_wx_i8m4_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i16> @test_vnclip_wv_i16mf4_tumu
// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i16> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vnclip.mask.nxv1i16.nxv1i32.nxv1i16.i64(<vscale x 1 x i16> [[MASKEDOFF]], <vscale x 1 x i32> [[OP1]], <vscale x 1 x i16> [[SHIFT]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vnclip.mask.nxv1i16.nxv1i32.nxv1i16.i64(<vscale x 1 x i16> [[MASKEDOFF]], <vscale x 1 x i32> [[OP1]], <vscale x 1 x i16> [[SHIFT]], <vscale x 1 x i1> [[MASK]], i64 0, i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vnclip_wv_i16mf4_tumu(vbool64_t mask, vint16mf4_t maskedoff, vint32mf2_t op1, vuint16mf4_t shift, size_t vl) {
- return __riscv_vnclip_wv_i16mf4_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnclip_wv_i16mf4_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i16> @test_vnclip_wx_i16mf4_tumu
// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vnclip.mask.nxv1i16.nxv1i32.i64.i64(<vscale x 1 x i16> [[MASKEDOFF]], <vscale x 1 x i32> [[OP1]], i64 [[SHIFT]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vnclip.mask.nxv1i16.nxv1i32.i64.i64(<vscale x 1 x i16> [[MASKEDOFF]], <vscale x 1 x i32> [[OP1]], i64 [[SHIFT]], <vscale x 1 x i1> [[MASK]], i64 0, i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vnclip_wx_i16mf4_tumu(vbool64_t mask, vint16mf4_t maskedoff, vint32mf2_t op1, size_t shift, size_t vl) {
- return __riscv_vnclip_wx_i16mf4_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnclip_wx_i16mf4_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i16> @test_vnclip_wv_i16mf2_tumu
// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i16> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vnclip.mask.nxv2i16.nxv2i32.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF]], <vscale x 2 x i32> [[OP1]], <vscale x 2 x i16> [[SHIFT]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vnclip.mask.nxv2i16.nxv2i32.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF]], <vscale x 2 x i32> [[OP1]], <vscale x 2 x i16> [[SHIFT]], <vscale x 2 x i1> [[MASK]], i64 0, i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vnclip_wv_i16mf2_tumu(vbool32_t mask, vint16mf2_t maskedoff, vint32m1_t op1, vuint16mf2_t shift, size_t vl) {
- return __riscv_vnclip_wv_i16mf2_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnclip_wv_i16mf2_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i16> @test_vnclip_wx_i16mf2_tumu
// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vnclip.mask.nxv2i16.nxv2i32.i64.i64(<vscale x 2 x i16> [[MASKEDOFF]], <vscale x 2 x i32> [[OP1]], i64 [[SHIFT]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vnclip.mask.nxv2i16.nxv2i32.i64.i64(<vscale x 2 x i16> [[MASKEDOFF]], <vscale x 2 x i32> [[OP1]], i64 [[SHIFT]], <vscale x 2 x i1> [[MASK]], i64 0, i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vnclip_wx_i16mf2_tumu(vbool32_t mask, vint16mf2_t maskedoff, vint32m1_t op1, size_t shift, size_t vl) {
- return __riscv_vnclip_wx_i16mf2_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnclip_wx_i16mf2_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vnclip_wv_i16m1_tumu
// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i16> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vnclip.mask.nxv4i16.nxv4i32.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF]], <vscale x 4 x i32> [[OP1]], <vscale x 4 x i16> [[SHIFT]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vnclip.mask.nxv4i16.nxv4i32.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF]], <vscale x 4 x i32> [[OP1]], <vscale x 4 x i16> [[SHIFT]], <vscale x 4 x i1> [[MASK]], i64 0, i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vnclip_wv_i16m1_tumu(vbool16_t mask, vint16m1_t maskedoff, vint32m2_t op1, vuint16m1_t shift, size_t vl) {
- return __riscv_vnclip_wv_i16m1_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnclip_wv_i16m1_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vnclip_wx_i16m1_tumu
// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vnclip.mask.nxv4i16.nxv4i32.i64.i64(<vscale x 4 x i16> [[MASKEDOFF]], <vscale x 4 x i32> [[OP1]], i64 [[SHIFT]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vnclip.mask.nxv4i16.nxv4i32.i64.i64(<vscale x 4 x i16> [[MASKEDOFF]], <vscale x 4 x i32> [[OP1]], i64 [[SHIFT]], <vscale x 4 x i1> [[MASK]], i64 0, i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vnclip_wx_i16m1_tumu(vbool16_t mask, vint16m1_t maskedoff, vint32m2_t op1, size_t shift, size_t vl) {
- return __riscv_vnclip_wx_i16m1_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnclip_wx_i16m1_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i16> @test_vnclip_wv_i16m2_tumu
// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i16> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vnclip.mask.nxv8i16.nxv8i32.nxv8i16.i64(<vscale x 8 x i16> [[MASKEDOFF]], <vscale x 8 x i32> [[OP1]], <vscale x 8 x i16> [[SHIFT]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vnclip.mask.nxv8i16.nxv8i32.nxv8i16.i64(<vscale x 8 x i16> [[MASKEDOFF]], <vscale x 8 x i32> [[OP1]], <vscale x 8 x i16> [[SHIFT]], <vscale x 8 x i1> [[MASK]], i64 0, i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vnclip_wv_i16m2_tumu(vbool8_t mask, vint16m2_t maskedoff, vint32m4_t op1, vuint16m2_t shift, size_t vl) {
- return __riscv_vnclip_wv_i16m2_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnclip_wv_i16m2_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i16> @test_vnclip_wx_i16m2_tumu
// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vnclip.mask.nxv8i16.nxv8i32.i64.i64(<vscale x 8 x i16> [[MASKEDOFF]], <vscale x 8 x i32> [[OP1]], i64 [[SHIFT]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vnclip.mask.nxv8i16.nxv8i32.i64.i64(<vscale x 8 x i16> [[MASKEDOFF]], <vscale x 8 x i32> [[OP1]], i64 [[SHIFT]], <vscale x 8 x i1> [[MASK]], i64 0, i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vnclip_wx_i16m2_tumu(vbool8_t mask, vint16m2_t maskedoff, vint32m4_t op1, size_t shift, size_t vl) {
- return __riscv_vnclip_wx_i16m2_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnclip_wx_i16m2_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i16> @test_vnclip_wv_i16m4_tumu
// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i16> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vnclip.mask.nxv16i16.nxv16i32.nxv16i16.i64(<vscale x 16 x i16> [[MASKEDOFF]], <vscale x 16 x i32> [[OP1]], <vscale x 16 x i16> [[SHIFT]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vnclip.mask.nxv16i16.nxv16i32.nxv16i16.i64(<vscale x 16 x i16> [[MASKEDOFF]], <vscale x 16 x i32> [[OP1]], <vscale x 16 x i16> [[SHIFT]], <vscale x 16 x i1> [[MASK]], i64 0, i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vnclip_wv_i16m4_tumu(vbool4_t mask, vint16m4_t maskedoff, vint32m8_t op1, vuint16m4_t shift, size_t vl) {
- return __riscv_vnclip_wv_i16m4_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnclip_wv_i16m4_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i16> @test_vnclip_wx_i16m4_tumu
// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vnclip.mask.nxv16i16.nxv16i32.i64.i64(<vscale x 16 x i16> [[MASKEDOFF]], <vscale x 16 x i32> [[OP1]], i64 [[SHIFT]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vnclip.mask.nxv16i16.nxv16i32.i64.i64(<vscale x 16 x i16> [[MASKEDOFF]], <vscale x 16 x i32> [[OP1]], i64 [[SHIFT]], <vscale x 16 x i1> [[MASK]], i64 0, i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vnclip_wx_i16m4_tumu(vbool4_t mask, vint16m4_t maskedoff, vint32m8_t op1, size_t shift, size_t vl) {
- return __riscv_vnclip_wx_i16m4_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnclip_wx_i16m4_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i32> @test_vnclip_wv_i32mf2_tumu
// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i32> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnclip.mask.nxv1i32.nxv1i64.nxv1i32.i64(<vscale x 1 x i32> [[MASKEDOFF]], <vscale x 1 x i64> [[OP1]], <vscale x 1 x i32> [[SHIFT]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnclip.mask.nxv1i32.nxv1i64.nxv1i32.i64(<vscale x 1 x i32> [[MASKEDOFF]], <vscale x 1 x i64> [[OP1]], <vscale x 1 x i32> [[SHIFT]], <vscale x 1 x i1> [[MASK]], i64 0, i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vnclip_wv_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedoff, vint64m1_t op1, vuint32mf2_t shift, size_t vl) {
- return __riscv_vnclip_wv_i32mf2_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnclip_wv_i32mf2_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i32> @test_vnclip_wx_i32mf2_tumu
// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnclip.mask.nxv1i32.nxv1i64.i64.i64(<vscale x 1 x i32> [[MASKEDOFF]], <vscale x 1 x i64> [[OP1]], i64 [[SHIFT]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnclip.mask.nxv1i32.nxv1i64.i64.i64(<vscale x 1 x i32> [[MASKEDOFF]], <vscale x 1 x i64> [[OP1]], i64 [[SHIFT]], <vscale x 1 x i1> [[MASK]], i64 0, i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vnclip_wx_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedoff, vint64m1_t op1, size_t shift, size_t vl) {
- return __riscv_vnclip_wx_i32mf2_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnclip_wx_i32mf2_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i32> @test_vnclip_wv_i32m1_tumu
// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i32> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vnclip.mask.nxv2i32.nxv2i64.nxv2i32.i64(<vscale x 2 x i32> [[MASKEDOFF]], <vscale x 2 x i64> [[OP1]], <vscale x 2 x i32> [[SHIFT]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vnclip.mask.nxv2i32.nxv2i64.nxv2i32.i64(<vscale x 2 x i32> [[MASKEDOFF]], <vscale x 2 x i64> [[OP1]], <vscale x 2 x i32> [[SHIFT]], <vscale x 2 x i1> [[MASK]], i64 0, i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vnclip_wv_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, vint64m2_t op1, vuint32m1_t shift, size_t vl) {
- return __riscv_vnclip_wv_i32m1_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnclip_wv_i32m1_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i32> @test_vnclip_wx_i32m1_tumu
// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vnclip.mask.nxv2i32.nxv2i64.i64.i64(<vscale x 2 x i32> [[MASKEDOFF]], <vscale x 2 x i64> [[OP1]], i64 [[SHIFT]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vnclip.mask.nxv2i32.nxv2i64.i64.i64(<vscale x 2 x i32> [[MASKEDOFF]], <vscale x 2 x i64> [[OP1]], i64 [[SHIFT]], <vscale x 2 x i1> [[MASK]], i64 0, i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vnclip_wx_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, vint64m2_t op1, size_t shift, size_t vl) {
- return __riscv_vnclip_wx_i32m1_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnclip_wx_i32m1_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i32> @test_vnclip_wv_i32m2_tumu
// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i32> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vnclip.mask.nxv4i32.nxv4i64.nxv4i32.i64(<vscale x 4 x i32> [[MASKEDOFF]], <vscale x 4 x i64> [[OP1]], <vscale x 4 x i32> [[SHIFT]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vnclip.mask.nxv4i32.nxv4i64.nxv4i32.i64(<vscale x 4 x i32> [[MASKEDOFF]], <vscale x 4 x i64> [[OP1]], <vscale x 4 x i32> [[SHIFT]], <vscale x 4 x i1> [[MASK]], i64 0, i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vnclip_wv_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, vint64m4_t op1, vuint32m2_t shift, size_t vl) {
- return __riscv_vnclip_wv_i32m2_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnclip_wv_i32m2_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i32> @test_vnclip_wx_i32m2_tumu
// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vnclip.mask.nxv4i32.nxv4i64.i64.i64(<vscale x 4 x i32> [[MASKEDOFF]], <vscale x 4 x i64> [[OP1]], i64 [[SHIFT]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vnclip.mask.nxv4i32.nxv4i64.i64.i64(<vscale x 4 x i32> [[MASKEDOFF]], <vscale x 4 x i64> [[OP1]], i64 [[SHIFT]], <vscale x 4 x i1> [[MASK]], i64 0, i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vnclip_wx_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, vint64m4_t op1, size_t shift, size_t vl) {
- return __riscv_vnclip_wx_i32m2_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnclip_wx_i32m2_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i32> @test_vnclip_wv_i32m4_tumu
// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i32> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vnclip.mask.nxv8i32.nxv8i64.nxv8i32.i64(<vscale x 8 x i32> [[MASKEDOFF]], <vscale x 8 x i64> [[OP1]], <vscale x 8 x i32> [[SHIFT]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vnclip.mask.nxv8i32.nxv8i64.nxv8i32.i64(<vscale x 8 x i32> [[MASKEDOFF]], <vscale x 8 x i64> [[OP1]], <vscale x 8 x i32> [[SHIFT]], <vscale x 8 x i1> [[MASK]], i64 0, i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vnclip_wv_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, vint64m8_t op1, vuint32m4_t shift, size_t vl) {
- return __riscv_vnclip_wv_i32m4_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnclip_wv_i32m4_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i32> @test_vnclip_wx_i32m4_tumu
// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vnclip.mask.nxv8i32.nxv8i64.i64.i64(<vscale x 8 x i32> [[MASKEDOFF]], <vscale x 8 x i64> [[OP1]], i64 [[SHIFT]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vnclip.mask.nxv8i32.nxv8i64.i64.i64(<vscale x 8 x i32> [[MASKEDOFF]], <vscale x 8 x i64> [[OP1]], i64 [[SHIFT]], <vscale x 8 x i1> [[MASK]], i64 0, i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vnclip_wx_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, vint64m8_t op1, size_t shift, size_t vl) {
- return __riscv_vnclip_wx_i32m4_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnclip_wx_i32m4_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vnclip_wv_i8mf8_mu
// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i8> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vnclip.mask.nxv1i8.nxv1i16.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF]], <vscale x 1 x i16> [[OP1]], <vscale x 1 x i8> [[SHIFT]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vnclip.mask.nxv1i8.nxv1i16.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF]], <vscale x 1 x i16> [[OP1]], <vscale x 1 x i8> [[SHIFT]], <vscale x 1 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vnclip_wv_i8mf8_mu(vbool64_t mask, vint8mf8_t maskedoff, vint16mf4_t op1, vuint8mf8_t shift, size_t vl) {
- return __riscv_vnclip_wv_i8mf8_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnclip_wv_i8mf8_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vnclip_wx_i8mf8_mu
// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vnclip.mask.nxv1i8.nxv1i16.i64.i64(<vscale x 1 x i8> [[MASKEDOFF]], <vscale x 1 x i16> [[OP1]], i64 [[SHIFT]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vnclip.mask.nxv1i8.nxv1i16.i64.i64(<vscale x 1 x i8> [[MASKEDOFF]], <vscale x 1 x i16> [[OP1]], i64 [[SHIFT]], <vscale x 1 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vnclip_wx_i8mf8_mu(vbool64_t mask, vint8mf8_t maskedoff, vint16mf4_t op1, size_t shift, size_t vl) {
- return __riscv_vnclip_wx_i8mf8_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnclip_wx_i8mf8_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vnclip_wv_i8mf4_mu
// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i8> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vnclip.mask.nxv2i8.nxv2i16.nxv2i8.i64(<vscale x 2 x i8> [[MASKEDOFF]], <vscale x 2 x i16> [[OP1]], <vscale x 2 x i8> [[SHIFT]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vnclip.mask.nxv2i8.nxv2i16.nxv2i8.i64(<vscale x 2 x i8> [[MASKEDOFF]], <vscale x 2 x i16> [[OP1]], <vscale x 2 x i8> [[SHIFT]], <vscale x 2 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vnclip_wv_i8mf4_mu(vbool32_t mask, vint8mf4_t maskedoff, vint16mf2_t op1, vuint8mf4_t shift, size_t vl) {
- return __riscv_vnclip_wv_i8mf4_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnclip_wv_i8mf4_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vnclip_wx_i8mf4_mu
// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vnclip.mask.nxv2i8.nxv2i16.i64.i64(<vscale x 2 x i8> [[MASKEDOFF]], <vscale x 2 x i16> [[OP1]], i64 [[SHIFT]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vnclip.mask.nxv2i8.nxv2i16.i64.i64(<vscale x 2 x i8> [[MASKEDOFF]], <vscale x 2 x i16> [[OP1]], i64 [[SHIFT]], <vscale x 2 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vnclip_wx_i8mf4_mu(vbool32_t mask, vint8mf4_t maskedoff, vint16mf2_t op1, size_t shift, size_t vl) {
- return __riscv_vnclip_wx_i8mf4_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnclip_wx_i8mf4_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vnclip_wv_i8mf2_mu
// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i8> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vnclip.mask.nxv4i8.nxv4i16.nxv4i8.i64(<vscale x 4 x i8> [[MASKEDOFF]], <vscale x 4 x i16> [[OP1]], <vscale x 4 x i8> [[SHIFT]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vnclip.mask.nxv4i8.nxv4i16.nxv4i8.i64(<vscale x 4 x i8> [[MASKEDOFF]], <vscale x 4 x i16> [[OP1]], <vscale x 4 x i8> [[SHIFT]], <vscale x 4 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vnclip_wv_i8mf2_mu(vbool16_t mask, vint8mf2_t maskedoff, vint16m1_t op1, vuint8mf2_t shift, size_t vl) {
- return __riscv_vnclip_wv_i8mf2_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnclip_wv_i8mf2_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vnclip_wx_i8mf2_mu
// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vnclip.mask.nxv4i8.nxv4i16.i64.i64(<vscale x 4 x i8> [[MASKEDOFF]], <vscale x 4 x i16> [[OP1]], i64 [[SHIFT]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vnclip.mask.nxv4i8.nxv4i16.i64.i64(<vscale x 4 x i8> [[MASKEDOFF]], <vscale x 4 x i16> [[OP1]], i64 [[SHIFT]], <vscale x 4 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vnclip_wx_i8mf2_mu(vbool16_t mask, vint8mf2_t maskedoff, vint16m1_t op1, size_t shift, size_t vl) {
- return __riscv_vnclip_wx_i8mf2_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnclip_wx_i8mf2_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vnclip_wv_i8m1_mu
// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i8> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vnclip.mask.nxv8i8.nxv8i16.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF]], <vscale x 8 x i16> [[OP1]], <vscale x 8 x i8> [[SHIFT]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vnclip.mask.nxv8i8.nxv8i16.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF]], <vscale x 8 x i16> [[OP1]], <vscale x 8 x i8> [[SHIFT]], <vscale x 8 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vnclip_wv_i8m1_mu(vbool8_t mask, vint8m1_t maskedoff, vint16m2_t op1, vuint8m1_t shift, size_t vl) {
- return __riscv_vnclip_wv_i8m1_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnclip_wv_i8m1_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vnclip_wx_i8m1_mu
// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vnclip.mask.nxv8i8.nxv8i16.i64.i64(<vscale x 8 x i8> [[MASKEDOFF]], <vscale x 8 x i16> [[OP1]], i64 [[SHIFT]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vnclip.mask.nxv8i8.nxv8i16.i64.i64(<vscale x 8 x i8> [[MASKEDOFF]], <vscale x 8 x i16> [[OP1]], i64 [[SHIFT]], <vscale x 8 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vnclip_wx_i8m1_mu(vbool8_t mask, vint8m1_t maskedoff, vint16m2_t op1, size_t shift, size_t vl) {
- return __riscv_vnclip_wx_i8m1_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnclip_wx_i8m1_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vnclip_wv_i8m2_mu
// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i8> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vnclip.mask.nxv16i8.nxv16i16.nxv16i8.i64(<vscale x 16 x i8> [[MASKEDOFF]], <vscale x 16 x i16> [[OP1]], <vscale x 16 x i8> [[SHIFT]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vnclip.mask.nxv16i8.nxv16i16.nxv16i8.i64(<vscale x 16 x i8> [[MASKEDOFF]], <vscale x 16 x i16> [[OP1]], <vscale x 16 x i8> [[SHIFT]], <vscale x 16 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vnclip_wv_i8m2_mu(vbool4_t mask, vint8m2_t maskedoff, vint16m4_t op1, vuint8m2_t shift, size_t vl) {
- return __riscv_vnclip_wv_i8m2_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnclip_wv_i8m2_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vnclip_wx_i8m2_mu
// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vnclip.mask.nxv16i8.nxv16i16.i64.i64(<vscale x 16 x i8> [[MASKEDOFF]], <vscale x 16 x i16> [[OP1]], i64 [[SHIFT]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vnclip.mask.nxv16i8.nxv16i16.i64.i64(<vscale x 16 x i8> [[MASKEDOFF]], <vscale x 16 x i16> [[OP1]], i64 [[SHIFT]], <vscale x 16 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vnclip_wx_i8m2_mu(vbool4_t mask, vint8m2_t maskedoff, vint16m4_t op1, size_t shift, size_t vl) {
- return __riscv_vnclip_wx_i8m2_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnclip_wx_i8m2_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vnclip_wv_i8m4_mu
// CHECK-RV64-SAME: (<vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i8> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vnclip.mask.nxv32i8.nxv32i16.nxv32i8.i64(<vscale x 32 x i8> [[MASKEDOFF]], <vscale x 32 x i16> [[OP1]], <vscale x 32 x i8> [[SHIFT]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vnclip.mask.nxv32i8.nxv32i16.nxv32i8.i64(<vscale x 32 x i8> [[MASKEDOFF]], <vscale x 32 x i16> [[OP1]], <vscale x 32 x i8> [[SHIFT]], <vscale x 32 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vnclip_wv_i8m4_mu(vbool2_t mask, vint8m4_t maskedoff, vint16m8_t op1, vuint8m4_t shift, size_t vl) {
- return __riscv_vnclip_wv_i8m4_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnclip_wv_i8m4_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vnclip_wx_i8m4_mu
// CHECK-RV64-SAME: (<vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vnclip.mask.nxv32i8.nxv32i16.i64.i64(<vscale x 32 x i8> [[MASKEDOFF]], <vscale x 32 x i16> [[OP1]], i64 [[SHIFT]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vnclip.mask.nxv32i8.nxv32i16.i64.i64(<vscale x 32 x i8> [[MASKEDOFF]], <vscale x 32 x i16> [[OP1]], i64 [[SHIFT]], <vscale x 32 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vnclip_wx_i8m4_mu(vbool2_t mask, vint8m4_t maskedoff, vint16m8_t op1, size_t shift, size_t vl) {
- return __riscv_vnclip_wx_i8m4_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnclip_wx_i8m4_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i16> @test_vnclip_wv_i16mf4_mu
// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i16> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vnclip.mask.nxv1i16.nxv1i32.nxv1i16.i64(<vscale x 1 x i16> [[MASKEDOFF]], <vscale x 1 x i32> [[OP1]], <vscale x 1 x i16> [[SHIFT]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vnclip.mask.nxv1i16.nxv1i32.nxv1i16.i64(<vscale x 1 x i16> [[MASKEDOFF]], <vscale x 1 x i32> [[OP1]], <vscale x 1 x i16> [[SHIFT]], <vscale x 1 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vnclip_wv_i16mf4_mu(vbool64_t mask, vint16mf4_t maskedoff, vint32mf2_t op1, vuint16mf4_t shift, size_t vl) {
- return __riscv_vnclip_wv_i16mf4_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnclip_wv_i16mf4_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i16> @test_vnclip_wx_i16mf4_mu
// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vnclip.mask.nxv1i16.nxv1i32.i64.i64(<vscale x 1 x i16> [[MASKEDOFF]], <vscale x 1 x i32> [[OP1]], i64 [[SHIFT]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vnclip.mask.nxv1i16.nxv1i32.i64.i64(<vscale x 1 x i16> [[MASKEDOFF]], <vscale x 1 x i32> [[OP1]], i64 [[SHIFT]], <vscale x 1 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vnclip_wx_i16mf4_mu(vbool64_t mask, vint16mf4_t maskedoff, vint32mf2_t op1, size_t shift, size_t vl) {
- return __riscv_vnclip_wx_i16mf4_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnclip_wx_i16mf4_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i16> @test_vnclip_wv_i16mf2_mu
// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i16> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vnclip.mask.nxv2i16.nxv2i32.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF]], <vscale x 2 x i32> [[OP1]], <vscale x 2 x i16> [[SHIFT]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vnclip.mask.nxv2i16.nxv2i32.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF]], <vscale x 2 x i32> [[OP1]], <vscale x 2 x i16> [[SHIFT]], <vscale x 2 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vnclip_wv_i16mf2_mu(vbool32_t mask, vint16mf2_t maskedoff, vint32m1_t op1, vuint16mf2_t shift, size_t vl) {
- return __riscv_vnclip_wv_i16mf2_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnclip_wv_i16mf2_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i16> @test_vnclip_wx_i16mf2_mu
// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vnclip.mask.nxv2i16.nxv2i32.i64.i64(<vscale x 2 x i16> [[MASKEDOFF]], <vscale x 2 x i32> [[OP1]], i64 [[SHIFT]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vnclip.mask.nxv2i16.nxv2i32.i64.i64(<vscale x 2 x i16> [[MASKEDOFF]], <vscale x 2 x i32> [[OP1]], i64 [[SHIFT]], <vscale x 2 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vnclip_wx_i16mf2_mu(vbool32_t mask, vint16mf2_t maskedoff, vint32m1_t op1, size_t shift, size_t vl) {
- return __riscv_vnclip_wx_i16mf2_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnclip_wx_i16mf2_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vnclip_wv_i16m1_mu
// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i16> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vnclip.mask.nxv4i16.nxv4i32.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF]], <vscale x 4 x i32> [[OP1]], <vscale x 4 x i16> [[SHIFT]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vnclip.mask.nxv4i16.nxv4i32.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF]], <vscale x 4 x i32> [[OP1]], <vscale x 4 x i16> [[SHIFT]], <vscale x 4 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vnclip_wv_i16m1_mu(vbool16_t mask, vint16m1_t maskedoff, vint32m2_t op1, vuint16m1_t shift, size_t vl) {
- return __riscv_vnclip_wv_i16m1_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnclip_wv_i16m1_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vnclip_wx_i16m1_mu
// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vnclip.mask.nxv4i16.nxv4i32.i64.i64(<vscale x 4 x i16> [[MASKEDOFF]], <vscale x 4 x i32> [[OP1]], i64 [[SHIFT]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vnclip.mask.nxv4i16.nxv4i32.i64.i64(<vscale x 4 x i16> [[MASKEDOFF]], <vscale x 4 x i32> [[OP1]], i64 [[SHIFT]], <vscale x 4 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vnclip_wx_i16m1_mu(vbool16_t mask, vint16m1_t maskedoff, vint32m2_t op1, size_t shift, size_t vl) {
- return __riscv_vnclip_wx_i16m1_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnclip_wx_i16m1_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i16> @test_vnclip_wv_i16m2_mu
// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i16> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vnclip.mask.nxv8i16.nxv8i32.nxv8i16.i64(<vscale x 8 x i16> [[MASKEDOFF]], <vscale x 8 x i32> [[OP1]], <vscale x 8 x i16> [[SHIFT]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vnclip.mask.nxv8i16.nxv8i32.nxv8i16.i64(<vscale x 8 x i16> [[MASKEDOFF]], <vscale x 8 x i32> [[OP1]], <vscale x 8 x i16> [[SHIFT]], <vscale x 8 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vnclip_wv_i16m2_mu(vbool8_t mask, vint16m2_t maskedoff, vint32m4_t op1, vuint16m2_t shift, size_t vl) {
- return __riscv_vnclip_wv_i16m2_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnclip_wv_i16m2_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i16> @test_vnclip_wx_i16m2_mu
// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vnclip.mask.nxv8i16.nxv8i32.i64.i64(<vscale x 8 x i16> [[MASKEDOFF]], <vscale x 8 x i32> [[OP1]], i64 [[SHIFT]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vnclip.mask.nxv8i16.nxv8i32.i64.i64(<vscale x 8 x i16> [[MASKEDOFF]], <vscale x 8 x i32> [[OP1]], i64 [[SHIFT]], <vscale x 8 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vnclip_wx_i16m2_mu(vbool8_t mask, vint16m2_t maskedoff, vint32m4_t op1, size_t shift, size_t vl) {
- return __riscv_vnclip_wx_i16m2_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnclip_wx_i16m2_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i16> @test_vnclip_wv_i16m4_mu
// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i16> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vnclip.mask.nxv16i16.nxv16i32.nxv16i16.i64(<vscale x 16 x i16> [[MASKEDOFF]], <vscale x 16 x i32> [[OP1]], <vscale x 16 x i16> [[SHIFT]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vnclip.mask.nxv16i16.nxv16i32.nxv16i16.i64(<vscale x 16 x i16> [[MASKEDOFF]], <vscale x 16 x i32> [[OP1]], <vscale x 16 x i16> [[SHIFT]], <vscale x 16 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vnclip_wv_i16m4_mu(vbool4_t mask, vint16m4_t maskedoff, vint32m8_t op1, vuint16m4_t shift, size_t vl) {
- return __riscv_vnclip_wv_i16m4_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnclip_wv_i16m4_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i16> @test_vnclip_wx_i16m4_mu
// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vnclip.mask.nxv16i16.nxv16i32.i64.i64(<vscale x 16 x i16> [[MASKEDOFF]], <vscale x 16 x i32> [[OP1]], i64 [[SHIFT]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vnclip.mask.nxv16i16.nxv16i32.i64.i64(<vscale x 16 x i16> [[MASKEDOFF]], <vscale x 16 x i32> [[OP1]], i64 [[SHIFT]], <vscale x 16 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vnclip_wx_i16m4_mu(vbool4_t mask, vint16m4_t maskedoff, vint32m8_t op1, size_t shift, size_t vl) {
- return __riscv_vnclip_wx_i16m4_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnclip_wx_i16m4_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i32> @test_vnclip_wv_i32mf2_mu
// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i32> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnclip.mask.nxv1i32.nxv1i64.nxv1i32.i64(<vscale x 1 x i32> [[MASKEDOFF]], <vscale x 1 x i64> [[OP1]], <vscale x 1 x i32> [[SHIFT]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnclip.mask.nxv1i32.nxv1i64.nxv1i32.i64(<vscale x 1 x i32> [[MASKEDOFF]], <vscale x 1 x i64> [[OP1]], <vscale x 1 x i32> [[SHIFT]], <vscale x 1 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vnclip_wv_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff, vint64m1_t op1, vuint32mf2_t shift, size_t vl) {
- return __riscv_vnclip_wv_i32mf2_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnclip_wv_i32mf2_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i32> @test_vnclip_wx_i32mf2_mu
// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnclip.mask.nxv1i32.nxv1i64.i64.i64(<vscale x 1 x i32> [[MASKEDOFF]], <vscale x 1 x i64> [[OP1]], i64 [[SHIFT]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnclip.mask.nxv1i32.nxv1i64.i64.i64(<vscale x 1 x i32> [[MASKEDOFF]], <vscale x 1 x i64> [[OP1]], i64 [[SHIFT]], <vscale x 1 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vnclip_wx_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff, vint64m1_t op1, size_t shift, size_t vl) {
- return __riscv_vnclip_wx_i32mf2_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnclip_wx_i32mf2_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i32> @test_vnclip_wv_i32m1_mu
// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i32> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vnclip.mask.nxv2i32.nxv2i64.nxv2i32.i64(<vscale x 2 x i32> [[MASKEDOFF]], <vscale x 2 x i64> [[OP1]], <vscale x 2 x i32> [[SHIFT]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vnclip.mask.nxv2i32.nxv2i64.nxv2i32.i64(<vscale x 2 x i32> [[MASKEDOFF]], <vscale x 2 x i64> [[OP1]], <vscale x 2 x i32> [[SHIFT]], <vscale x 2 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vnclip_wv_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, vint64m2_t op1, vuint32m1_t shift, size_t vl) {
- return __riscv_vnclip_wv_i32m1_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnclip_wv_i32m1_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i32> @test_vnclip_wx_i32m1_mu
// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vnclip.mask.nxv2i32.nxv2i64.i64.i64(<vscale x 2 x i32> [[MASKEDOFF]], <vscale x 2 x i64> [[OP1]], i64 [[SHIFT]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vnclip.mask.nxv2i32.nxv2i64.i64.i64(<vscale x 2 x i32> [[MASKEDOFF]], <vscale x 2 x i64> [[OP1]], i64 [[SHIFT]], <vscale x 2 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vnclip_wx_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, vint64m2_t op1, size_t shift, size_t vl) {
- return __riscv_vnclip_wx_i32m1_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnclip_wx_i32m1_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i32> @test_vnclip_wv_i32m2_mu
// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i32> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vnclip.mask.nxv4i32.nxv4i64.nxv4i32.i64(<vscale x 4 x i32> [[MASKEDOFF]], <vscale x 4 x i64> [[OP1]], <vscale x 4 x i32> [[SHIFT]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vnclip.mask.nxv4i32.nxv4i64.nxv4i32.i64(<vscale x 4 x i32> [[MASKEDOFF]], <vscale x 4 x i64> [[OP1]], <vscale x 4 x i32> [[SHIFT]], <vscale x 4 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vnclip_wv_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, vint64m4_t op1, vuint32m2_t shift, size_t vl) {
- return __riscv_vnclip_wv_i32m2_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnclip_wv_i32m2_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i32> @test_vnclip_wx_i32m2_mu
// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vnclip.mask.nxv4i32.nxv4i64.i64.i64(<vscale x 4 x i32> [[MASKEDOFF]], <vscale x 4 x i64> [[OP1]], i64 [[SHIFT]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vnclip.mask.nxv4i32.nxv4i64.i64.i64(<vscale x 4 x i32> [[MASKEDOFF]], <vscale x 4 x i64> [[OP1]], i64 [[SHIFT]], <vscale x 4 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vnclip_wx_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, vint64m4_t op1, size_t shift, size_t vl) {
- return __riscv_vnclip_wx_i32m2_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnclip_wx_i32m2_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i32> @test_vnclip_wv_i32m4_mu
// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i32> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vnclip.mask.nxv8i32.nxv8i64.nxv8i32.i64(<vscale x 8 x i32> [[MASKEDOFF]], <vscale x 8 x i64> [[OP1]], <vscale x 8 x i32> [[SHIFT]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vnclip.mask.nxv8i32.nxv8i64.nxv8i32.i64(<vscale x 8 x i32> [[MASKEDOFF]], <vscale x 8 x i64> [[OP1]], <vscale x 8 x i32> [[SHIFT]], <vscale x 8 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vnclip_wv_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, vint64m8_t op1, vuint32m4_t shift, size_t vl) {
- return __riscv_vnclip_wv_i32m4_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnclip_wv_i32m4_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i32> @test_vnclip_wx_i32m4_mu
// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vnclip.mask.nxv8i32.nxv8i64.i64.i64(<vscale x 8 x i32> [[MASKEDOFF]], <vscale x 8 x i64> [[OP1]], i64 [[SHIFT]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vnclip.mask.nxv8i32.nxv8i64.i64.i64(<vscale x 8 x i32> [[MASKEDOFF]], <vscale x 8 x i64> [[OP1]], i64 [[SHIFT]], <vscale x 8 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vnclip_wx_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, vint64m8_t op1, size_t shift, size_t vl) {
- return __riscv_vnclip_wx_i32m4_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnclip_wx_i32m4_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vnclipu_wv_u8mf8_tu
// CHECK-RV64-SAME: (<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i8> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vnclipu.nxv1i8.nxv1i16.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF]], <vscale x 1 x i16> [[OP1]], <vscale x 1 x i8> [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vnclipu.nxv1i8.nxv1i16.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF]], <vscale x 1 x i16> [[OP1]], <vscale x 1 x i8> [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vnclipu_wv_u8mf8_tu(vuint8mf8_t maskedoff, vuint16mf4_t op1, vuint8mf8_t shift, size_t vl) {
- return __riscv_vnclipu_wv_u8mf8_tu(maskedoff, op1, shift, vl);
+ return __riscv_vnclipu_wv_u8mf8_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vnclipu_wx_u8mf8_tu
// CHECK-RV64-SAME: (<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vnclipu.nxv1i8.nxv1i16.i64.i64(<vscale x 1 x i8> [[MASKEDOFF]], <vscale x 1 x i16> [[OP1]], i64 [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vnclipu.nxv1i8.nxv1i16.i64.i64(<vscale x 1 x i8> [[MASKEDOFF]], <vscale x 1 x i16> [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vnclipu_wx_u8mf8_tu(vuint8mf8_t maskedoff, vuint16mf4_t op1, size_t shift, size_t vl) {
- return __riscv_vnclipu_wx_u8mf8_tu(maskedoff, op1, shift, vl);
+ return __riscv_vnclipu_wx_u8mf8_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vnclipu_wv_u8mf4_tu
// CHECK-RV64-SAME: (<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i8> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vnclipu.nxv2i8.nxv2i16.nxv2i8.i64(<vscale x 2 x i8> [[MASKEDOFF]], <vscale x 2 x i16> [[OP1]], <vscale x 2 x i8> [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vnclipu.nxv2i8.nxv2i16.nxv2i8.i64(<vscale x 2 x i8> [[MASKEDOFF]], <vscale x 2 x i16> [[OP1]], <vscale x 2 x i8> [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vnclipu_wv_u8mf4_tu(vuint8mf4_t maskedoff, vuint16mf2_t op1, vuint8mf4_t shift, size_t vl) {
- return __riscv_vnclipu_wv_u8mf4_tu(maskedoff, op1, shift, vl);
+ return __riscv_vnclipu_wv_u8mf4_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vnclipu_wx_u8mf4_tu
// CHECK-RV64-SAME: (<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vnclipu.nxv2i8.nxv2i16.i64.i64(<vscale x 2 x i8> [[MASKEDOFF]], <vscale x 2 x i16> [[OP1]], i64 [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vnclipu.nxv2i8.nxv2i16.i64.i64(<vscale x 2 x i8> [[MASKEDOFF]], <vscale x 2 x i16> [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vnclipu_wx_u8mf4_tu(vuint8mf4_t maskedoff, vuint16mf2_t op1, size_t shift, size_t vl) {
- return __riscv_vnclipu_wx_u8mf4_tu(maskedoff, op1, shift, vl);
+ return __riscv_vnclipu_wx_u8mf4_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vnclipu_wv_u8mf2_tu
// CHECK-RV64-SAME: (<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i8> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vnclipu.nxv4i8.nxv4i16.nxv4i8.i64(<vscale x 4 x i8> [[MASKEDOFF]], <vscale x 4 x i16> [[OP1]], <vscale x 4 x i8> [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vnclipu.nxv4i8.nxv4i16.nxv4i8.i64(<vscale x 4 x i8> [[MASKEDOFF]], <vscale x 4 x i16> [[OP1]], <vscale x 4 x i8> [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vnclipu_wv_u8mf2_tu(vuint8mf2_t maskedoff, vuint16m1_t op1, vuint8mf2_t shift, size_t vl) {
- return __riscv_vnclipu_wv_u8mf2_tu(maskedoff, op1, shift, vl);
+ return __riscv_vnclipu_wv_u8mf2_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vnclipu_wx_u8mf2_tu
// CHECK-RV64-SAME: (<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vnclipu.nxv4i8.nxv4i16.i64.i64(<vscale x 4 x i8> [[MASKEDOFF]], <vscale x 4 x i16> [[OP1]], i64 [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vnclipu.nxv4i8.nxv4i16.i64.i64(<vscale x 4 x i8> [[MASKEDOFF]], <vscale x 4 x i16> [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vnclipu_wx_u8mf2_tu(vuint8mf2_t maskedoff, vuint16m1_t op1, size_t shift, size_t vl) {
- return __riscv_vnclipu_wx_u8mf2_tu(maskedoff, op1, shift, vl);
+ return __riscv_vnclipu_wx_u8mf2_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vnclipu_wv_u8m1_tu
// CHECK-RV64-SAME: (<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i8> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vnclipu.nxv8i8.nxv8i16.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF]], <vscale x 8 x i16> [[OP1]], <vscale x 8 x i8> [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vnclipu.nxv8i8.nxv8i16.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF]], <vscale x 8 x i16> [[OP1]], <vscale x 8 x i8> [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vnclipu_wv_u8m1_tu(vuint8m1_t maskedoff, vuint16m2_t op1, vuint8m1_t shift, size_t vl) {
- return __riscv_vnclipu_wv_u8m1_tu(maskedoff, op1, shift, vl);
+ return __riscv_vnclipu_wv_u8m1_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vnclipu_wx_u8m1_tu
// CHECK-RV64-SAME: (<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vnclipu.nxv8i8.nxv8i16.i64.i64(<vscale x 8 x i8> [[MASKEDOFF]], <vscale x 8 x i16> [[OP1]], i64 [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vnclipu.nxv8i8.nxv8i16.i64.i64(<vscale x 8 x i8> [[MASKEDOFF]], <vscale x 8 x i16> [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vnclipu_wx_u8m1_tu(vuint8m1_t maskedoff, vuint16m2_t op1, size_t shift, size_t vl) {
- return __riscv_vnclipu_wx_u8m1_tu(maskedoff, op1, shift, vl);
+ return __riscv_vnclipu_wx_u8m1_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vnclipu_wv_u8m2_tu
// CHECK-RV64-SAME: (<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i8> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vnclipu.nxv16i8.nxv16i16.nxv16i8.i64(<vscale x 16 x i8> [[MASKEDOFF]], <vscale x 16 x i16> [[OP1]], <vscale x 16 x i8> [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vnclipu.nxv16i8.nxv16i16.nxv16i8.i64(<vscale x 16 x i8> [[MASKEDOFF]], <vscale x 16 x i16> [[OP1]], <vscale x 16 x i8> [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vnclipu_wv_u8m2_tu(vuint8m2_t maskedoff, vuint16m4_t op1, vuint8m2_t shift, size_t vl) {
- return __riscv_vnclipu_wv_u8m2_tu(maskedoff, op1, shift, vl);
+ return __riscv_vnclipu_wv_u8m2_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vnclipu_wx_u8m2_tu
// CHECK-RV64-SAME: (<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vnclipu.nxv16i8.nxv16i16.i64.i64(<vscale x 16 x i8> [[MASKEDOFF]], <vscale x 16 x i16> [[OP1]], i64 [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vnclipu.nxv16i8.nxv16i16.i64.i64(<vscale x 16 x i8> [[MASKEDOFF]], <vscale x 16 x i16> [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vnclipu_wx_u8m2_tu(vuint8m2_t maskedoff, vuint16m4_t op1, size_t shift, size_t vl) {
- return __riscv_vnclipu_wx_u8m2_tu(maskedoff, op1, shift, vl);
+ return __riscv_vnclipu_wx_u8m2_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vnclipu_wv_u8m4_tu
// CHECK-RV64-SAME: (<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i8> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vnclipu.nxv32i8.nxv32i16.nxv32i8.i64(<vscale x 32 x i8> [[MASKEDOFF]], <vscale x 32 x i16> [[OP1]], <vscale x 32 x i8> [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vnclipu.nxv32i8.nxv32i16.nxv32i8.i64(<vscale x 32 x i8> [[MASKEDOFF]], <vscale x 32 x i16> [[OP1]], <vscale x 32 x i8> [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vnclipu_wv_u8m4_tu(vuint8m4_t maskedoff, vuint16m8_t op1, vuint8m4_t shift, size_t vl) {
- return __riscv_vnclipu_wv_u8m4_tu(maskedoff, op1, shift, vl);
+ return __riscv_vnclipu_wv_u8m4_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vnclipu_wx_u8m4_tu
// CHECK-RV64-SAME: (<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vnclipu.nxv32i8.nxv32i16.i64.i64(<vscale x 32 x i8> [[MASKEDOFF]], <vscale x 32 x i16> [[OP1]], i64 [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vnclipu.nxv32i8.nxv32i16.i64.i64(<vscale x 32 x i8> [[MASKEDOFF]], <vscale x 32 x i16> [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vnclipu_wx_u8m4_tu(vuint8m4_t maskedoff, vuint16m8_t op1, size_t shift, size_t vl) {
- return __riscv_vnclipu_wx_u8m4_tu(maskedoff, op1, shift, vl);
+ return __riscv_vnclipu_wx_u8m4_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i16> @test_vnclipu_wv_u16mf4_tu
// CHECK-RV64-SAME: (<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i16> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vnclipu.nxv1i16.nxv1i32.nxv1i16.i64(<vscale x 1 x i16> [[MASKEDOFF]], <vscale x 1 x i32> [[OP1]], <vscale x 1 x i16> [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vnclipu.nxv1i16.nxv1i32.nxv1i16.i64(<vscale x 1 x i16> [[MASKEDOFF]], <vscale x 1 x i32> [[OP1]], <vscale x 1 x i16> [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vnclipu_wv_u16mf4_tu(vuint16mf4_t maskedoff, vuint32mf2_t op1, vuint16mf4_t shift, size_t vl) {
- return __riscv_vnclipu_wv_u16mf4_tu(maskedoff, op1, shift, vl);
+ return __riscv_vnclipu_wv_u16mf4_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i16> @test_vnclipu_wx_u16mf4_tu
// CHECK-RV64-SAME: (<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vnclipu.nxv1i16.nxv1i32.i64.i64(<vscale x 1 x i16> [[MASKEDOFF]], <vscale x 1 x i32> [[OP1]], i64 [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vnclipu.nxv1i16.nxv1i32.i64.i64(<vscale x 1 x i16> [[MASKEDOFF]], <vscale x 1 x i32> [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vnclipu_wx_u16mf4_tu(vuint16mf4_t maskedoff, vuint32mf2_t op1, size_t shift, size_t vl) {
- return __riscv_vnclipu_wx_u16mf4_tu(maskedoff, op1, shift, vl);
+ return __riscv_vnclipu_wx_u16mf4_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i16> @test_vnclipu_wv_u16mf2_tu
// CHECK-RV64-SAME: (<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i16> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vnclipu.nxv2i16.nxv2i32.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF]], <vscale x 2 x i32> [[OP1]], <vscale x 2 x i16> [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vnclipu.nxv2i16.nxv2i32.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF]], <vscale x 2 x i32> [[OP1]], <vscale x 2 x i16> [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vnclipu_wv_u16mf2_tu(vuint16mf2_t maskedoff, vuint32m1_t op1, vuint16mf2_t shift, size_t vl) {
- return __riscv_vnclipu_wv_u16mf2_tu(maskedoff, op1, shift, vl);
+ return __riscv_vnclipu_wv_u16mf2_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i16> @test_vnclipu_wx_u16mf2_tu
// CHECK-RV64-SAME: (<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vnclipu.nxv2i16.nxv2i32.i64.i64(<vscale x 2 x i16> [[MASKEDOFF]], <vscale x 2 x i32> [[OP1]], i64 [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vnclipu.nxv2i16.nxv2i32.i64.i64(<vscale x 2 x i16> [[MASKEDOFF]], <vscale x 2 x i32> [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vnclipu_wx_u16mf2_tu(vuint16mf2_t maskedoff, vuint32m1_t op1, size_t shift, size_t vl) {
- return __riscv_vnclipu_wx_u16mf2_tu(maskedoff, op1, shift, vl);
+ return __riscv_vnclipu_wx_u16mf2_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vnclipu_wv_u16m1_tu
// CHECK-RV64-SAME: (<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i16> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vnclipu.nxv4i16.nxv4i32.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF]], <vscale x 4 x i32> [[OP1]], <vscale x 4 x i16> [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vnclipu.nxv4i16.nxv4i32.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF]], <vscale x 4 x i32> [[OP1]], <vscale x 4 x i16> [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vnclipu_wv_u16m1_tu(vuint16m1_t maskedoff, vuint32m2_t op1, vuint16m1_t shift, size_t vl) {
- return __riscv_vnclipu_wv_u16m1_tu(maskedoff, op1, shift, vl);
+ return __riscv_vnclipu_wv_u16m1_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vnclipu_wx_u16m1_tu
// CHECK-RV64-SAME: (<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vnclipu.nxv4i16.nxv4i32.i64.i64(<vscale x 4 x i16> [[MASKEDOFF]], <vscale x 4 x i32> [[OP1]], i64 [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vnclipu.nxv4i16.nxv4i32.i64.i64(<vscale x 4 x i16> [[MASKEDOFF]], <vscale x 4 x i32> [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vnclipu_wx_u16m1_tu(vuint16m1_t maskedoff, vuint32m2_t op1, size_t shift, size_t vl) {
- return __riscv_vnclipu_wx_u16m1_tu(maskedoff, op1, shift, vl);
+ return __riscv_vnclipu_wx_u16m1_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i16> @test_vnclipu_wv_u16m2_tu
// CHECK-RV64-SAME: (<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i16> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vnclipu.nxv8i16.nxv8i32.nxv8i16.i64(<vscale x 8 x i16> [[MASKEDOFF]], <vscale x 8 x i32> [[OP1]], <vscale x 8 x i16> [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vnclipu.nxv8i16.nxv8i32.nxv8i16.i64(<vscale x 8 x i16> [[MASKEDOFF]], <vscale x 8 x i32> [[OP1]], <vscale x 8 x i16> [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vnclipu_wv_u16m2_tu(vuint16m2_t maskedoff, vuint32m4_t op1, vuint16m2_t shift, size_t vl) {
- return __riscv_vnclipu_wv_u16m2_tu(maskedoff, op1, shift, vl);
+ return __riscv_vnclipu_wv_u16m2_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i16> @test_vnclipu_wx_u16m2_tu
// CHECK-RV64-SAME: (<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vnclipu.nxv8i16.nxv8i32.i64.i64(<vscale x 8 x i16> [[MASKEDOFF]], <vscale x 8 x i32> [[OP1]], i64 [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vnclipu.nxv8i16.nxv8i32.i64.i64(<vscale x 8 x i16> [[MASKEDOFF]], <vscale x 8 x i32> [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vnclipu_wx_u16m2_tu(vuint16m2_t maskedoff, vuint32m4_t op1, size_t shift, size_t vl) {
- return __riscv_vnclipu_wx_u16m2_tu(maskedoff, op1, shift, vl);
+ return __riscv_vnclipu_wx_u16m2_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i16> @test_vnclipu_wv_u16m4_tu
// CHECK-RV64-SAME: (<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i16> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vnclipu.nxv16i16.nxv16i32.nxv16i16.i64(<vscale x 16 x i16> [[MASKEDOFF]], <vscale x 16 x i32> [[OP1]], <vscale x 16 x i16> [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vnclipu.nxv16i16.nxv16i32.nxv16i16.i64(<vscale x 16 x i16> [[MASKEDOFF]], <vscale x 16 x i32> [[OP1]], <vscale x 16 x i16> [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vnclipu_wv_u16m4_tu(vuint16m4_t maskedoff, vuint32m8_t op1, vuint16m4_t shift, size_t vl) {
- return __riscv_vnclipu_wv_u16m4_tu(maskedoff, op1, shift, vl);
+ return __riscv_vnclipu_wv_u16m4_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i16> @test_vnclipu_wx_u16m4_tu
// CHECK-RV64-SAME: (<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vnclipu.nxv16i16.nxv16i32.i64.i64(<vscale x 16 x i16> [[MASKEDOFF]], <vscale x 16 x i32> [[OP1]], i64 [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vnclipu.nxv16i16.nxv16i32.i64.i64(<vscale x 16 x i16> [[MASKEDOFF]], <vscale x 16 x i32> [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vnclipu_wx_u16m4_tu(vuint16m4_t maskedoff, vuint32m8_t op1, size_t shift, size_t vl) {
- return __riscv_vnclipu_wx_u16m4_tu(maskedoff, op1, shift, vl);
+ return __riscv_vnclipu_wx_u16m4_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i32> @test_vnclipu_wv_u32mf2_tu
// CHECK-RV64-SAME: (<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i32> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnclipu.nxv1i32.nxv1i64.nxv1i32.i64(<vscale x 1 x i32> [[MASKEDOFF]], <vscale x 1 x i64> [[OP1]], <vscale x 1 x i32> [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnclipu.nxv1i32.nxv1i64.nxv1i32.i64(<vscale x 1 x i32> [[MASKEDOFF]], <vscale x 1 x i64> [[OP1]], <vscale x 1 x i32> [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vnclipu_wv_u32mf2_tu(vuint32mf2_t maskedoff, vuint64m1_t op1, vuint32mf2_t shift, size_t vl) {
- return __riscv_vnclipu_wv_u32mf2_tu(maskedoff, op1, shift, vl);
+ return __riscv_vnclipu_wv_u32mf2_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i32> @test_vnclipu_wx_u32mf2_tu
// CHECK-RV64-SAME: (<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnclipu.nxv1i32.nxv1i64.i64.i64(<vscale x 1 x i32> [[MASKEDOFF]], <vscale x 1 x i64> [[OP1]], i64 [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnclipu.nxv1i32.nxv1i64.i64.i64(<vscale x 1 x i32> [[MASKEDOFF]], <vscale x 1 x i64> [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vnclipu_wx_u32mf2_tu(vuint32mf2_t maskedoff, vuint64m1_t op1, size_t shift, size_t vl) {
- return __riscv_vnclipu_wx_u32mf2_tu(maskedoff, op1, shift, vl);
+ return __riscv_vnclipu_wx_u32mf2_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i32> @test_vnclipu_wv_u32m1_tu
// CHECK-RV64-SAME: (<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i32> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vnclipu.nxv2i32.nxv2i64.nxv2i32.i64(<vscale x 2 x i32> [[MASKEDOFF]], <vscale x 2 x i64> [[OP1]], <vscale x 2 x i32> [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vnclipu.nxv2i32.nxv2i64.nxv2i32.i64(<vscale x 2 x i32> [[MASKEDOFF]], <vscale x 2 x i64> [[OP1]], <vscale x 2 x i32> [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vnclipu_wv_u32m1_tu(vuint32m1_t maskedoff, vuint64m2_t op1, vuint32m1_t shift, size_t vl) {
- return __riscv_vnclipu_wv_u32m1_tu(maskedoff, op1, shift, vl);
+ return __riscv_vnclipu_wv_u32m1_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i32> @test_vnclipu_wx_u32m1_tu
// CHECK-RV64-SAME: (<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vnclipu.nxv2i32.nxv2i64.i64.i64(<vscale x 2 x i32> [[MASKEDOFF]], <vscale x 2 x i64> [[OP1]], i64 [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vnclipu.nxv2i32.nxv2i64.i64.i64(<vscale x 2 x i32> [[MASKEDOFF]], <vscale x 2 x i64> [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vnclipu_wx_u32m1_tu(vuint32m1_t maskedoff, vuint64m2_t op1, size_t shift, size_t vl) {
- return __riscv_vnclipu_wx_u32m1_tu(maskedoff, op1, shift, vl);
+ return __riscv_vnclipu_wx_u32m1_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i32> @test_vnclipu_wv_u32m2_tu
// CHECK-RV64-SAME: (<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i32> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vnclipu.nxv4i32.nxv4i64.nxv4i32.i64(<vscale x 4 x i32> [[MASKEDOFF]], <vscale x 4 x i64> [[OP1]], <vscale x 4 x i32> [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vnclipu.nxv4i32.nxv4i64.nxv4i32.i64(<vscale x 4 x i32> [[MASKEDOFF]], <vscale x 4 x i64> [[OP1]], <vscale x 4 x i32> [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vnclipu_wv_u32m2_tu(vuint32m2_t maskedoff, vuint64m4_t op1, vuint32m2_t shift, size_t vl) {
- return __riscv_vnclipu_wv_u32m2_tu(maskedoff, op1, shift, vl);
+ return __riscv_vnclipu_wv_u32m2_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i32> @test_vnclipu_wx_u32m2_tu
// CHECK-RV64-SAME: (<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vnclipu.nxv4i32.nxv4i64.i64.i64(<vscale x 4 x i32> [[MASKEDOFF]], <vscale x 4 x i64> [[OP1]], i64 [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vnclipu.nxv4i32.nxv4i64.i64.i64(<vscale x 4 x i32> [[MASKEDOFF]], <vscale x 4 x i64> [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vnclipu_wx_u32m2_tu(vuint32m2_t maskedoff, vuint64m4_t op1, size_t shift, size_t vl) {
- return __riscv_vnclipu_wx_u32m2_tu(maskedoff, op1, shift, vl);
+ return __riscv_vnclipu_wx_u32m2_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i32> @test_vnclipu_wv_u32m4_tu
// CHECK-RV64-SAME: (<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i32> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vnclipu.nxv8i32.nxv8i64.nxv8i32.i64(<vscale x 8 x i32> [[MASKEDOFF]], <vscale x 8 x i64> [[OP1]], <vscale x 8 x i32> [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vnclipu.nxv8i32.nxv8i64.nxv8i32.i64(<vscale x 8 x i32> [[MASKEDOFF]], <vscale x 8 x i64> [[OP1]], <vscale x 8 x i32> [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vnclipu_wv_u32m4_tu(vuint32m4_t maskedoff, vuint64m8_t op1, vuint32m4_t shift, size_t vl) {
- return __riscv_vnclipu_wv_u32m4_tu(maskedoff, op1, shift, vl);
+ return __riscv_vnclipu_wv_u32m4_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i32> @test_vnclipu_wx_u32m4_tu
// CHECK-RV64-SAME: (<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vnclipu.nxv8i32.nxv8i64.i64.i64(<vscale x 8 x i32> [[MASKEDOFF]], <vscale x 8 x i64> [[OP1]], i64 [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vnclipu.nxv8i32.nxv8i64.i64.i64(<vscale x 8 x i32> [[MASKEDOFF]], <vscale x 8 x i64> [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vnclipu_wx_u32m4_tu(vuint32m4_t maskedoff, vuint64m8_t op1, size_t shift, size_t vl) {
- return __riscv_vnclipu_wx_u32m4_tu(maskedoff, op1, shift, vl);
+ return __riscv_vnclipu_wx_u32m4_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vnclipu_wv_u8mf8_tum
// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i8> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vnclipu.mask.nxv1i8.nxv1i16.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF]], <vscale x 1 x i16> [[OP1]], <vscale x 1 x i8> [[SHIFT]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vnclipu.mask.nxv1i8.nxv1i16.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF]], <vscale x 1 x i16> [[OP1]], <vscale x 1 x i8> [[SHIFT]], <vscale x 1 x i1> [[MASK]], i64 0, i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vnclipu_wv_u8mf8_tum(vbool64_t mask, vuint8mf8_t maskedoff, vuint16mf4_t op1, vuint8mf8_t shift, size_t vl) {
- return __riscv_vnclipu_wv_u8mf8_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnclipu_wv_u8mf8_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vnclipu_wx_u8mf8_tum
// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vnclipu.mask.nxv1i8.nxv1i16.i64.i64(<vscale x 1 x i8> [[MASKEDOFF]], <vscale x 1 x i16> [[OP1]], i64 [[SHIFT]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vnclipu.mask.nxv1i8.nxv1i16.i64.i64(<vscale x 1 x i8> [[MASKEDOFF]], <vscale x 1 x i16> [[OP1]], i64 [[SHIFT]], <vscale x 1 x i1> [[MASK]], i64 0, i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vnclipu_wx_u8mf8_tum(vbool64_t mask, vuint8mf8_t maskedoff, vuint16mf4_t op1, size_t shift, size_t vl) {
- return __riscv_vnclipu_wx_u8mf8_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnclipu_wx_u8mf8_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vnclipu_wv_u8mf4_tum
// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i8> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vnclipu.mask.nxv2i8.nxv2i16.nxv2i8.i64(<vscale x 2 x i8> [[MASKEDOFF]], <vscale x 2 x i16> [[OP1]], <vscale x 2 x i8> [[SHIFT]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vnclipu.mask.nxv2i8.nxv2i16.nxv2i8.i64(<vscale x 2 x i8> [[MASKEDOFF]], <vscale x 2 x i16> [[OP1]], <vscale x 2 x i8> [[SHIFT]], <vscale x 2 x i1> [[MASK]], i64 0, i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vnclipu_wv_u8mf4_tum(vbool32_t mask, vuint8mf4_t maskedoff, vuint16mf2_t op1, vuint8mf4_t shift, size_t vl) {
- return __riscv_vnclipu_wv_u8mf4_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnclipu_wv_u8mf4_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vnclipu_wx_u8mf4_tum
// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vnclipu.mask.nxv2i8.nxv2i16.i64.i64(<vscale x 2 x i8> [[MASKEDOFF]], <vscale x 2 x i16> [[OP1]], i64 [[SHIFT]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vnclipu.mask.nxv2i8.nxv2i16.i64.i64(<vscale x 2 x i8> [[MASKEDOFF]], <vscale x 2 x i16> [[OP1]], i64 [[SHIFT]], <vscale x 2 x i1> [[MASK]], i64 0, i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vnclipu_wx_u8mf4_tum(vbool32_t mask, vuint8mf4_t maskedoff, vuint16mf2_t op1, size_t shift, size_t vl) {
- return __riscv_vnclipu_wx_u8mf4_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnclipu_wx_u8mf4_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vnclipu_wv_u8mf2_tum
// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i8> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vnclipu.mask.nxv4i8.nxv4i16.nxv4i8.i64(<vscale x 4 x i8> [[MASKEDOFF]], <vscale x 4 x i16> [[OP1]], <vscale x 4 x i8> [[SHIFT]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vnclipu.mask.nxv4i8.nxv4i16.nxv4i8.i64(<vscale x 4 x i8> [[MASKEDOFF]], <vscale x 4 x i16> [[OP1]], <vscale x 4 x i8> [[SHIFT]], <vscale x 4 x i1> [[MASK]], i64 0, i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vnclipu_wv_u8mf2_tum(vbool16_t mask, vuint8mf2_t maskedoff, vuint16m1_t op1, vuint8mf2_t shift, size_t vl) {
- return __riscv_vnclipu_wv_u8mf2_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnclipu_wv_u8mf2_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vnclipu_wx_u8mf2_tum
// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vnclipu.mask.nxv4i8.nxv4i16.i64.i64(<vscale x 4 x i8> [[MASKEDOFF]], <vscale x 4 x i16> [[OP1]], i64 [[SHIFT]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vnclipu.mask.nxv4i8.nxv4i16.i64.i64(<vscale x 4 x i8> [[MASKEDOFF]], <vscale x 4 x i16> [[OP1]], i64 [[SHIFT]], <vscale x 4 x i1> [[MASK]], i64 0, i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vnclipu_wx_u8mf2_tum(vbool16_t mask, vuint8mf2_t maskedoff, vuint16m1_t op1, size_t shift, size_t vl) {
- return __riscv_vnclipu_wx_u8mf2_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnclipu_wx_u8mf2_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vnclipu_wv_u8m1_tum
// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i8> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vnclipu.mask.nxv8i8.nxv8i16.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF]], <vscale x 8 x i16> [[OP1]], <vscale x 8 x i8> [[SHIFT]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vnclipu.mask.nxv8i8.nxv8i16.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF]], <vscale x 8 x i16> [[OP1]], <vscale x 8 x i8> [[SHIFT]], <vscale x 8 x i1> [[MASK]], i64 0, i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vnclipu_wv_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff, vuint16m2_t op1, vuint8m1_t shift, size_t vl) {
- return __riscv_vnclipu_wv_u8m1_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnclipu_wv_u8m1_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vnclipu_wx_u8m1_tum
// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vnclipu.mask.nxv8i8.nxv8i16.i64.i64(<vscale x 8 x i8> [[MASKEDOFF]], <vscale x 8 x i16> [[OP1]], i64 [[SHIFT]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vnclipu.mask.nxv8i8.nxv8i16.i64.i64(<vscale x 8 x i8> [[MASKEDOFF]], <vscale x 8 x i16> [[OP1]], i64 [[SHIFT]], <vscale x 8 x i1> [[MASK]], i64 0, i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vnclipu_wx_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff, vuint16m2_t op1, size_t shift, size_t vl) {
- return __riscv_vnclipu_wx_u8m1_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnclipu_wx_u8m1_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vnclipu_wv_u8m2_tum
// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i8> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vnclipu.mask.nxv16i8.nxv16i16.nxv16i8.i64(<vscale x 16 x i8> [[MASKEDOFF]], <vscale x 16 x i16> [[OP1]], <vscale x 16 x i8> [[SHIFT]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vnclipu.mask.nxv16i8.nxv16i16.nxv16i8.i64(<vscale x 16 x i8> [[MASKEDOFF]], <vscale x 16 x i16> [[OP1]], <vscale x 16 x i8> [[SHIFT]], <vscale x 16 x i1> [[MASK]], i64 0, i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vnclipu_wv_u8m2_tum(vbool4_t mask, vuint8m2_t maskedoff, vuint16m4_t op1, vuint8m2_t shift, size_t vl) {
- return __riscv_vnclipu_wv_u8m2_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnclipu_wv_u8m2_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vnclipu_wx_u8m2_tum
// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vnclipu.mask.nxv16i8.nxv16i16.i64.i64(<vscale x 16 x i8> [[MASKEDOFF]], <vscale x 16 x i16> [[OP1]], i64 [[SHIFT]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vnclipu.mask.nxv16i8.nxv16i16.i64.i64(<vscale x 16 x i8> [[MASKEDOFF]], <vscale x 16 x i16> [[OP1]], i64 [[SHIFT]], <vscale x 16 x i1> [[MASK]], i64 0, i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vnclipu_wx_u8m2_tum(vbool4_t mask, vuint8m2_t maskedoff, vuint16m4_t op1, size_t shift, size_t vl) {
- return __riscv_vnclipu_wx_u8m2_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnclipu_wx_u8m2_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vnclipu_wv_u8m4_tum
// CHECK-RV64-SAME: (<vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i8> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vnclipu.mask.nxv32i8.nxv32i16.nxv32i8.i64(<vscale x 32 x i8> [[MASKEDOFF]], <vscale x 32 x i16> [[OP1]], <vscale x 32 x i8> [[SHIFT]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vnclipu.mask.nxv32i8.nxv32i16.nxv32i8.i64(<vscale x 32 x i8> [[MASKEDOFF]], <vscale x 32 x i16> [[OP1]], <vscale x 32 x i8> [[SHIFT]], <vscale x 32 x i1> [[MASK]], i64 0, i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vnclipu_wv_u8m4_tum(vbool2_t mask, vuint8m4_t maskedoff, vuint16m8_t op1, vuint8m4_t shift, size_t vl) {
- return __riscv_vnclipu_wv_u8m4_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnclipu_wv_u8m4_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vnclipu_wx_u8m4_tum
// CHECK-RV64-SAME: (<vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vnclipu.mask.nxv32i8.nxv32i16.i64.i64(<vscale x 32 x i8> [[MASKEDOFF]], <vscale x 32 x i16> [[OP1]], i64 [[SHIFT]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vnclipu.mask.nxv32i8.nxv32i16.i64.i64(<vscale x 32 x i8> [[MASKEDOFF]], <vscale x 32 x i16> [[OP1]], i64 [[SHIFT]], <vscale x 32 x i1> [[MASK]], i64 0, i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vnclipu_wx_u8m4_tum(vbool2_t mask, vuint8m4_t maskedoff, vuint16m8_t op1, size_t shift, size_t vl) {
- return __riscv_vnclipu_wx_u8m4_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnclipu_wx_u8m4_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i16> @test_vnclipu_wv_u16mf4_tum
// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i16> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vnclipu.mask.nxv1i16.nxv1i32.nxv1i16.i64(<vscale x 1 x i16> [[MASKEDOFF]], <vscale x 1 x i32> [[OP1]], <vscale x 1 x i16> [[SHIFT]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vnclipu.mask.nxv1i16.nxv1i32.nxv1i16.i64(<vscale x 1 x i16> [[MASKEDOFF]], <vscale x 1 x i32> [[OP1]], <vscale x 1 x i16> [[SHIFT]], <vscale x 1 x i1> [[MASK]], i64 0, i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vnclipu_wv_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, vuint32mf2_t op1, vuint16mf4_t shift, size_t vl) {
- return __riscv_vnclipu_wv_u16mf4_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnclipu_wv_u16mf4_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i16> @test_vnclipu_wx_u16mf4_tum
// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vnclipu.mask.nxv1i16.nxv1i32.i64.i64(<vscale x 1 x i16> [[MASKEDOFF]], <vscale x 1 x i32> [[OP1]], i64 [[SHIFT]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vnclipu.mask.nxv1i16.nxv1i32.i64.i64(<vscale x 1 x i16> [[MASKEDOFF]], <vscale x 1 x i32> [[OP1]], i64 [[SHIFT]], <vscale x 1 x i1> [[MASK]], i64 0, i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vnclipu_wx_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, vuint32mf2_t op1, size_t shift, size_t vl) {
- return __riscv_vnclipu_wx_u16mf4_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnclipu_wx_u16mf4_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i16> @test_vnclipu_wv_u16mf2_tum
// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i16> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vnclipu.mask.nxv2i16.nxv2i32.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF]], <vscale x 2 x i32> [[OP1]], <vscale x 2 x i16> [[SHIFT]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vnclipu.mask.nxv2i16.nxv2i32.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF]], <vscale x 2 x i32> [[OP1]], <vscale x 2 x i16> [[SHIFT]], <vscale x 2 x i1> [[MASK]], i64 0, i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vnclipu_wv_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, vuint32m1_t op1, vuint16mf2_t shift, size_t vl) {
- return __riscv_vnclipu_wv_u16mf2_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnclipu_wv_u16mf2_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i16> @test_vnclipu_wx_u16mf2_tum
// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vnclipu.mask.nxv2i16.nxv2i32.i64.i64(<vscale x 2 x i16> [[MASKEDOFF]], <vscale x 2 x i32> [[OP1]], i64 [[SHIFT]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vnclipu.mask.nxv2i16.nxv2i32.i64.i64(<vscale x 2 x i16> [[MASKEDOFF]], <vscale x 2 x i32> [[OP1]], i64 [[SHIFT]], <vscale x 2 x i1> [[MASK]], i64 0, i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vnclipu_wx_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, vuint32m1_t op1, size_t shift, size_t vl) {
- return __riscv_vnclipu_wx_u16mf2_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnclipu_wx_u16mf2_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vnclipu_wv_u16m1_tum
// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i16> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vnclipu.mask.nxv4i16.nxv4i32.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF]], <vscale x 4 x i32> [[OP1]], <vscale x 4 x i16> [[SHIFT]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vnclipu.mask.nxv4i16.nxv4i32.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF]], <vscale x 4 x i32> [[OP1]], <vscale x 4 x i16> [[SHIFT]], <vscale x 4 x i1> [[MASK]], i64 0, i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vnclipu_wv_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vuint32m2_t op1, vuint16m1_t shift, size_t vl) {
- return __riscv_vnclipu_wv_u16m1_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnclipu_wv_u16m1_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vnclipu_wx_u16m1_tum
// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vnclipu.mask.nxv4i16.nxv4i32.i64.i64(<vscale x 4 x i16> [[MASKEDOFF]], <vscale x 4 x i32> [[OP1]], i64 [[SHIFT]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vnclipu.mask.nxv4i16.nxv4i32.i64.i64(<vscale x 4 x i16> [[MASKEDOFF]], <vscale x 4 x i32> [[OP1]], i64 [[SHIFT]], <vscale x 4 x i1> [[MASK]], i64 0, i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vnclipu_wx_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vuint32m2_t op1, size_t shift, size_t vl) {
- return __riscv_vnclipu_wx_u16m1_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnclipu_wx_u16m1_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i16> @test_vnclipu_wv_u16m2_tum
// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i16> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vnclipu.mask.nxv8i16.nxv8i32.nxv8i16.i64(<vscale x 8 x i16> [[MASKEDOFF]], <vscale x 8 x i32> [[OP1]], <vscale x 8 x i16> [[SHIFT]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vnclipu.mask.nxv8i16.nxv8i32.nxv8i16.i64(<vscale x 8 x i16> [[MASKEDOFF]], <vscale x 8 x i32> [[OP1]], <vscale x 8 x i16> [[SHIFT]], <vscale x 8 x i1> [[MASK]], i64 0, i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vnclipu_wv_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, vuint32m4_t op1, vuint16m2_t shift, size_t vl) {
- return __riscv_vnclipu_wv_u16m2_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnclipu_wv_u16m2_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i16> @test_vnclipu_wx_u16m2_tum
// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vnclipu.mask.nxv8i16.nxv8i32.i64.i64(<vscale x 8 x i16> [[MASKEDOFF]], <vscale x 8 x i32> [[OP1]], i64 [[SHIFT]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vnclipu.mask.nxv8i16.nxv8i32.i64.i64(<vscale x 8 x i16> [[MASKEDOFF]], <vscale x 8 x i32> [[OP1]], i64 [[SHIFT]], <vscale x 8 x i1> [[MASK]], i64 0, i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vnclipu_wx_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, vuint32m4_t op1, size_t shift, size_t vl) {
- return __riscv_vnclipu_wx_u16m2_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnclipu_wx_u16m2_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i16> @test_vnclipu_wv_u16m4_tum
// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i16> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vnclipu.mask.nxv16i16.nxv16i32.nxv16i16.i64(<vscale x 16 x i16> [[MASKEDOFF]], <vscale x 16 x i32> [[OP1]], <vscale x 16 x i16> [[SHIFT]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vnclipu.mask.nxv16i16.nxv16i32.nxv16i16.i64(<vscale x 16 x i16> [[MASKEDOFF]], <vscale x 16 x i32> [[OP1]], <vscale x 16 x i16> [[SHIFT]], <vscale x 16 x i1> [[MASK]], i64 0, i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vnclipu_wv_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, vuint32m8_t op1, vuint16m4_t shift, size_t vl) {
- return __riscv_vnclipu_wv_u16m4_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnclipu_wv_u16m4_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i16> @test_vnclipu_wx_u16m4_tum
// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vnclipu.mask.nxv16i16.nxv16i32.i64.i64(<vscale x 16 x i16> [[MASKEDOFF]], <vscale x 16 x i32> [[OP1]], i64 [[SHIFT]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vnclipu.mask.nxv16i16.nxv16i32.i64.i64(<vscale x 16 x i16> [[MASKEDOFF]], <vscale x 16 x i32> [[OP1]], i64 [[SHIFT]], <vscale x 16 x i1> [[MASK]], i64 0, i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vnclipu_wx_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, vuint32m8_t op1, size_t shift, size_t vl) {
- return __riscv_vnclipu_wx_u16m4_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnclipu_wx_u16m4_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i32> @test_vnclipu_wv_u32mf2_tum
// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i32> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnclipu.mask.nxv1i32.nxv1i64.nxv1i32.i64(<vscale x 1 x i32> [[MASKEDOFF]], <vscale x 1 x i64> [[OP1]], <vscale x 1 x i32> [[SHIFT]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnclipu.mask.nxv1i32.nxv1i64.nxv1i32.i64(<vscale x 1 x i32> [[MASKEDOFF]], <vscale x 1 x i64> [[OP1]], <vscale x 1 x i32> [[SHIFT]], <vscale x 1 x i1> [[MASK]], i64 0, i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vnclipu_wv_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, vuint64m1_t op1, vuint32mf2_t shift, size_t vl) {
- return __riscv_vnclipu_wv_u32mf2_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnclipu_wv_u32mf2_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i32> @test_vnclipu_wx_u32mf2_tum
// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnclipu.mask.nxv1i32.nxv1i64.i64.i64(<vscale x 1 x i32> [[MASKEDOFF]], <vscale x 1 x i64> [[OP1]], i64 [[SHIFT]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnclipu.mask.nxv1i32.nxv1i64.i64.i64(<vscale x 1 x i32> [[MASKEDOFF]], <vscale x 1 x i64> [[OP1]], i64 [[SHIFT]], <vscale x 1 x i1> [[MASK]], i64 0, i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vnclipu_wx_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, vuint64m1_t op1, size_t shift, size_t vl) {
- return __riscv_vnclipu_wx_u32mf2_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnclipu_wx_u32mf2_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i32> @test_vnclipu_wv_u32m1_tum
// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i32> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vnclipu.mask.nxv2i32.nxv2i64.nxv2i32.i64(<vscale x 2 x i32> [[MASKEDOFF]], <vscale x 2 x i64> [[OP1]], <vscale x 2 x i32> [[SHIFT]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vnclipu.mask.nxv2i32.nxv2i64.nxv2i32.i64(<vscale x 2 x i32> [[MASKEDOFF]], <vscale x 2 x i64> [[OP1]], <vscale x 2 x i32> [[SHIFT]], <vscale x 2 x i1> [[MASK]], i64 0, i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vnclipu_wv_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vuint64m2_t op1, vuint32m1_t shift, size_t vl) {
- return __riscv_vnclipu_wv_u32m1_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnclipu_wv_u32m1_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i32> @test_vnclipu_wx_u32m1_tum
// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vnclipu.mask.nxv2i32.nxv2i64.i64.i64(<vscale x 2 x i32> [[MASKEDOFF]], <vscale x 2 x i64> [[OP1]], i64 [[SHIFT]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vnclipu.mask.nxv2i32.nxv2i64.i64.i64(<vscale x 2 x i32> [[MASKEDOFF]], <vscale x 2 x i64> [[OP1]], i64 [[SHIFT]], <vscale x 2 x i1> [[MASK]], i64 0, i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vnclipu_wx_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vuint64m2_t op1, size_t shift, size_t vl) {
- return __riscv_vnclipu_wx_u32m1_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnclipu_wx_u32m1_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i32> @test_vnclipu_wv_u32m2_tum
// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i32> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vnclipu.mask.nxv4i32.nxv4i64.nxv4i32.i64(<vscale x 4 x i32> [[MASKEDOFF]], <vscale x 4 x i64> [[OP1]], <vscale x 4 x i32> [[SHIFT]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vnclipu.mask.nxv4i32.nxv4i64.nxv4i32.i64(<vscale x 4 x i32> [[MASKEDOFF]], <vscale x 4 x i64> [[OP1]], <vscale x 4 x i32> [[SHIFT]], <vscale x 4 x i1> [[MASK]], i64 0, i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vnclipu_wv_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vuint64m4_t op1, vuint32m2_t shift, size_t vl) {
- return __riscv_vnclipu_wv_u32m2_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnclipu_wv_u32m2_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i32> @test_vnclipu_wx_u32m2_tum
// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vnclipu.mask.nxv4i32.nxv4i64.i64.i64(<vscale x 4 x i32> [[MASKEDOFF]], <vscale x 4 x i64> [[OP1]], i64 [[SHIFT]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vnclipu.mask.nxv4i32.nxv4i64.i64.i64(<vscale x 4 x i32> [[MASKEDOFF]], <vscale x 4 x i64> [[OP1]], i64 [[SHIFT]], <vscale x 4 x i1> [[MASK]], i64 0, i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vnclipu_wx_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vuint64m4_t op1, size_t shift, size_t vl) {
- return __riscv_vnclipu_wx_u32m2_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnclipu_wx_u32m2_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i32> @test_vnclipu_wv_u32m4_tum
// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i32> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vnclipu.mask.nxv8i32.nxv8i64.nxv8i32.i64(<vscale x 8 x i32> [[MASKEDOFF]], <vscale x 8 x i64> [[OP1]], <vscale x 8 x i32> [[SHIFT]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vnclipu.mask.nxv8i32.nxv8i64.nxv8i32.i64(<vscale x 8 x i32> [[MASKEDOFF]], <vscale x 8 x i64> [[OP1]], <vscale x 8 x i32> [[SHIFT]], <vscale x 8 x i1> [[MASK]], i64 0, i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vnclipu_wv_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vuint64m8_t op1, vuint32m4_t shift, size_t vl) {
- return __riscv_vnclipu_wv_u32m4_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnclipu_wv_u32m4_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i32> @test_vnclipu_wx_u32m4_tum
// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vnclipu.mask.nxv8i32.nxv8i64.i64.i64(<vscale x 8 x i32> [[MASKEDOFF]], <vscale x 8 x i64> [[OP1]], i64 [[SHIFT]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vnclipu.mask.nxv8i32.nxv8i64.i64.i64(<vscale x 8 x i32> [[MASKEDOFF]], <vscale x 8 x i64> [[OP1]], i64 [[SHIFT]], <vscale x 8 x i1> [[MASK]], i64 0, i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vnclipu_wx_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vuint64m8_t op1, size_t shift, size_t vl) {
- return __riscv_vnclipu_wx_u32m4_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnclipu_wx_u32m4_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vnclipu_wv_u8mf8_tumu
// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i8> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vnclipu.mask.nxv1i8.nxv1i16.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF]], <vscale x 1 x i16> [[OP1]], <vscale x 1 x i8> [[SHIFT]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vnclipu.mask.nxv1i8.nxv1i16.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF]], <vscale x 1 x i16> [[OP1]], <vscale x 1 x i8> [[SHIFT]], <vscale x 1 x i1> [[MASK]], i64 0, i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vnclipu_wv_u8mf8_tumu(vbool64_t mask, vuint8mf8_t maskedoff, vuint16mf4_t op1, vuint8mf8_t shift, size_t vl) {
- return __riscv_vnclipu_wv_u8mf8_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnclipu_wv_u8mf8_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vnclipu_wx_u8mf8_tumu
// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vnclipu.mask.nxv1i8.nxv1i16.i64.i64(<vscale x 1 x i8> [[MASKEDOFF]], <vscale x 1 x i16> [[OP1]], i64 [[SHIFT]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vnclipu.mask.nxv1i8.nxv1i16.i64.i64(<vscale x 1 x i8> [[MASKEDOFF]], <vscale x 1 x i16> [[OP1]], i64 [[SHIFT]], <vscale x 1 x i1> [[MASK]], i64 0, i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vnclipu_wx_u8mf8_tumu(vbool64_t mask, vuint8mf8_t maskedoff, vuint16mf4_t op1, size_t shift, size_t vl) {
- return __riscv_vnclipu_wx_u8mf8_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnclipu_wx_u8mf8_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vnclipu_wv_u8mf4_tumu
// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i8> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vnclipu.mask.nxv2i8.nxv2i16.nxv2i8.i64(<vscale x 2 x i8> [[MASKEDOFF]], <vscale x 2 x i16> [[OP1]], <vscale x 2 x i8> [[SHIFT]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vnclipu.mask.nxv2i8.nxv2i16.nxv2i8.i64(<vscale x 2 x i8> [[MASKEDOFF]], <vscale x 2 x i16> [[OP1]], <vscale x 2 x i8> [[SHIFT]], <vscale x 2 x i1> [[MASK]], i64 0, i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vnclipu_wv_u8mf4_tumu(vbool32_t mask, vuint8mf4_t maskedoff, vuint16mf2_t op1, vuint8mf4_t shift, size_t vl) {
- return __riscv_vnclipu_wv_u8mf4_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnclipu_wv_u8mf4_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vnclipu_wx_u8mf4_tumu
// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vnclipu.mask.nxv2i8.nxv2i16.i64.i64(<vscale x 2 x i8> [[MASKEDOFF]], <vscale x 2 x i16> [[OP1]], i64 [[SHIFT]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vnclipu.mask.nxv2i8.nxv2i16.i64.i64(<vscale x 2 x i8> [[MASKEDOFF]], <vscale x 2 x i16> [[OP1]], i64 [[SHIFT]], <vscale x 2 x i1> [[MASK]], i64 0, i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vnclipu_wx_u8mf4_tumu(vbool32_t mask, vuint8mf4_t maskedoff, vuint16mf2_t op1, size_t shift, size_t vl) {
- return __riscv_vnclipu_wx_u8mf4_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnclipu_wx_u8mf4_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vnclipu_wv_u8mf2_tumu
// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i8> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vnclipu.mask.nxv4i8.nxv4i16.nxv4i8.i64(<vscale x 4 x i8> [[MASKEDOFF]], <vscale x 4 x i16> [[OP1]], <vscale x 4 x i8> [[SHIFT]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vnclipu.mask.nxv4i8.nxv4i16.nxv4i8.i64(<vscale x 4 x i8> [[MASKEDOFF]], <vscale x 4 x i16> [[OP1]], <vscale x 4 x i8> [[SHIFT]], <vscale x 4 x i1> [[MASK]], i64 0, i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vnclipu_wv_u8mf2_tumu(vbool16_t mask, vuint8mf2_t maskedoff, vuint16m1_t op1, vuint8mf2_t shift, size_t vl) {
- return __riscv_vnclipu_wv_u8mf2_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnclipu_wv_u8mf2_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vnclipu_wx_u8mf2_tumu
// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vnclipu.mask.nxv4i8.nxv4i16.i64.i64(<vscale x 4 x i8> [[MASKEDOFF]], <vscale x 4 x i16> [[OP1]], i64 [[SHIFT]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vnclipu.mask.nxv4i8.nxv4i16.i64.i64(<vscale x 4 x i8> [[MASKEDOFF]], <vscale x 4 x i16> [[OP1]], i64 [[SHIFT]], <vscale x 4 x i1> [[MASK]], i64 0, i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vnclipu_wx_u8mf2_tumu(vbool16_t mask, vuint8mf2_t maskedoff, vuint16m1_t op1, size_t shift, size_t vl) {
- return __riscv_vnclipu_wx_u8mf2_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnclipu_wx_u8mf2_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vnclipu_wv_u8m1_tumu
// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i8> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vnclipu.mask.nxv8i8.nxv8i16.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF]], <vscale x 8 x i16> [[OP1]], <vscale x 8 x i8> [[SHIFT]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vnclipu.mask.nxv8i8.nxv8i16.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF]], <vscale x 8 x i16> [[OP1]], <vscale x 8 x i8> [[SHIFT]], <vscale x 8 x i1> [[MASK]], i64 0, i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vnclipu_wv_u8m1_tumu(vbool8_t mask, vuint8m1_t maskedoff, vuint16m2_t op1, vuint8m1_t shift, size_t vl) {
- return __riscv_vnclipu_wv_u8m1_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnclipu_wv_u8m1_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vnclipu_wx_u8m1_tumu
// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vnclipu.mask.nxv8i8.nxv8i16.i64.i64(<vscale x 8 x i8> [[MASKEDOFF]], <vscale x 8 x i16> [[OP1]], i64 [[SHIFT]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vnclipu.mask.nxv8i8.nxv8i16.i64.i64(<vscale x 8 x i8> [[MASKEDOFF]], <vscale x 8 x i16> [[OP1]], i64 [[SHIFT]], <vscale x 8 x i1> [[MASK]], i64 0, i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vnclipu_wx_u8m1_tumu(vbool8_t mask, vuint8m1_t maskedoff, vuint16m2_t op1, size_t shift, size_t vl) {
- return __riscv_vnclipu_wx_u8m1_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnclipu_wx_u8m1_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vnclipu_wv_u8m2_tumu
// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i8> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vnclipu.mask.nxv16i8.nxv16i16.nxv16i8.i64(<vscale x 16 x i8> [[MASKEDOFF]], <vscale x 16 x i16> [[OP1]], <vscale x 16 x i8> [[SHIFT]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vnclipu.mask.nxv16i8.nxv16i16.nxv16i8.i64(<vscale x 16 x i8> [[MASKEDOFF]], <vscale x 16 x i16> [[OP1]], <vscale x 16 x i8> [[SHIFT]], <vscale x 16 x i1> [[MASK]], i64 0, i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vnclipu_wv_u8m2_tumu(vbool4_t mask, vuint8m2_t maskedoff, vuint16m4_t op1, vuint8m2_t shift, size_t vl) {
- return __riscv_vnclipu_wv_u8m2_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnclipu_wv_u8m2_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vnclipu_wx_u8m2_tumu
// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vnclipu.mask.nxv16i8.nxv16i16.i64.i64(<vscale x 16 x i8> [[MASKEDOFF]], <vscale x 16 x i16> [[OP1]], i64 [[SHIFT]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vnclipu.mask.nxv16i8.nxv16i16.i64.i64(<vscale x 16 x i8> [[MASKEDOFF]], <vscale x 16 x i16> [[OP1]], i64 [[SHIFT]], <vscale x 16 x i1> [[MASK]], i64 0, i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vnclipu_wx_u8m2_tumu(vbool4_t mask, vuint8m2_t maskedoff, vuint16m4_t op1, size_t shift, size_t vl) {
- return __riscv_vnclipu_wx_u8m2_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnclipu_wx_u8m2_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vnclipu_wv_u8m4_tumu
// CHECK-RV64-SAME: (<vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i8> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vnclipu.mask.nxv32i8.nxv32i16.nxv32i8.i64(<vscale x 32 x i8> [[MASKEDOFF]], <vscale x 32 x i16> [[OP1]], <vscale x 32 x i8> [[SHIFT]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vnclipu.mask.nxv32i8.nxv32i16.nxv32i8.i64(<vscale x 32 x i8> [[MASKEDOFF]], <vscale x 32 x i16> [[OP1]], <vscale x 32 x i8> [[SHIFT]], <vscale x 32 x i1> [[MASK]], i64 0, i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vnclipu_wv_u8m4_tumu(vbool2_t mask, vuint8m4_t maskedoff, vuint16m8_t op1, vuint8m4_t shift, size_t vl) {
- return __riscv_vnclipu_wv_u8m4_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnclipu_wv_u8m4_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vnclipu_wx_u8m4_tumu
// CHECK-RV64-SAME: (<vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vnclipu.mask.nxv32i8.nxv32i16.i64.i64(<vscale x 32 x i8> [[MASKEDOFF]], <vscale x 32 x i16> [[OP1]], i64 [[SHIFT]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vnclipu.mask.nxv32i8.nxv32i16.i64.i64(<vscale x 32 x i8> [[MASKEDOFF]], <vscale x 32 x i16> [[OP1]], i64 [[SHIFT]], <vscale x 32 x i1> [[MASK]], i64 0, i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vnclipu_wx_u8m4_tumu(vbool2_t mask, vuint8m4_t maskedoff, vuint16m8_t op1, size_t shift, size_t vl) {
- return __riscv_vnclipu_wx_u8m4_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnclipu_wx_u8m4_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i16> @test_vnclipu_wv_u16mf4_tumu
// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i16> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vnclipu.mask.nxv1i16.nxv1i32.nxv1i16.i64(<vscale x 1 x i16> [[MASKEDOFF]], <vscale x 1 x i32> [[OP1]], <vscale x 1 x i16> [[SHIFT]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vnclipu.mask.nxv1i16.nxv1i32.nxv1i16.i64(<vscale x 1 x i16> [[MASKEDOFF]], <vscale x 1 x i32> [[OP1]], <vscale x 1 x i16> [[SHIFT]], <vscale x 1 x i1> [[MASK]], i64 0, i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vnclipu_wv_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, vuint32mf2_t op1, vuint16mf4_t shift, size_t vl) {
- return __riscv_vnclipu_wv_u16mf4_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnclipu_wv_u16mf4_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i16> @test_vnclipu_wx_u16mf4_tumu
// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vnclipu.mask.nxv1i16.nxv1i32.i64.i64(<vscale x 1 x i16> [[MASKEDOFF]], <vscale x 1 x i32> [[OP1]], i64 [[SHIFT]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vnclipu.mask.nxv1i16.nxv1i32.i64.i64(<vscale x 1 x i16> [[MASKEDOFF]], <vscale x 1 x i32> [[OP1]], i64 [[SHIFT]], <vscale x 1 x i1> [[MASK]], i64 0, i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vnclipu_wx_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, vuint32mf2_t op1, size_t shift, size_t vl) {
- return __riscv_vnclipu_wx_u16mf4_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnclipu_wx_u16mf4_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i16> @test_vnclipu_wv_u16mf2_tumu
// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i16> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vnclipu.mask.nxv2i16.nxv2i32.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF]], <vscale x 2 x i32> [[OP1]], <vscale x 2 x i16> [[SHIFT]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vnclipu.mask.nxv2i16.nxv2i32.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF]], <vscale x 2 x i32> [[OP1]], <vscale x 2 x i16> [[SHIFT]], <vscale x 2 x i1> [[MASK]], i64 0, i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vnclipu_wv_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, vuint32m1_t op1, vuint16mf2_t shift, size_t vl) {
- return __riscv_vnclipu_wv_u16mf2_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnclipu_wv_u16mf2_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i16> @test_vnclipu_wx_u16mf2_tumu
// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vnclipu.mask.nxv2i16.nxv2i32.i64.i64(<vscale x 2 x i16> [[MASKEDOFF]], <vscale x 2 x i32> [[OP1]], i64 [[SHIFT]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vnclipu.mask.nxv2i16.nxv2i32.i64.i64(<vscale x 2 x i16> [[MASKEDOFF]], <vscale x 2 x i32> [[OP1]], i64 [[SHIFT]], <vscale x 2 x i1> [[MASK]], i64 0, i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vnclipu_wx_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, vuint32m1_t op1, size_t shift, size_t vl) {
- return __riscv_vnclipu_wx_u16mf2_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnclipu_wx_u16mf2_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vnclipu_wv_u16m1_tumu
// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i16> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vnclipu.mask.nxv4i16.nxv4i32.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF]], <vscale x 4 x i32> [[OP1]], <vscale x 4 x i16> [[SHIFT]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vnclipu.mask.nxv4i16.nxv4i32.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF]], <vscale x 4 x i32> [[OP1]], <vscale x 4 x i16> [[SHIFT]], <vscale x 4 x i1> [[MASK]], i64 0, i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vnclipu_wv_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, vuint32m2_t op1, vuint16m1_t shift, size_t vl) {
- return __riscv_vnclipu_wv_u16m1_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnclipu_wv_u16m1_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vnclipu_wx_u16m1_tumu
// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vnclipu.mask.nxv4i16.nxv4i32.i64.i64(<vscale x 4 x i16> [[MASKEDOFF]], <vscale x 4 x i32> [[OP1]], i64 [[SHIFT]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vnclipu.mask.nxv4i16.nxv4i32.i64.i64(<vscale x 4 x i16> [[MASKEDOFF]], <vscale x 4 x i32> [[OP1]], i64 [[SHIFT]], <vscale x 4 x i1> [[MASK]], i64 0, i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vnclipu_wx_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, vuint32m2_t op1, size_t shift, size_t vl) {
- return __riscv_vnclipu_wx_u16m1_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnclipu_wx_u16m1_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i16> @test_vnclipu_wv_u16m2_tumu
// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i16> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vnclipu.mask.nxv8i16.nxv8i32.nxv8i16.i64(<vscale x 8 x i16> [[MASKEDOFF]], <vscale x 8 x i32> [[OP1]], <vscale x 8 x i16> [[SHIFT]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vnclipu.mask.nxv8i16.nxv8i32.nxv8i16.i64(<vscale x 8 x i16> [[MASKEDOFF]], <vscale x 8 x i32> [[OP1]], <vscale x 8 x i16> [[SHIFT]], <vscale x 8 x i1> [[MASK]], i64 0, i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vnclipu_wv_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, vuint32m4_t op1, vuint16m2_t shift, size_t vl) {
- return __riscv_vnclipu_wv_u16m2_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnclipu_wv_u16m2_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i16> @test_vnclipu_wx_u16m2_tumu
// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vnclipu.mask.nxv8i16.nxv8i32.i64.i64(<vscale x 8 x i16> [[MASKEDOFF]], <vscale x 8 x i32> [[OP1]], i64 [[SHIFT]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vnclipu.mask.nxv8i16.nxv8i32.i64.i64(<vscale x 8 x i16> [[MASKEDOFF]], <vscale x 8 x i32> [[OP1]], i64 [[SHIFT]], <vscale x 8 x i1> [[MASK]], i64 0, i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vnclipu_wx_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, vuint32m4_t op1, size_t shift, size_t vl) {
- return __riscv_vnclipu_wx_u16m2_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnclipu_wx_u16m2_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i16> @test_vnclipu_wv_u16m4_tumu
// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i16> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vnclipu.mask.nxv16i16.nxv16i32.nxv16i16.i64(<vscale x 16 x i16> [[MASKEDOFF]], <vscale x 16 x i32> [[OP1]], <vscale x 16 x i16> [[SHIFT]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vnclipu.mask.nxv16i16.nxv16i32.nxv16i16.i64(<vscale x 16 x i16> [[MASKEDOFF]], <vscale x 16 x i32> [[OP1]], <vscale x 16 x i16> [[SHIFT]], <vscale x 16 x i1> [[MASK]], i64 0, i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vnclipu_wv_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, vuint32m8_t op1, vuint16m4_t shift, size_t vl) {
- return __riscv_vnclipu_wv_u16m4_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnclipu_wv_u16m4_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i16> @test_vnclipu_wx_u16m4_tumu
// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vnclipu.mask.nxv16i16.nxv16i32.i64.i64(<vscale x 16 x i16> [[MASKEDOFF]], <vscale x 16 x i32> [[OP1]], i64 [[SHIFT]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vnclipu.mask.nxv16i16.nxv16i32.i64.i64(<vscale x 16 x i16> [[MASKEDOFF]], <vscale x 16 x i32> [[OP1]], i64 [[SHIFT]], <vscale x 16 x i1> [[MASK]], i64 0, i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vnclipu_wx_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, vuint32m8_t op1, size_t shift, size_t vl) {
- return __riscv_vnclipu_wx_u16m4_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnclipu_wx_u16m4_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i32> @test_vnclipu_wv_u32mf2_tumu
// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i32> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnclipu.mask.nxv1i32.nxv1i64.nxv1i32.i64(<vscale x 1 x i32> [[MASKEDOFF]], <vscale x 1 x i64> [[OP1]], <vscale x 1 x i32> [[SHIFT]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnclipu.mask.nxv1i32.nxv1i64.nxv1i32.i64(<vscale x 1 x i32> [[MASKEDOFF]], <vscale x 1 x i64> [[OP1]], <vscale x 1 x i32> [[SHIFT]], <vscale x 1 x i1> [[MASK]], i64 0, i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vnclipu_wv_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, vuint64m1_t op1, vuint32mf2_t shift, size_t vl) {
- return __riscv_vnclipu_wv_u32mf2_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnclipu_wv_u32mf2_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i32> @test_vnclipu_wx_u32mf2_tumu
// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnclipu.mask.nxv1i32.nxv1i64.i64.i64(<vscale x 1 x i32> [[MASKEDOFF]], <vscale x 1 x i64> [[OP1]], i64 [[SHIFT]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnclipu.mask.nxv1i32.nxv1i64.i64.i64(<vscale x 1 x i32> [[MASKEDOFF]], <vscale x 1 x i64> [[OP1]], i64 [[SHIFT]], <vscale x 1 x i1> [[MASK]], i64 0, i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vnclipu_wx_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, vuint64m1_t op1, size_t shift, size_t vl) {
- return __riscv_vnclipu_wx_u32mf2_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnclipu_wx_u32mf2_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i32> @test_vnclipu_wv_u32m1_tumu
// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i32> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vnclipu.mask.nxv2i32.nxv2i64.nxv2i32.i64(<vscale x 2 x i32> [[MASKEDOFF]], <vscale x 2 x i64> [[OP1]], <vscale x 2 x i32> [[SHIFT]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vnclipu.mask.nxv2i32.nxv2i64.nxv2i32.i64(<vscale x 2 x i32> [[MASKEDOFF]], <vscale x 2 x i64> [[OP1]], <vscale x 2 x i32> [[SHIFT]], <vscale x 2 x i1> [[MASK]], i64 0, i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vnclipu_wv_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vuint64m2_t op1, vuint32m1_t shift, size_t vl) {
- return __riscv_vnclipu_wv_u32m1_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnclipu_wv_u32m1_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i32> @test_vnclipu_wx_u32m1_tumu
// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vnclipu.mask.nxv2i32.nxv2i64.i64.i64(<vscale x 2 x i32> [[MASKEDOFF]], <vscale x 2 x i64> [[OP1]], i64 [[SHIFT]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vnclipu.mask.nxv2i32.nxv2i64.i64.i64(<vscale x 2 x i32> [[MASKEDOFF]], <vscale x 2 x i64> [[OP1]], i64 [[SHIFT]], <vscale x 2 x i1> [[MASK]], i64 0, i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vnclipu_wx_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vuint64m2_t op1, size_t shift, size_t vl) {
- return __riscv_vnclipu_wx_u32m1_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnclipu_wx_u32m1_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i32> @test_vnclipu_wv_u32m2_tumu
// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i32> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vnclipu.mask.nxv4i32.nxv4i64.nxv4i32.i64(<vscale x 4 x i32> [[MASKEDOFF]], <vscale x 4 x i64> [[OP1]], <vscale x 4 x i32> [[SHIFT]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vnclipu.mask.nxv4i32.nxv4i64.nxv4i32.i64(<vscale x 4 x i32> [[MASKEDOFF]], <vscale x 4 x i64> [[OP1]], <vscale x 4 x i32> [[SHIFT]], <vscale x 4 x i1> [[MASK]], i64 0, i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vnclipu_wv_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, vuint64m4_t op1, vuint32m2_t shift, size_t vl) {
- return __riscv_vnclipu_wv_u32m2_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnclipu_wv_u32m2_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i32> @test_vnclipu_wx_u32m2_tumu
// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vnclipu.mask.nxv4i32.nxv4i64.i64.i64(<vscale x 4 x i32> [[MASKEDOFF]], <vscale x 4 x i64> [[OP1]], i64 [[SHIFT]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vnclipu.mask.nxv4i32.nxv4i64.i64.i64(<vscale x 4 x i32> [[MASKEDOFF]], <vscale x 4 x i64> [[OP1]], i64 [[SHIFT]], <vscale x 4 x i1> [[MASK]], i64 0, i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vnclipu_wx_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, vuint64m4_t op1, size_t shift, size_t vl) {
- return __riscv_vnclipu_wx_u32m2_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnclipu_wx_u32m2_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i32> @test_vnclipu_wv_u32m4_tumu
// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i32> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vnclipu.mask.nxv8i32.nxv8i64.nxv8i32.i64(<vscale x 8 x i32> [[MASKEDOFF]], <vscale x 8 x i64> [[OP1]], <vscale x 8 x i32> [[SHIFT]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vnclipu.mask.nxv8i32.nxv8i64.nxv8i32.i64(<vscale x 8 x i32> [[MASKEDOFF]], <vscale x 8 x i64> [[OP1]], <vscale x 8 x i32> [[SHIFT]], <vscale x 8 x i1> [[MASK]], i64 0, i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vnclipu_wv_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vuint64m8_t op1, vuint32m4_t shift, size_t vl) {
- return __riscv_vnclipu_wv_u32m4_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnclipu_wv_u32m4_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i32> @test_vnclipu_wx_u32m4_tumu
// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vnclipu.mask.nxv8i32.nxv8i64.i64.i64(<vscale x 8 x i32> [[MASKEDOFF]], <vscale x 8 x i64> [[OP1]], i64 [[SHIFT]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vnclipu.mask.nxv8i32.nxv8i64.i64.i64(<vscale x 8 x i32> [[MASKEDOFF]], <vscale x 8 x i64> [[OP1]], i64 [[SHIFT]], <vscale x 8 x i1> [[MASK]], i64 0, i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vnclipu_wx_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vuint64m8_t op1, size_t shift, size_t vl) {
- return __riscv_vnclipu_wx_u32m4_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnclipu_wx_u32m4_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vnclipu_wv_u8mf8_mu
// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i8> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vnclipu.mask.nxv1i8.nxv1i16.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF]], <vscale x 1 x i16> [[OP1]], <vscale x 1 x i8> [[SHIFT]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vnclipu.mask.nxv1i8.nxv1i16.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF]], <vscale x 1 x i16> [[OP1]], <vscale x 1 x i8> [[SHIFT]], <vscale x 1 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vnclipu_wv_u8mf8_mu(vbool64_t mask, vuint8mf8_t maskedoff, vuint16mf4_t op1, vuint8mf8_t shift, size_t vl) {
- return __riscv_vnclipu_wv_u8mf8_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnclipu_wv_u8mf8_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vnclipu_wx_u8mf8_mu
// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vnclipu.mask.nxv1i8.nxv1i16.i64.i64(<vscale x 1 x i8> [[MASKEDOFF]], <vscale x 1 x i16> [[OP1]], i64 [[SHIFT]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vnclipu.mask.nxv1i8.nxv1i16.i64.i64(<vscale x 1 x i8> [[MASKEDOFF]], <vscale x 1 x i16> [[OP1]], i64 [[SHIFT]], <vscale x 1 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vnclipu_wx_u8mf8_mu(vbool64_t mask, vuint8mf8_t maskedoff, vuint16mf4_t op1, size_t shift, size_t vl) {
- return __riscv_vnclipu_wx_u8mf8_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnclipu_wx_u8mf8_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vnclipu_wv_u8mf4_mu
// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i8> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vnclipu.mask.nxv2i8.nxv2i16.nxv2i8.i64(<vscale x 2 x i8> [[MASKEDOFF]], <vscale x 2 x i16> [[OP1]], <vscale x 2 x i8> [[SHIFT]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vnclipu.mask.nxv2i8.nxv2i16.nxv2i8.i64(<vscale x 2 x i8> [[MASKEDOFF]], <vscale x 2 x i16> [[OP1]], <vscale x 2 x i8> [[SHIFT]], <vscale x 2 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vnclipu_wv_u8mf4_mu(vbool32_t mask, vuint8mf4_t maskedoff, vuint16mf2_t op1, vuint8mf4_t shift, size_t vl) {
- return __riscv_vnclipu_wv_u8mf4_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnclipu_wv_u8mf4_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vnclipu_wx_u8mf4_mu
// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vnclipu.mask.nxv2i8.nxv2i16.i64.i64(<vscale x 2 x i8> [[MASKEDOFF]], <vscale x 2 x i16> [[OP1]], i64 [[SHIFT]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vnclipu.mask.nxv2i8.nxv2i16.i64.i64(<vscale x 2 x i8> [[MASKEDOFF]], <vscale x 2 x i16> [[OP1]], i64 [[SHIFT]], <vscale x 2 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vnclipu_wx_u8mf4_mu(vbool32_t mask, vuint8mf4_t maskedoff, vuint16mf2_t op1, size_t shift, size_t vl) {
- return __riscv_vnclipu_wx_u8mf4_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnclipu_wx_u8mf4_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vnclipu_wv_u8mf2_mu
// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i8> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vnclipu.mask.nxv4i8.nxv4i16.nxv4i8.i64(<vscale x 4 x i8> [[MASKEDOFF]], <vscale x 4 x i16> [[OP1]], <vscale x 4 x i8> [[SHIFT]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vnclipu.mask.nxv4i8.nxv4i16.nxv4i8.i64(<vscale x 4 x i8> [[MASKEDOFF]], <vscale x 4 x i16> [[OP1]], <vscale x 4 x i8> [[SHIFT]], <vscale x 4 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vnclipu_wv_u8mf2_mu(vbool16_t mask, vuint8mf2_t maskedoff, vuint16m1_t op1, vuint8mf2_t shift, size_t vl) {
- return __riscv_vnclipu_wv_u8mf2_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnclipu_wv_u8mf2_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vnclipu_wx_u8mf2_mu
// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vnclipu.mask.nxv4i8.nxv4i16.i64.i64(<vscale x 4 x i8> [[MASKEDOFF]], <vscale x 4 x i16> [[OP1]], i64 [[SHIFT]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vnclipu.mask.nxv4i8.nxv4i16.i64.i64(<vscale x 4 x i8> [[MASKEDOFF]], <vscale x 4 x i16> [[OP1]], i64 [[SHIFT]], <vscale x 4 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vnclipu_wx_u8mf2_mu(vbool16_t mask, vuint8mf2_t maskedoff, vuint16m1_t op1, size_t shift, size_t vl) {
- return __riscv_vnclipu_wx_u8mf2_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnclipu_wx_u8mf2_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vnclipu_wv_u8m1_mu
// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i8> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vnclipu.mask.nxv8i8.nxv8i16.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF]], <vscale x 8 x i16> [[OP1]], <vscale x 8 x i8> [[SHIFT]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vnclipu.mask.nxv8i8.nxv8i16.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF]], <vscale x 8 x i16> [[OP1]], <vscale x 8 x i8> [[SHIFT]], <vscale x 8 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vnclipu_wv_u8m1_mu(vbool8_t mask, vuint8m1_t maskedoff, vuint16m2_t op1, vuint8m1_t shift, size_t vl) {
- return __riscv_vnclipu_wv_u8m1_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnclipu_wv_u8m1_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vnclipu_wx_u8m1_mu
// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vnclipu.mask.nxv8i8.nxv8i16.i64.i64(<vscale x 8 x i8> [[MASKEDOFF]], <vscale x 8 x i16> [[OP1]], i64 [[SHIFT]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vnclipu.mask.nxv8i8.nxv8i16.i64.i64(<vscale x 8 x i8> [[MASKEDOFF]], <vscale x 8 x i16> [[OP1]], i64 [[SHIFT]], <vscale x 8 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vnclipu_wx_u8m1_mu(vbool8_t mask, vuint8m1_t maskedoff, vuint16m2_t op1, size_t shift, size_t vl) {
- return __riscv_vnclipu_wx_u8m1_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnclipu_wx_u8m1_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vnclipu_wv_u8m2_mu
// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i8> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vnclipu.mask.nxv16i8.nxv16i16.nxv16i8.i64(<vscale x 16 x i8> [[MASKEDOFF]], <vscale x 16 x i16> [[OP1]], <vscale x 16 x i8> [[SHIFT]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vnclipu.mask.nxv16i8.nxv16i16.nxv16i8.i64(<vscale x 16 x i8> [[MASKEDOFF]], <vscale x 16 x i16> [[OP1]], <vscale x 16 x i8> [[SHIFT]], <vscale x 16 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vnclipu_wv_u8m2_mu(vbool4_t mask, vuint8m2_t maskedoff, vuint16m4_t op1, vuint8m2_t shift, size_t vl) {
- return __riscv_vnclipu_wv_u8m2_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnclipu_wv_u8m2_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vnclipu_wx_u8m2_mu
// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vnclipu.mask.nxv16i8.nxv16i16.i64.i64(<vscale x 16 x i8> [[MASKEDOFF]], <vscale x 16 x i16> [[OP1]], i64 [[SHIFT]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vnclipu.mask.nxv16i8.nxv16i16.i64.i64(<vscale x 16 x i8> [[MASKEDOFF]], <vscale x 16 x i16> [[OP1]], i64 [[SHIFT]], <vscale x 16 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vnclipu_wx_u8m2_mu(vbool4_t mask, vuint8m2_t maskedoff, vuint16m4_t op1, size_t shift, size_t vl) {
- return __riscv_vnclipu_wx_u8m2_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnclipu_wx_u8m2_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vnclipu_wv_u8m4_mu
// CHECK-RV64-SAME: (<vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i8> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vnclipu.mask.nxv32i8.nxv32i16.nxv32i8.i64(<vscale x 32 x i8> [[MASKEDOFF]], <vscale x 32 x i16> [[OP1]], <vscale x 32 x i8> [[SHIFT]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vnclipu.mask.nxv32i8.nxv32i16.nxv32i8.i64(<vscale x 32 x i8> [[MASKEDOFF]], <vscale x 32 x i16> [[OP1]], <vscale x 32 x i8> [[SHIFT]], <vscale x 32 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vnclipu_wv_u8m4_mu(vbool2_t mask, vuint8m4_t maskedoff, vuint16m8_t op1, vuint8m4_t shift, size_t vl) {
- return __riscv_vnclipu_wv_u8m4_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnclipu_wv_u8m4_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vnclipu_wx_u8m4_mu
// CHECK-RV64-SAME: (<vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vnclipu.mask.nxv32i8.nxv32i16.i64.i64(<vscale x 32 x i8> [[MASKEDOFF]], <vscale x 32 x i16> [[OP1]], i64 [[SHIFT]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vnclipu.mask.nxv32i8.nxv32i16.i64.i64(<vscale x 32 x i8> [[MASKEDOFF]], <vscale x 32 x i16> [[OP1]], i64 [[SHIFT]], <vscale x 32 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vnclipu_wx_u8m4_mu(vbool2_t mask, vuint8m4_t maskedoff, vuint16m8_t op1, size_t shift, size_t vl) {
- return __riscv_vnclipu_wx_u8m4_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnclipu_wx_u8m4_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i16> @test_vnclipu_wv_u16mf4_mu
// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i16> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vnclipu.mask.nxv1i16.nxv1i32.nxv1i16.i64(<vscale x 1 x i16> [[MASKEDOFF]], <vscale x 1 x i32> [[OP1]], <vscale x 1 x i16> [[SHIFT]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vnclipu.mask.nxv1i16.nxv1i32.nxv1i16.i64(<vscale x 1 x i16> [[MASKEDOFF]], <vscale x 1 x i32> [[OP1]], <vscale x 1 x i16> [[SHIFT]], <vscale x 1 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vnclipu_wv_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, vuint32mf2_t op1, vuint16mf4_t shift, size_t vl) {
- return __riscv_vnclipu_wv_u16mf4_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnclipu_wv_u16mf4_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i16> @test_vnclipu_wx_u16mf4_mu
// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vnclipu.mask.nxv1i16.nxv1i32.i64.i64(<vscale x 1 x i16> [[MASKEDOFF]], <vscale x 1 x i32> [[OP1]], i64 [[SHIFT]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vnclipu.mask.nxv1i16.nxv1i32.i64.i64(<vscale x 1 x i16> [[MASKEDOFF]], <vscale x 1 x i32> [[OP1]], i64 [[SHIFT]], <vscale x 1 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vnclipu_wx_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, vuint32mf2_t op1, size_t shift, size_t vl) {
- return __riscv_vnclipu_wx_u16mf4_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnclipu_wx_u16mf4_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i16> @test_vnclipu_wv_u16mf2_mu
// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i16> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vnclipu.mask.nxv2i16.nxv2i32.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF]], <vscale x 2 x i32> [[OP1]], <vscale x 2 x i16> [[SHIFT]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vnclipu.mask.nxv2i16.nxv2i32.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF]], <vscale x 2 x i32> [[OP1]], <vscale x 2 x i16> [[SHIFT]], <vscale x 2 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vnclipu_wv_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, vuint32m1_t op1, vuint16mf2_t shift, size_t vl) {
- return __riscv_vnclipu_wv_u16mf2_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnclipu_wv_u16mf2_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i16> @test_vnclipu_wx_u16mf2_mu
// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vnclipu.mask.nxv2i16.nxv2i32.i64.i64(<vscale x 2 x i16> [[MASKEDOFF]], <vscale x 2 x i32> [[OP1]], i64 [[SHIFT]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vnclipu.mask.nxv2i16.nxv2i32.i64.i64(<vscale x 2 x i16> [[MASKEDOFF]], <vscale x 2 x i32> [[OP1]], i64 [[SHIFT]], <vscale x 2 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vnclipu_wx_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, vuint32m1_t op1, size_t shift, size_t vl) {
- return __riscv_vnclipu_wx_u16mf2_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnclipu_wx_u16mf2_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vnclipu_wv_u16m1_mu
// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i16> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vnclipu.mask.nxv4i16.nxv4i32.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF]], <vscale x 4 x i32> [[OP1]], <vscale x 4 x i16> [[SHIFT]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vnclipu.mask.nxv4i16.nxv4i32.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF]], <vscale x 4 x i32> [[OP1]], <vscale x 4 x i16> [[SHIFT]], <vscale x 4 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vnclipu_wv_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, vuint32m2_t op1, vuint16m1_t shift, size_t vl) {
- return __riscv_vnclipu_wv_u16m1_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnclipu_wv_u16m1_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vnclipu_wx_u16m1_mu
// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vnclipu.mask.nxv4i16.nxv4i32.i64.i64(<vscale x 4 x i16> [[MASKEDOFF]], <vscale x 4 x i32> [[OP1]], i64 [[SHIFT]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vnclipu.mask.nxv4i16.nxv4i32.i64.i64(<vscale x 4 x i16> [[MASKEDOFF]], <vscale x 4 x i32> [[OP1]], i64 [[SHIFT]], <vscale x 4 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vnclipu_wx_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, vuint32m2_t op1, size_t shift, size_t vl) {
- return __riscv_vnclipu_wx_u16m1_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnclipu_wx_u16m1_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i16> @test_vnclipu_wv_u16m2_mu
// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i16> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vnclipu.mask.nxv8i16.nxv8i32.nxv8i16.i64(<vscale x 8 x i16> [[MASKEDOFF]], <vscale x 8 x i32> [[OP1]], <vscale x 8 x i16> [[SHIFT]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vnclipu.mask.nxv8i16.nxv8i32.nxv8i16.i64(<vscale x 8 x i16> [[MASKEDOFF]], <vscale x 8 x i32> [[OP1]], <vscale x 8 x i16> [[SHIFT]], <vscale x 8 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vnclipu_wv_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vuint32m4_t op1, vuint16m2_t shift, size_t vl) {
- return __riscv_vnclipu_wv_u16m2_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnclipu_wv_u16m2_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i16> @test_vnclipu_wx_u16m2_mu
// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vnclipu.mask.nxv8i16.nxv8i32.i64.i64(<vscale x 8 x i16> [[MASKEDOFF]], <vscale x 8 x i32> [[OP1]], i64 [[SHIFT]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vnclipu.mask.nxv8i16.nxv8i32.i64.i64(<vscale x 8 x i16> [[MASKEDOFF]], <vscale x 8 x i32> [[OP1]], i64 [[SHIFT]], <vscale x 8 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vnclipu_wx_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vuint32m4_t op1, size_t shift, size_t vl) {
- return __riscv_vnclipu_wx_u16m2_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnclipu_wx_u16m2_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i16> @test_vnclipu_wv_u16m4_mu
// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i16> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vnclipu.mask.nxv16i16.nxv16i32.nxv16i16.i64(<vscale x 16 x i16> [[MASKEDOFF]], <vscale x 16 x i32> [[OP1]], <vscale x 16 x i16> [[SHIFT]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vnclipu.mask.nxv16i16.nxv16i32.nxv16i16.i64(<vscale x 16 x i16> [[MASKEDOFF]], <vscale x 16 x i32> [[OP1]], <vscale x 16 x i16> [[SHIFT]], <vscale x 16 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vnclipu_wv_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, vuint32m8_t op1, vuint16m4_t shift, size_t vl) {
- return __riscv_vnclipu_wv_u16m4_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnclipu_wv_u16m4_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i16> @test_vnclipu_wx_u16m4_mu
// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vnclipu.mask.nxv16i16.nxv16i32.i64.i64(<vscale x 16 x i16> [[MASKEDOFF]], <vscale x 16 x i32> [[OP1]], i64 [[SHIFT]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vnclipu.mask.nxv16i16.nxv16i32.i64.i64(<vscale x 16 x i16> [[MASKEDOFF]], <vscale x 16 x i32> [[OP1]], i64 [[SHIFT]], <vscale x 16 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vnclipu_wx_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, vuint32m8_t op1, size_t shift, size_t vl) {
- return __riscv_vnclipu_wx_u16m4_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnclipu_wx_u16m4_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i32> @test_vnclipu_wv_u32mf2_mu
// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i32> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnclipu.mask.nxv1i32.nxv1i64.nxv1i32.i64(<vscale x 1 x i32> [[MASKEDOFF]], <vscale x 1 x i64> [[OP1]], <vscale x 1 x i32> [[SHIFT]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnclipu.mask.nxv1i32.nxv1i64.nxv1i32.i64(<vscale x 1 x i32> [[MASKEDOFF]], <vscale x 1 x i64> [[OP1]], <vscale x 1 x i32> [[SHIFT]], <vscale x 1 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vnclipu_wv_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vuint64m1_t op1, vuint32mf2_t shift, size_t vl) {
- return __riscv_vnclipu_wv_u32mf2_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnclipu_wv_u32mf2_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i32> @test_vnclipu_wx_u32mf2_mu
// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnclipu.mask.nxv1i32.nxv1i64.i64.i64(<vscale x 1 x i32> [[MASKEDOFF]], <vscale x 1 x i64> [[OP1]], i64 [[SHIFT]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnclipu.mask.nxv1i32.nxv1i64.i64.i64(<vscale x 1 x i32> [[MASKEDOFF]], <vscale x 1 x i64> [[OP1]], i64 [[SHIFT]], <vscale x 1 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vnclipu_wx_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vuint64m1_t op1, size_t shift, size_t vl) {
- return __riscv_vnclipu_wx_u32mf2_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnclipu_wx_u32mf2_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i32> @test_vnclipu_wv_u32m1_mu
// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i32> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vnclipu.mask.nxv2i32.nxv2i64.nxv2i32.i64(<vscale x 2 x i32> [[MASKEDOFF]], <vscale x 2 x i64> [[OP1]], <vscale x 2 x i32> [[SHIFT]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vnclipu.mask.nxv2i32.nxv2i64.nxv2i32.i64(<vscale x 2 x i32> [[MASKEDOFF]], <vscale x 2 x i64> [[OP1]], <vscale x 2 x i32> [[SHIFT]], <vscale x 2 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vnclipu_wv_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vuint64m2_t op1, vuint32m1_t shift, size_t vl) {
- return __riscv_vnclipu_wv_u32m1_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnclipu_wv_u32m1_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i32> @test_vnclipu_wx_u32m1_mu
// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vnclipu.mask.nxv2i32.nxv2i64.i64.i64(<vscale x 2 x i32> [[MASKEDOFF]], <vscale x 2 x i64> [[OP1]], i64 [[SHIFT]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vnclipu.mask.nxv2i32.nxv2i64.i64.i64(<vscale x 2 x i32> [[MASKEDOFF]], <vscale x 2 x i64> [[OP1]], i64 [[SHIFT]], <vscale x 2 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vnclipu_wx_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vuint64m2_t op1, size_t shift, size_t vl) {
- return __riscv_vnclipu_wx_u32m1_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnclipu_wx_u32m1_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i32> @test_vnclipu_wv_u32m2_mu
// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i32> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vnclipu.mask.nxv4i32.nxv4i64.nxv4i32.i64(<vscale x 4 x i32> [[MASKEDOFF]], <vscale x 4 x i64> [[OP1]], <vscale x 4 x i32> [[SHIFT]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vnclipu.mask.nxv4i32.nxv4i64.nxv4i32.i64(<vscale x 4 x i32> [[MASKEDOFF]], <vscale x 4 x i64> [[OP1]], <vscale x 4 x i32> [[SHIFT]], <vscale x 4 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vnclipu_wv_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vuint64m4_t op1, vuint32m2_t shift, size_t vl) {
- return __riscv_vnclipu_wv_u32m2_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnclipu_wv_u32m2_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i32> @test_vnclipu_wx_u32m2_mu
// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vnclipu.mask.nxv4i32.nxv4i64.i64.i64(<vscale x 4 x i32> [[MASKEDOFF]], <vscale x 4 x i64> [[OP1]], i64 [[SHIFT]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vnclipu.mask.nxv4i32.nxv4i64.i64.i64(<vscale x 4 x i32> [[MASKEDOFF]], <vscale x 4 x i64> [[OP1]], i64 [[SHIFT]], <vscale x 4 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vnclipu_wx_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vuint64m4_t op1, size_t shift, size_t vl) {
- return __riscv_vnclipu_wx_u32m2_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnclipu_wx_u32m2_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i32> @test_vnclipu_wv_u32m4_mu
// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i32> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vnclipu.mask.nxv8i32.nxv8i64.nxv8i32.i64(<vscale x 8 x i32> [[MASKEDOFF]], <vscale x 8 x i64> [[OP1]], <vscale x 8 x i32> [[SHIFT]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vnclipu.mask.nxv8i32.nxv8i64.nxv8i32.i64(<vscale x 8 x i32> [[MASKEDOFF]], <vscale x 8 x i64> [[OP1]], <vscale x 8 x i32> [[SHIFT]], <vscale x 8 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vnclipu_wv_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vuint64m8_t op1, vuint32m4_t shift, size_t vl) {
- return __riscv_vnclipu_wv_u32m4_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnclipu_wv_u32m4_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i32> @test_vnclipu_wx_u32m4_mu
// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vnclipu.mask.nxv8i32.nxv8i64.i64.i64(<vscale x 8 x i32> [[MASKEDOFF]], <vscale x 8 x i64> [[OP1]], i64 [[SHIFT]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vnclipu.mask.nxv8i32.nxv8i64.i64.i64(<vscale x 8 x i32> [[MASKEDOFF]], <vscale x 8 x i64> [[OP1]], i64 [[SHIFT]], <vscale x 8 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vnclipu_wx_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vuint64m8_t op1, size_t shift, size_t vl) {
- return __riscv_vnclipu_wx_u32m4_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnclipu_wx_u32m4_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vsmul_vv_i8mf8_tu
// CHECK-RV64-SAME: (<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsmul.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF]], <vscale x 1 x i8> [[OP1]], <vscale x 1 x i8> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsmul.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF]], <vscale x 1 x i8> [[OP1]], <vscale x 1 x i8> [[OP2]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vsmul_vv_i8mf8_tu(vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
- return __riscv_vsmul_vv_i8mf8_tu(maskedoff, op1, op2, vl);
+ return __riscv_vsmul_vv_i8mf8_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vsmul_vx_i8mf8_tu
// CHECK-RV64-SAME: (<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i8 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsmul.nxv1i8.i8.i64(<vscale x 1 x i8> [[MASKEDOFF]], <vscale x 1 x i8> [[OP1]], i8 [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsmul.nxv1i8.i8.i64(<vscale x 1 x i8> [[MASKEDOFF]], <vscale x 1 x i8> [[OP1]], i8 [[OP2]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vsmul_vx_i8mf8_tu(vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) {
- return __riscv_vsmul_vx_i8mf8_tu(maskedoff, op1, op2, vl);
+ return __riscv_vsmul_vx_i8mf8_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vsmul_vv_i8mf4_tu
// CHECK-RV64-SAME: (<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsmul.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[MASKEDOFF]], <vscale x 2 x i8> [[OP1]], <vscale x 2 x i8> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsmul.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[MASKEDOFF]], <vscale x 2 x i8> [[OP1]], <vscale x 2 x i8> [[OP2]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vsmul_vv_i8mf4_tu(vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
- return __riscv_vsmul_vv_i8mf4_tu(maskedoff, op1, op2, vl);
+ return __riscv_vsmul_vv_i8mf4_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vsmul_vx_i8mf4_tu
// CHECK-RV64-SAME: (<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], i8 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsmul.nxv2i8.i8.i64(<vscale x 2 x i8> [[MASKEDOFF]], <vscale x 2 x i8> [[OP1]], i8 [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsmul.nxv2i8.i8.i64(<vscale x 2 x i8> [[MASKEDOFF]], <vscale x 2 x i8> [[OP1]], i8 [[OP2]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vsmul_vx_i8mf4_tu(vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) {
- return __riscv_vsmul_vx_i8mf4_tu(maskedoff, op1, op2, vl);
+ return __riscv_vsmul_vx_i8mf4_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vsmul_vv_i8mf2_tu
// CHECK-RV64-SAME: (<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsmul.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[MASKEDOFF]], <vscale x 4 x i8> [[OP1]], <vscale x 4 x i8> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsmul.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[MASKEDOFF]], <vscale x 4 x i8> [[OP1]], <vscale x 4 x i8> [[OP2]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vsmul_vv_i8mf2_tu(vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
- return __riscv_vsmul_vv_i8mf2_tu(maskedoff, op1, op2, vl);
+ return __riscv_vsmul_vv_i8mf2_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vsmul_vx_i8mf2_tu
// CHECK-RV64-SAME: (<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], i8 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsmul.nxv4i8.i8.i64(<vscale x 4 x i8> [[MASKEDOFF]], <vscale x 4 x i8> [[OP1]], i8 [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsmul.nxv4i8.i8.i64(<vscale x 4 x i8> [[MASKEDOFF]], <vscale x 4 x i8> [[OP1]], i8 [[OP2]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vsmul_vx_i8mf2_tu(vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) {
- return __riscv_vsmul_vx_i8mf2_tu(maskedoff, op1, op2, vl);
+ return __riscv_vsmul_vx_i8mf2_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vsmul_vv_i8m1_tu
// CHECK-RV64-SAME: (<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vsmul.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF]], <vscale x 8 x i8> [[OP1]], <vscale x 8 x i8> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vsmul.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF]], <vscale x 8 x i8> [[OP1]], <vscale x 8 x i8> [[OP2]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vsmul_vv_i8m1_tu(vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) {
- return __riscv_vsmul_vv_i8m1_tu(maskedoff, op1, op2, vl);
+ return __riscv_vsmul_vv_i8m1_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vsmul_vx_i8m1_tu
// CHECK-RV64-SAME: (<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], i8 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vsmul.nxv8i8.i8.i64(<vscale x 8 x i8> [[MASKEDOFF]], <vscale x 8 x i8> [[OP1]], i8 [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vsmul.nxv8i8.i8.i64(<vscale x 8 x i8> [[MASKEDOFF]], <vscale x 8 x i8> [[OP1]], i8 [[OP2]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vsmul_vx_i8m1_tu(vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) {
- return __riscv_vsmul_vx_i8m1_tu(maskedoff, op1, op2, vl);
+ return __riscv_vsmul_vx_i8m1_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vsmul_vv_i8m2_tu
// CHECK-RV64-SAME: (<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vsmul.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[MASKEDOFF]], <vscale x 16 x i8> [[OP1]], <vscale x 16 x i8> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vsmul.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[MASKEDOFF]], <vscale x 16 x i8> [[OP1]], <vscale x 16 x i8> [[OP2]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vsmul_vv_i8m2_tu(vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) {
- return __riscv_vsmul_vv_i8m2_tu(maskedoff, op1, op2, vl);
+ return __riscv_vsmul_vv_i8m2_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vsmul_vx_i8m2_tu
// CHECK-RV64-SAME: (<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], i8 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vsmul.nxv16i8.i8.i64(<vscale x 16 x i8> [[MASKEDOFF]], <vscale x 16 x i8> [[OP1]], i8 [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vsmul.nxv16i8.i8.i64(<vscale x 16 x i8> [[MASKEDOFF]], <vscale x 16 x i8> [[OP1]], i8 [[OP2]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vsmul_vx_i8m2_tu(vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) {
- return __riscv_vsmul_vx_i8m2_tu(maskedoff, op1, op2, vl);
+ return __riscv_vsmul_vx_i8m2_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vsmul_vv_i8m4_tu
// CHECK-RV64-SAME: (<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsmul.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[MASKEDOFF]], <vscale x 32 x i8> [[OP1]], <vscale x 32 x i8> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsmul.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[MASKEDOFF]], <vscale x 32 x i8> [[OP1]], <vscale x 32 x i8> [[OP2]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vsmul_vv_i8m4_tu(vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) {
- return __riscv_vsmul_vv_i8m4_tu(maskedoff, op1, op2, vl);
+ return __riscv_vsmul_vv_i8m4_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vsmul_vx_i8m4_tu
// CHECK-RV64-SAME: (<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], i8 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsmul.nxv32i8.i8.i64(<vscale x 32 x i8> [[MASKEDOFF]], <vscale x 32 x i8> [[OP1]], i8 [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsmul.nxv32i8.i8.i64(<vscale x 32 x i8> [[MASKEDOFF]], <vscale x 32 x i8> [[OP1]], i8 [[OP2]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vsmul_vx_i8m4_tu(vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) {
- return __riscv_vsmul_vx_i8m4_tu(maskedoff, op1, op2, vl);
+ return __riscv_vsmul_vx_i8m4_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 64 x i8> @test_vsmul_vv_i8m8_tu
// CHECK-RV64-SAME: (<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsmul.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[MASKEDOFF]], <vscale x 64 x i8> [[OP1]], <vscale x 64 x i8> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsmul.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[MASKEDOFF]], <vscale x 64 x i8> [[OP1]], <vscale x 64 x i8> [[OP2]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vsmul_vv_i8m8_tu(vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl) {
- return __riscv_vsmul_vv_i8m8_tu(maskedoff, op1, op2, vl);
+ return __riscv_vsmul_vv_i8m8_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 64 x i8> @test_vsmul_vx_i8m8_tu
// CHECK-RV64-SAME: (<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], i8 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsmul.nxv64i8.i8.i64(<vscale x 64 x i8> [[MASKEDOFF]], <vscale x 64 x i8> [[OP1]], i8 [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsmul.nxv64i8.i8.i64(<vscale x 64 x i8> [[MASKEDOFF]], <vscale x 64 x i8> [[OP1]], i8 [[OP2]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vsmul_vx_i8m8_tu(vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl) {
- return __riscv_vsmul_vx_i8m8_tu(maskedoff, op1, op2, vl);
+ return __riscv_vsmul_vx_i8m8_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i16> @test_vsmul_vv_i16mf4_tu
// CHECK-RV64-SAME: (<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsmul.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[MASKEDOFF]], <vscale x 1 x i16> [[OP1]], <vscale x 1 x i16> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsmul.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[MASKEDOFF]], <vscale x 1 x i16> [[OP1]], <vscale x 1 x i16> [[OP2]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vsmul_vv_i16mf4_tu(vint16mf4_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
- return __riscv_vsmul_vv_i16mf4_tu(maskedoff, op1, op2, vl);
+ return __riscv_vsmul_vv_i16mf4_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i16> @test_vsmul_vx_i16mf4_tu
// CHECK-RV64-SAME: (<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i16 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsmul.nxv1i16.i16.i64(<vscale x 1 x i16> [[MASKEDOFF]], <vscale x 1 x i16> [[OP1]], i16 [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsmul.nxv1i16.i16.i64(<vscale x 1 x i16> [[MASKEDOFF]], <vscale x 1 x i16> [[OP1]], i16 [[OP2]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vsmul_vx_i16mf4_tu(vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) {
- return __riscv_vsmul_vx_i16mf4_tu(maskedoff, op1, op2, vl);
+ return __riscv_vsmul_vx_i16mf4_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i16> @test_vsmul_vv_i16mf2_tu
// CHECK-RV64-SAME: (<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsmul.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF]], <vscale x 2 x i16> [[OP1]], <vscale x 2 x i16> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsmul.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF]], <vscale x 2 x i16> [[OP1]], <vscale x 2 x i16> [[OP2]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vsmul_vv_i16mf2_tu(vint16mf2_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
- return __riscv_vsmul_vv_i16mf2_tu(maskedoff, op1, op2, vl);
+ return __riscv_vsmul_vv_i16mf2_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i16> @test_vsmul_vx_i16mf2_tu
// CHECK-RV64-SAME: (<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i16 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsmul.nxv2i16.i16.i64(<vscale x 2 x i16> [[MASKEDOFF]], <vscale x 2 x i16> [[OP1]], i16 [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsmul.nxv2i16.i16.i64(<vscale x 2 x i16> [[MASKEDOFF]], <vscale x 2 x i16> [[OP1]], i16 [[OP2]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vsmul_vx_i16mf2_tu(vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) {
- return __riscv_vsmul_vx_i16mf2_tu(maskedoff, op1, op2, vl);
+ return __riscv_vsmul_vx_i16mf2_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vsmul_vv_i16m1_tu
// CHECK-RV64-SAME: (<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsmul.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF]], <vscale x 4 x i16> [[OP1]], <vscale x 4 x i16> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsmul.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF]], <vscale x 4 x i16> [[OP1]], <vscale x 4 x i16> [[OP2]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vsmul_vv_i16m1_tu(vint16m1_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) {
- return __riscv_vsmul_vv_i16m1_tu(maskedoff, op1, op2, vl);
+ return __riscv_vsmul_vv_i16m1_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vsmul_vx_i16m1_tu
// CHECK-RV64-SAME: (<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i16 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsmul.nxv4i16.i16.i64(<vscale x 4 x i16> [[MASKEDOFF]], <vscale x 4 x i16> [[OP1]], i16 [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsmul.nxv4i16.i16.i64(<vscale x 4 x i16> [[MASKEDOFF]], <vscale x 4 x i16> [[OP1]], i16 [[OP2]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vsmul_vx_i16m1_tu(vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) {
- return __riscv_vsmul_vx_i16m1_tu(maskedoff, op1, op2, vl);
+ return __riscv_vsmul_vx_i16m1_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i16> @test_vsmul_vv_i16m2_tu
// CHECK-RV64-SAME: (<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsmul.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[MASKEDOFF]], <vscale x 8 x i16> [[OP1]], <vscale x 8 x i16> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsmul.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[MASKEDOFF]], <vscale x 8 x i16> [[OP1]], <vscale x 8 x i16> [[OP2]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vsmul_vv_i16m2_tu(vint16m2_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) {
- return __riscv_vsmul_vv_i16m2_tu(maskedoff, op1, op2, vl);
+ return __riscv_vsmul_vv_i16m2_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i16> @test_vsmul_vx_i16m2_tu
// CHECK-RV64-SAME: (<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i16 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsmul.nxv8i16.i16.i64(<vscale x 8 x i16> [[MASKEDOFF]], <vscale x 8 x i16> [[OP1]], i16 [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsmul.nxv8i16.i16.i64(<vscale x 8 x i16> [[MASKEDOFF]], <vscale x 8 x i16> [[OP1]], i16 [[OP2]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vsmul_vx_i16m2_tu(vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) {
- return __riscv_vsmul_vx_i16m2_tu(maskedoff, op1, op2, vl);
+ return __riscv_vsmul_vx_i16m2_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i16> @test_vsmul_vv_i16m4_tu
// CHECK-RV64-SAME: (<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsmul.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[MASKEDOFF]], <vscale x 16 x i16> [[OP1]], <vscale x 16 x i16> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsmul.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[MASKEDOFF]], <vscale x 16 x i16> [[OP1]], <vscale x 16 x i16> [[OP2]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vsmul_vv_i16m4_tu(vint16m4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) {
- return __riscv_vsmul_vv_i16m4_tu(maskedoff, op1, op2, vl);
+ return __riscv_vsmul_vv_i16m4_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i16> @test_vsmul_vx_i16m4_tu
// CHECK-RV64-SAME: (<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i16 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsmul.nxv16i16.i16.i64(<vscale x 16 x i16> [[MASKEDOFF]], <vscale x 16 x i16> [[OP1]], i16 [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsmul.nxv16i16.i16.i64(<vscale x 16 x i16> [[MASKEDOFF]], <vscale x 16 x i16> [[OP1]], i16 [[OP2]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vsmul_vx_i16m4_tu(vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) {
- return __riscv_vsmul_vx_i16m4_tu(maskedoff, op1, op2, vl);
+ return __riscv_vsmul_vx_i16m4_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i16> @test_vsmul_vv_i16m8_tu
// CHECK-RV64-SAME: (<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsmul.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[MASKEDOFF]], <vscale x 32 x i16> [[OP1]], <vscale x 32 x i16> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsmul.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[MASKEDOFF]], <vscale x 32 x i16> [[OP1]], <vscale x 32 x i16> [[OP2]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vsmul_vv_i16m8_tu(vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl) {
- return __riscv_vsmul_vv_i16m8_tu(maskedoff, op1, op2, vl);
+ return __riscv_vsmul_vv_i16m8_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i16> @test_vsmul_vx_i16m8_tu
// CHECK-RV64-SAME: (<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i16 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsmul.nxv32i16.i16.i64(<vscale x 32 x i16> [[MASKEDOFF]], <vscale x 32 x i16> [[OP1]], i16 [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsmul.nxv32i16.i16.i64(<vscale x 32 x i16> [[MASKEDOFF]], <vscale x 32 x i16> [[OP1]], i16 [[OP2]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vsmul_vx_i16m8_tu(vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) {
- return __riscv_vsmul_vx_i16m8_tu(maskedoff, op1, op2, vl);
+ return __riscv_vsmul_vx_i16m8_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i32> @test_vsmul_vv_i32mf2_tu
// CHECK-RV64-SAME: (<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsmul.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[MASKEDOFF]], <vscale x 1 x i32> [[OP1]], <vscale x 1 x i32> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsmul.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[MASKEDOFF]], <vscale x 1 x i32> [[OP1]], <vscale x 1 x i32> [[OP2]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vsmul_vv_i32mf2_tu(vint32mf2_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
- return __riscv_vsmul_vv_i32mf2_tu(maskedoff, op1, op2, vl);
+ return __riscv_vsmul_vv_i32mf2_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i32> @test_vsmul_vx_i32mf2_tu
// CHECK-RV64-SAME: (<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsmul.nxv1i32.i32.i64(<vscale x 1 x i32> [[MASKEDOFF]], <vscale x 1 x i32> [[OP1]], i32 [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsmul.nxv1i32.i32.i64(<vscale x 1 x i32> [[MASKEDOFF]], <vscale x 1 x i32> [[OP1]], i32 [[OP2]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vsmul_vx_i32mf2_tu(vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) {
- return __riscv_vsmul_vx_i32mf2_tu(maskedoff, op1, op2, vl);
+ return __riscv_vsmul_vx_i32mf2_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i32> @test_vsmul_vv_i32m1_tu
// CHECK-RV64-SAME: (<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsmul.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[MASKEDOFF]], <vscale x 2 x i32> [[OP1]], <vscale x 2 x i32> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsmul.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[MASKEDOFF]], <vscale x 2 x i32> [[OP1]], <vscale x 2 x i32> [[OP2]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vsmul_vv_i32m1_tu(vint32m1_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) {
- return __riscv_vsmul_vv_i32m1_tu(maskedoff, op1, op2, vl);
+ return __riscv_vsmul_vv_i32m1_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i32> @test_vsmul_vx_i32m1_tu
// CHECK-RV64-SAME: (<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i32 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsmul.nxv2i32.i32.i64(<vscale x 2 x i32> [[MASKEDOFF]], <vscale x 2 x i32> [[OP1]], i32 [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsmul.nxv2i32.i32.i64(<vscale x 2 x i32> [[MASKEDOFF]], <vscale x 2 x i32> [[OP1]], i32 [[OP2]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vsmul_vx_i32m1_tu(vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) {
- return __riscv_vsmul_vx_i32m1_tu(maskedoff, op1, op2, vl);
+ return __riscv_vsmul_vx_i32m1_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i32> @test_vsmul_vv_i32m2_tu
// CHECK-RV64-SAME: (<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsmul.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[MASKEDOFF]], <vscale x 4 x i32> [[OP1]], <vscale x 4 x i32> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsmul.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[MASKEDOFF]], <vscale x 4 x i32> [[OP1]], <vscale x 4 x i32> [[OP2]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vsmul_vv_i32m2_tu(vint32m2_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) {
- return __riscv_vsmul_vv_i32m2_tu(maskedoff, op1, op2, vl);
+ return __riscv_vsmul_vv_i32m2_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i32> @test_vsmul_vx_i32m2_tu
// CHECK-RV64-SAME: (<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i32 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsmul.nxv4i32.i32.i64(<vscale x 4 x i32> [[MASKEDOFF]], <vscale x 4 x i32> [[OP1]], i32 [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsmul.nxv4i32.i32.i64(<vscale x 4 x i32> [[MASKEDOFF]], <vscale x 4 x i32> [[OP1]], i32 [[OP2]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vsmul_vx_i32m2_tu(vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) {
- return __riscv_vsmul_vx_i32m2_tu(maskedoff, op1, op2, vl);
+ return __riscv_vsmul_vx_i32m2_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i32> @test_vsmul_vv_i32m4_tu
// CHECK-RV64-SAME: (<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsmul.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[MASKEDOFF]], <vscale x 8 x i32> [[OP1]], <vscale x 8 x i32> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsmul.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[MASKEDOFF]], <vscale x 8 x i32> [[OP1]], <vscale x 8 x i32> [[OP2]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vsmul_vv_i32m4_tu(vint32m4_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) {
- return __riscv_vsmul_vv_i32m4_tu(maskedoff, op1, op2, vl);
+ return __riscv_vsmul_vv_i32m4_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i32> @test_vsmul_vx_i32m4_tu
// CHECK-RV64-SAME: (<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i32 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsmul.nxv8i32.i32.i64(<vscale x 8 x i32> [[MASKEDOFF]], <vscale x 8 x i32> [[OP1]], i32 [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsmul.nxv8i32.i32.i64(<vscale x 8 x i32> [[MASKEDOFF]], <vscale x 8 x i32> [[OP1]], i32 [[OP2]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vsmul_vx_i32m4_tu(vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) {
- return __riscv_vsmul_vx_i32m4_tu(maskedoff, op1, op2, vl);
+ return __riscv_vsmul_vx_i32m4_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i32> @test_vsmul_vv_i32m8_tu
// CHECK-RV64-SAME: (<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsmul.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[MASKEDOFF]], <vscale x 16 x i32> [[OP1]], <vscale x 16 x i32> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsmul.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[MASKEDOFF]], <vscale x 16 x i32> [[OP1]], <vscale x 16 x i32> [[OP2]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vsmul_vv_i32m8_tu(vint32m8_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl) {
- return __riscv_vsmul_vv_i32m8_tu(maskedoff, op1, op2, vl);
+ return __riscv_vsmul_vv_i32m8_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i32> @test_vsmul_vx_i32m8_tu
// CHECK-RV64-SAME: (<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i32 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsmul.nxv16i32.i32.i64(<vscale x 16 x i32> [[MASKEDOFF]], <vscale x 16 x i32> [[OP1]], i32 [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsmul.nxv16i32.i32.i64(<vscale x 16 x i32> [[MASKEDOFF]], <vscale x 16 x i32> [[OP1]], i32 [[OP2]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vsmul_vx_i32m8_tu(vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) {
- return __riscv_vsmul_vx_i32m8_tu(maskedoff, op1, op2, vl);
+ return __riscv_vsmul_vx_i32m8_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i64> @test_vsmul_vv_i64m1_tu
// CHECK-RV64-SAME: (<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsmul.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[MASKEDOFF]], <vscale x 1 x i64> [[OP1]], <vscale x 1 x i64> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsmul.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[MASKEDOFF]], <vscale x 1 x i64> [[OP1]], <vscale x 1 x i64> [[OP2]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vsmul_vv_i64m1_tu(vint64m1_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl) {
- return __riscv_vsmul_vv_i64m1_tu(maskedoff, op1, op2, vl);
+ return __riscv_vsmul_vv_i64m1_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i64> @test_vsmul_vx_i64m1_tu
// CHECK-RV64-SAME: (<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i64 noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsmul.nxv1i64.i64.i64(<vscale x 1 x i64> [[MASKEDOFF]], <vscale x 1 x i64> [[OP1]], i64 [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsmul.nxv1i64.i64.i64(<vscale x 1 x i64> [[MASKEDOFF]], <vscale x 1 x i64> [[OP1]], i64 [[OP2]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vsmul_vx_i64m1_tu(vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) {
- return __riscv_vsmul_vx_i64m1_tu(maskedoff, op1, op2, vl);
+ return __riscv_vsmul_vx_i64m1_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i64> @test_vsmul_vv_i64m2_tu
// CHECK-RV64-SAME: (<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsmul.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[MASKEDOFF]], <vscale x 2 x i64> [[OP1]], <vscale x 2 x i64> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsmul.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[MASKEDOFF]], <vscale x 2 x i64> [[OP1]], <vscale x 2 x i64> [[OP2]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vsmul_vv_i64m2_tu(vint64m2_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl) {
- return __riscv_vsmul_vv_i64m2_tu(maskedoff, op1, op2, vl);
+ return __riscv_vsmul_vv_i64m2_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i64> @test_vsmul_vx_i64m2_tu
// CHECK-RV64-SAME: (<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i64 noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsmul.nxv2i64.i64.i64(<vscale x 2 x i64> [[MASKEDOFF]], <vscale x 2 x i64> [[OP1]], i64 [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsmul.nxv2i64.i64.i64(<vscale x 2 x i64> [[MASKEDOFF]], <vscale x 2 x i64> [[OP1]], i64 [[OP2]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vsmul_vx_i64m2_tu(vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) {
- return __riscv_vsmul_vx_i64m2_tu(maskedoff, op1, op2, vl);
+ return __riscv_vsmul_vx_i64m2_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i64> @test_vsmul_vv_i64m4_tu
// CHECK-RV64-SAME: (<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsmul.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[MASKEDOFF]], <vscale x 4 x i64> [[OP1]], <vscale x 4 x i64> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsmul.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[MASKEDOFF]], <vscale x 4 x i64> [[OP1]], <vscale x 4 x i64> [[OP2]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vsmul_vv_i64m4_tu(vint64m4_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl) {
- return __riscv_vsmul_vv_i64m4_tu(maskedoff, op1, op2, vl);
+ return __riscv_vsmul_vv_i64m4_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i64> @test_vsmul_vx_i64m4_tu
// CHECK-RV64-SAME: (<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i64 noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsmul.nxv4i64.i64.i64(<vscale x 4 x i64> [[MASKEDOFF]], <vscale x 4 x i64> [[OP1]], i64 [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsmul.nxv4i64.i64.i64(<vscale x 4 x i64> [[MASKEDOFF]], <vscale x 4 x i64> [[OP1]], i64 [[OP2]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vsmul_vx_i64m4_tu(vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) {
- return __riscv_vsmul_vx_i64m4_tu(maskedoff, op1, op2, vl);
+ return __riscv_vsmul_vx_i64m4_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i64> @test_vsmul_vv_i64m8_tu
// CHECK-RV64-SAME: (<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsmul.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[MASKEDOFF]], <vscale x 8 x i64> [[OP1]], <vscale x 8 x i64> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsmul.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[MASKEDOFF]], <vscale x 8 x i64> [[OP1]], <vscale x 8 x i64> [[OP2]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vsmul_vv_i64m8_tu(vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl) {
- return __riscv_vsmul_vv_i64m8_tu(maskedoff, op1, op2, vl);
+ return __riscv_vsmul_vv_i64m8_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i64> @test_vsmul_vx_i64m8_tu
// CHECK-RV64-SAME: (<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsmul.nxv8i64.i64.i64(<vscale x 8 x i64> [[MASKEDOFF]], <vscale x 8 x i64> [[OP1]], i64 [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsmul.nxv8i64.i64.i64(<vscale x 8 x i64> [[MASKEDOFF]], <vscale x 8 x i64> [[OP1]], i64 [[OP2]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vsmul_vx_i64m8_tu(vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) {
- return __riscv_vsmul_vx_i64m8_tu(maskedoff, op1, op2, vl);
+ return __riscv_vsmul_vx_i64m8_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vsmul_vv_i8mf8_tum
// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsmul.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF]], <vscale x 1 x i8> [[OP1]], <vscale x 1 x i8> [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsmul.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF]], <vscale x 1 x i8> [[OP1]], <vscale x 1 x i8> [[OP2]], <vscale x 1 x i1> [[MASK]], i64 0, i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vsmul_vv_i8mf8_tum(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
- return __riscv_vsmul_vv_i8mf8_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsmul_vv_i8mf8_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vsmul_vx_i8mf8_tum
// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i8 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsmul.mask.nxv1i8.i8.i64(<vscale x 1 x i8> [[MASKEDOFF]], <vscale x 1 x i8> [[OP1]], i8 [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsmul.mask.nxv1i8.i8.i64(<vscale x 1 x i8> [[MASKEDOFF]], <vscale x 1 x i8> [[OP1]], i8 [[OP2]], <vscale x 1 x i1> [[MASK]], i64 0, i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vsmul_vx_i8mf8_tum(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) {
- return __riscv_vsmul_vx_i8mf8_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsmul_vx_i8mf8_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vsmul_vv_i8mf4_tum
// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsmul.mask.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[MASKEDOFF]], <vscale x 2 x i8> [[OP1]], <vscale x 2 x i8> [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsmul.mask.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[MASKEDOFF]], <vscale x 2 x i8> [[OP1]], <vscale x 2 x i8> [[OP2]], <vscale x 2 x i1> [[MASK]], i64 0, i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vsmul_vv_i8mf4_tum(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
- return __riscv_vsmul_vv_i8mf4_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsmul_vv_i8mf4_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vsmul_vx_i8mf4_tum
// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], i8 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsmul.mask.nxv2i8.i8.i64(<vscale x 2 x i8> [[MASKEDOFF]], <vscale x 2 x i8> [[OP1]], i8 [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsmul.mask.nxv2i8.i8.i64(<vscale x 2 x i8> [[MASKEDOFF]], <vscale x 2 x i8> [[OP1]], i8 [[OP2]], <vscale x 2 x i1> [[MASK]], i64 0, i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vsmul_vx_i8mf4_tum(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) {
- return __riscv_vsmul_vx_i8mf4_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsmul_vx_i8mf4_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vsmul_vv_i8mf2_tum
// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsmul.mask.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[MASKEDOFF]], <vscale x 4 x i8> [[OP1]], <vscale x 4 x i8> [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsmul.mask.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[MASKEDOFF]], <vscale x 4 x i8> [[OP1]], <vscale x 4 x i8> [[OP2]], <vscale x 4 x i1> [[MASK]], i64 0, i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vsmul_vv_i8mf2_tum(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
- return __riscv_vsmul_vv_i8mf2_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsmul_vv_i8mf2_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vsmul_vx_i8mf2_tum
// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], i8 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsmul.mask.nxv4i8.i8.i64(<vscale x 4 x i8> [[MASKEDOFF]], <vscale x 4 x i8> [[OP1]], i8 [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsmul.mask.nxv4i8.i8.i64(<vscale x 4 x i8> [[MASKEDOFF]], <vscale x 4 x i8> [[OP1]], i8 [[OP2]], <vscale x 4 x i1> [[MASK]], i64 0, i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vsmul_vx_i8mf2_tum(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) {
- return __riscv_vsmul_vx_i8mf2_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsmul_vx_i8mf2_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vsmul_vv_i8m1_tum
// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vsmul.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF]], <vscale x 8 x i8> [[OP1]], <vscale x 8 x i8> [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vsmul.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF]], <vscale x 8 x i8> [[OP1]], <vscale x 8 x i8> [[OP2]], <vscale x 8 x i1> [[MASK]], i64 0, i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vsmul_vv_i8m1_tum(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) {
- return __riscv_vsmul_vv_i8m1_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsmul_vv_i8m1_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vsmul_vx_i8m1_tum
// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], i8 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vsmul.mask.nxv8i8.i8.i64(<vscale x 8 x i8> [[MASKEDOFF]], <vscale x 8 x i8> [[OP1]], i8 [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vsmul.mask.nxv8i8.i8.i64(<vscale x 8 x i8> [[MASKEDOFF]], <vscale x 8 x i8> [[OP1]], i8 [[OP2]], <vscale x 8 x i1> [[MASK]], i64 0, i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vsmul_vx_i8m1_tum(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) {
- return __riscv_vsmul_vx_i8m1_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsmul_vx_i8m1_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vsmul_vv_i8m2_tum
// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vsmul.mask.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[MASKEDOFF]], <vscale x 16 x i8> [[OP1]], <vscale x 16 x i8> [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vsmul.mask.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[MASKEDOFF]], <vscale x 16 x i8> [[OP1]], <vscale x 16 x i8> [[OP2]], <vscale x 16 x i1> [[MASK]], i64 0, i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vsmul_vv_i8m2_tum(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) {
- return __riscv_vsmul_vv_i8m2_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsmul_vv_i8m2_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vsmul_vx_i8m2_tum
// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], i8 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vsmul.mask.nxv16i8.i8.i64(<vscale x 16 x i8> [[MASKEDOFF]], <vscale x 16 x i8> [[OP1]], i8 [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vsmul.mask.nxv16i8.i8.i64(<vscale x 16 x i8> [[MASKEDOFF]], <vscale x 16 x i8> [[OP1]], i8 [[OP2]], <vscale x 16 x i1> [[MASK]], i64 0, i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vsmul_vx_i8m2_tum(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) {
- return __riscv_vsmul_vx_i8m2_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsmul_vx_i8m2_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vsmul_vv_i8m4_tum
// CHECK-RV64-SAME: (<vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsmul.mask.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[MASKEDOFF]], <vscale x 32 x i8> [[OP1]], <vscale x 32 x i8> [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsmul.mask.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[MASKEDOFF]], <vscale x 32 x i8> [[OP1]], <vscale x 32 x i8> [[OP2]], <vscale x 32 x i1> [[MASK]], i64 0, i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vsmul_vv_i8m4_tum(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) {
- return __riscv_vsmul_vv_i8m4_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsmul_vv_i8m4_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vsmul_vx_i8m4_tum
// CHECK-RV64-SAME: (<vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], i8 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsmul.mask.nxv32i8.i8.i64(<vscale x 32 x i8> [[MASKEDOFF]], <vscale x 32 x i8> [[OP1]], i8 [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsmul.mask.nxv32i8.i8.i64(<vscale x 32 x i8> [[MASKEDOFF]], <vscale x 32 x i8> [[OP1]], i8 [[OP2]], <vscale x 32 x i1> [[MASK]], i64 0, i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vsmul_vx_i8m4_tum(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) {
- return __riscv_vsmul_vx_i8m4_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsmul_vx_i8m4_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 64 x i8> @test_vsmul_vv_i8m8_tum
// CHECK-RV64-SAME: (<vscale x 64 x i1> [[MASK:%.*]], <vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsmul.mask.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[MASKEDOFF]], <vscale x 64 x i8> [[OP1]], <vscale x 64 x i8> [[OP2]], <vscale x 64 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsmul.mask.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[MASKEDOFF]], <vscale x 64 x i8> [[OP1]], <vscale x 64 x i8> [[OP2]], <vscale x 64 x i1> [[MASK]], i64 0, i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vsmul_vv_i8m8_tum(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl) {
- return __riscv_vsmul_vv_i8m8_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsmul_vv_i8m8_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 64 x i8> @test_vsmul_vx_i8m8_tum
// CHECK-RV64-SAME: (<vscale x 64 x i1> [[MASK:%.*]], <vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], i8 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsmul.mask.nxv64i8.i8.i64(<vscale x 64 x i8> [[MASKEDOFF]], <vscale x 64 x i8> [[OP1]], i8 [[OP2]], <vscale x 64 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsmul.mask.nxv64i8.i8.i64(<vscale x 64 x i8> [[MASKEDOFF]], <vscale x 64 x i8> [[OP1]], i8 [[OP2]], <vscale x 64 x i1> [[MASK]], i64 0, i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vsmul_vx_i8m8_tum(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl) {
- return __riscv_vsmul_vx_i8m8_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsmul_vx_i8m8_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i16> @test_vsmul_vv_i16mf4_tum
// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsmul.mask.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[MASKEDOFF]], <vscale x 1 x i16> [[OP1]], <vscale x 1 x i16> [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsmul.mask.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[MASKEDOFF]], <vscale x 1 x i16> [[OP1]], <vscale x 1 x i16> [[OP2]], <vscale x 1 x i1> [[MASK]], i64 0, i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vsmul_vv_i16mf4_tum(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
- return __riscv_vsmul_vv_i16mf4_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsmul_vv_i16mf4_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i16> @test_vsmul_vx_i16mf4_tum
// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i16 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsmul.mask.nxv1i16.i16.i64(<vscale x 1 x i16> [[MASKEDOFF]], <vscale x 1 x i16> [[OP1]], i16 [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsmul.mask.nxv1i16.i16.i64(<vscale x 1 x i16> [[MASKEDOFF]], <vscale x 1 x i16> [[OP1]], i16 [[OP2]], <vscale x 1 x i1> [[MASK]], i64 0, i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vsmul_vx_i16mf4_tum(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) {
- return __riscv_vsmul_vx_i16mf4_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsmul_vx_i16mf4_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i16> @test_vsmul_vv_i16mf2_tum
// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsmul.mask.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF]], <vscale x 2 x i16> [[OP1]], <vscale x 2 x i16> [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsmul.mask.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF]], <vscale x 2 x i16> [[OP1]], <vscale x 2 x i16> [[OP2]], <vscale x 2 x i1> [[MASK]], i64 0, i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vsmul_vv_i16mf2_tum(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
- return __riscv_vsmul_vv_i16mf2_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsmul_vv_i16mf2_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i16> @test_vsmul_vx_i16mf2_tum
// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i16 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsmul.mask.nxv2i16.i16.i64(<vscale x 2 x i16> [[MASKEDOFF]], <vscale x 2 x i16> [[OP1]], i16 [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsmul.mask.nxv2i16.i16.i64(<vscale x 2 x i16> [[MASKEDOFF]], <vscale x 2 x i16> [[OP1]], i16 [[OP2]], <vscale x 2 x i1> [[MASK]], i64 0, i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vsmul_vx_i16mf2_tum(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) {
- return __riscv_vsmul_vx_i16mf2_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsmul_vx_i16mf2_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vsmul_vv_i16m1_tum
// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsmul.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF]], <vscale x 4 x i16> [[OP1]], <vscale x 4 x i16> [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsmul.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF]], <vscale x 4 x i16> [[OP1]], <vscale x 4 x i16> [[OP2]], <vscale x 4 x i1> [[MASK]], i64 0, i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vsmul_vv_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) {
- return __riscv_vsmul_vv_i16m1_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsmul_vv_i16m1_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vsmul_vx_i16m1_tum
// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i16 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsmul.mask.nxv4i16.i16.i64(<vscale x 4 x i16> [[MASKEDOFF]], <vscale x 4 x i16> [[OP1]], i16 [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsmul.mask.nxv4i16.i16.i64(<vscale x 4 x i16> [[MASKEDOFF]], <vscale x 4 x i16> [[OP1]], i16 [[OP2]], <vscale x 4 x i1> [[MASK]], i64 0, i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vsmul_vx_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) {
- return __riscv_vsmul_vx_i16m1_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsmul_vx_i16m1_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i16> @test_vsmul_vv_i16m2_tum
// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsmul.mask.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[MASKEDOFF]], <vscale x 8 x i16> [[OP1]], <vscale x 8 x i16> [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsmul.mask.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[MASKEDOFF]], <vscale x 8 x i16> [[OP1]], <vscale x 8 x i16> [[OP2]], <vscale x 8 x i1> [[MASK]], i64 0, i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vsmul_vv_i16m2_tum(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) {
- return __riscv_vsmul_vv_i16m2_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsmul_vv_i16m2_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i16> @test_vsmul_vx_i16m2_tum
// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i16 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsmul.mask.nxv8i16.i16.i64(<vscale x 8 x i16> [[MASKEDOFF]], <vscale x 8 x i16> [[OP1]], i16 [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsmul.mask.nxv8i16.i16.i64(<vscale x 8 x i16> [[MASKEDOFF]], <vscale x 8 x i16> [[OP1]], i16 [[OP2]], <vscale x 8 x i1> [[MASK]], i64 0, i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vsmul_vx_i16m2_tum(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) {
- return __riscv_vsmul_vx_i16m2_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsmul_vx_i16m2_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i16> @test_vsmul_vv_i16m4_tum
// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsmul.mask.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[MASKEDOFF]], <vscale x 16 x i16> [[OP1]], <vscale x 16 x i16> [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsmul.mask.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[MASKEDOFF]], <vscale x 16 x i16> [[OP1]], <vscale x 16 x i16> [[OP2]], <vscale x 16 x i1> [[MASK]], i64 0, i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vsmul_vv_i16m4_tum(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) {
- return __riscv_vsmul_vv_i16m4_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsmul_vv_i16m4_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i16> @test_vsmul_vx_i16m4_tum
// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i16 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsmul.mask.nxv16i16.i16.i64(<vscale x 16 x i16> [[MASKEDOFF]], <vscale x 16 x i16> [[OP1]], i16 [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsmul.mask.nxv16i16.i16.i64(<vscale x 16 x i16> [[MASKEDOFF]], <vscale x 16 x i16> [[OP1]], i16 [[OP2]], <vscale x 16 x i1> [[MASK]], i64 0, i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vsmul_vx_i16m4_tum(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) {
- return __riscv_vsmul_vx_i16m4_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsmul_vx_i16m4_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i16> @test_vsmul_vv_i16m8_tum
// CHECK-RV64-SAME: (<vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsmul.mask.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[MASKEDOFF]], <vscale x 32 x i16> [[OP1]], <vscale x 32 x i16> [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsmul.mask.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[MASKEDOFF]], <vscale x 32 x i16> [[OP1]], <vscale x 32 x i16> [[OP2]], <vscale x 32 x i1> [[MASK]], i64 0, i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vsmul_vv_i16m8_tum(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl) {
- return __riscv_vsmul_vv_i16m8_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsmul_vv_i16m8_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i16> @test_vsmul_vx_i16m8_tum
// CHECK-RV64-SAME: (<vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i16 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsmul.mask.nxv32i16.i16.i64(<vscale x 32 x i16> [[MASKEDOFF]], <vscale x 32 x i16> [[OP1]], i16 [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsmul.mask.nxv32i16.i16.i64(<vscale x 32 x i16> [[MASKEDOFF]], <vscale x 32 x i16> [[OP1]], i16 [[OP2]], <vscale x 32 x i1> [[MASK]], i64 0, i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vsmul_vx_i16m8_tum(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) {
- return __riscv_vsmul_vx_i16m8_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsmul_vx_i16m8_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i32> @test_vsmul_vv_i32mf2_tum
// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsmul.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[MASKEDOFF]], <vscale x 1 x i32> [[OP1]], <vscale x 1 x i32> [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsmul.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[MASKEDOFF]], <vscale x 1 x i32> [[OP1]], <vscale x 1 x i32> [[OP2]], <vscale x 1 x i1> [[MASK]], i64 0, i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vsmul_vv_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
- return __riscv_vsmul_vv_i32mf2_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsmul_vv_i32mf2_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i32> @test_vsmul_vx_i32mf2_tum
// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsmul.mask.nxv1i32.i32.i64(<vscale x 1 x i32> [[MASKEDOFF]], <vscale x 1 x i32> [[OP1]], i32 [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsmul.mask.nxv1i32.i32.i64(<vscale x 1 x i32> [[MASKEDOFF]], <vscale x 1 x i32> [[OP1]], i32 [[OP2]], <vscale x 1 x i1> [[MASK]], i64 0, i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vsmul_vx_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) {
- return __riscv_vsmul_vx_i32mf2_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsmul_vx_i32mf2_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i32> @test_vsmul_vv_i32m1_tum
// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsmul.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[MASKEDOFF]], <vscale x 2 x i32> [[OP1]], <vscale x 2 x i32> [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsmul.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[MASKEDOFF]], <vscale x 2 x i32> [[OP1]], <vscale x 2 x i32> [[OP2]], <vscale x 2 x i1> [[MASK]], i64 0, i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vsmul_vv_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) {
- return __riscv_vsmul_vv_i32m1_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsmul_vv_i32m1_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i32> @test_vsmul_vx_i32m1_tum
// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i32 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsmul.mask.nxv2i32.i32.i64(<vscale x 2 x i32> [[MASKEDOFF]], <vscale x 2 x i32> [[OP1]], i32 [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsmul.mask.nxv2i32.i32.i64(<vscale x 2 x i32> [[MASKEDOFF]], <vscale x 2 x i32> [[OP1]], i32 [[OP2]], <vscale x 2 x i1> [[MASK]], i64 0, i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vsmul_vx_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) {
- return __riscv_vsmul_vx_i32m1_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsmul_vx_i32m1_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i32> @test_vsmul_vv_i32m2_tum
// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsmul.mask.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[MASKEDOFF]], <vscale x 4 x i32> [[OP1]], <vscale x 4 x i32> [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsmul.mask.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[MASKEDOFF]], <vscale x 4 x i32> [[OP1]], <vscale x 4 x i32> [[OP2]], <vscale x 4 x i1> [[MASK]], i64 0, i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vsmul_vv_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) {
- return __riscv_vsmul_vv_i32m2_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsmul_vv_i32m2_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i32> @test_vsmul_vx_i32m2_tum
// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i32 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsmul.mask.nxv4i32.i32.i64(<vscale x 4 x i32> [[MASKEDOFF]], <vscale x 4 x i32> [[OP1]], i32 [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsmul.mask.nxv4i32.i32.i64(<vscale x 4 x i32> [[MASKEDOFF]], <vscale x 4 x i32> [[OP1]], i32 [[OP2]], <vscale x 4 x i1> [[MASK]], i64 0, i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vsmul_vx_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) {
- return __riscv_vsmul_vx_i32m2_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsmul_vx_i32m2_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i32> @test_vsmul_vv_i32m4_tum
// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsmul.mask.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[MASKEDOFF]], <vscale x 8 x i32> [[OP1]], <vscale x 8 x i32> [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsmul.mask.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[MASKEDOFF]], <vscale x 8 x i32> [[OP1]], <vscale x 8 x i32> [[OP2]], <vscale x 8 x i1> [[MASK]], i64 0, i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vsmul_vv_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) {
- return __riscv_vsmul_vv_i32m4_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsmul_vv_i32m4_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i32> @test_vsmul_vx_i32m4_tum
// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i32 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsmul.mask.nxv8i32.i32.i64(<vscale x 8 x i32> [[MASKEDOFF]], <vscale x 8 x i32> [[OP1]], i32 [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsmul.mask.nxv8i32.i32.i64(<vscale x 8 x i32> [[MASKEDOFF]], <vscale x 8 x i32> [[OP1]], i32 [[OP2]], <vscale x 8 x i1> [[MASK]], i64 0, i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vsmul_vx_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) {
- return __riscv_vsmul_vx_i32m4_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsmul_vx_i32m4_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i32> @test_vsmul_vv_i32m8_tum
// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsmul.mask.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[MASKEDOFF]], <vscale x 16 x i32> [[OP1]], <vscale x 16 x i32> [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsmul.mask.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[MASKEDOFF]], <vscale x 16 x i32> [[OP1]], <vscale x 16 x i32> [[OP2]], <vscale x 16 x i1> [[MASK]], i64 0, i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vsmul_vv_i32m8_tum(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl) {
- return __riscv_vsmul_vv_i32m8_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsmul_vv_i32m8_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i32> @test_vsmul_vx_i32m8_tum
// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i32 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsmul.mask.nxv16i32.i32.i64(<vscale x 16 x i32> [[MASKEDOFF]], <vscale x 16 x i32> [[OP1]], i32 [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsmul.mask.nxv16i32.i32.i64(<vscale x 16 x i32> [[MASKEDOFF]], <vscale x 16 x i32> [[OP1]], i32 [[OP2]], <vscale x 16 x i1> [[MASK]], i64 0, i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vsmul_vx_i32m8_tum(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) {
- return __riscv_vsmul_vx_i32m8_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsmul_vx_i32m8_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i64> @test_vsmul_vv_i64m1_tum
// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsmul.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[MASKEDOFF]], <vscale x 1 x i64> [[OP1]], <vscale x 1 x i64> [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsmul.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[MASKEDOFF]], <vscale x 1 x i64> [[OP1]], <vscale x 1 x i64> [[OP2]], <vscale x 1 x i1> [[MASK]], i64 0, i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vsmul_vv_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl) {
- return __riscv_vsmul_vv_i64m1_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsmul_vv_i64m1_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i64> @test_vsmul_vx_i64m1_tum
// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i64 noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsmul.mask.nxv1i64.i64.i64(<vscale x 1 x i64> [[MASKEDOFF]], <vscale x 1 x i64> [[OP1]], i64 [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsmul.mask.nxv1i64.i64.i64(<vscale x 1 x i64> [[MASKEDOFF]], <vscale x 1 x i64> [[OP1]], i64 [[OP2]], <vscale x 1 x i1> [[MASK]], i64 0, i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vsmul_vx_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) {
- return __riscv_vsmul_vx_i64m1_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsmul_vx_i64m1_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i64> @test_vsmul_vv_i64m2_tum
// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsmul.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[MASKEDOFF]], <vscale x 2 x i64> [[OP1]], <vscale x 2 x i64> [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsmul.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[MASKEDOFF]], <vscale x 2 x i64> [[OP1]], <vscale x 2 x i64> [[OP2]], <vscale x 2 x i1> [[MASK]], i64 0, i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vsmul_vv_i64m2_tum(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl) {
- return __riscv_vsmul_vv_i64m2_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsmul_vv_i64m2_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i64> @test_vsmul_vx_i64m2_tum
// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i64 noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsmul.mask.nxv2i64.i64.i64(<vscale x 2 x i64> [[MASKEDOFF]], <vscale x 2 x i64> [[OP1]], i64 [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsmul.mask.nxv2i64.i64.i64(<vscale x 2 x i64> [[MASKEDOFF]], <vscale x 2 x i64> [[OP1]], i64 [[OP2]], <vscale x 2 x i1> [[MASK]], i64 0, i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vsmul_vx_i64m2_tum(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) {
- return __riscv_vsmul_vx_i64m2_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsmul_vx_i64m2_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i64> @test_vsmul_vv_i64m4_tum
// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsmul.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[MASKEDOFF]], <vscale x 4 x i64> [[OP1]], <vscale x 4 x i64> [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsmul.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[MASKEDOFF]], <vscale x 4 x i64> [[OP1]], <vscale x 4 x i64> [[OP2]], <vscale x 4 x i1> [[MASK]], i64 0, i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vsmul_vv_i64m4_tum(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl) {
- return __riscv_vsmul_vv_i64m4_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsmul_vv_i64m4_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i64> @test_vsmul_vx_i64m4_tum
// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i64 noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsmul.mask.nxv4i64.i64.i64(<vscale x 4 x i64> [[MASKEDOFF]], <vscale x 4 x i64> [[OP1]], i64 [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsmul.mask.nxv4i64.i64.i64(<vscale x 4 x i64> [[MASKEDOFF]], <vscale x 4 x i64> [[OP1]], i64 [[OP2]], <vscale x 4 x i1> [[MASK]], i64 0, i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vsmul_vx_i64m4_tum(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) {
- return __riscv_vsmul_vx_i64m4_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsmul_vx_i64m4_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i64> @test_vsmul_vv_i64m8_tum
// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsmul.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[MASKEDOFF]], <vscale x 8 x i64> [[OP1]], <vscale x 8 x i64> [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsmul.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[MASKEDOFF]], <vscale x 8 x i64> [[OP1]], <vscale x 8 x i64> [[OP2]], <vscale x 8 x i1> [[MASK]], i64 0, i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vsmul_vv_i64m8_tum(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl) {
- return __riscv_vsmul_vv_i64m8_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsmul_vv_i64m8_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i64> @test_vsmul_vx_i64m8_tum
// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsmul.mask.nxv8i64.i64.i64(<vscale x 8 x i64> [[MASKEDOFF]], <vscale x 8 x i64> [[OP1]], i64 [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsmul.mask.nxv8i64.i64.i64(<vscale x 8 x i64> [[MASKEDOFF]], <vscale x 8 x i64> [[OP1]], i64 [[OP2]], <vscale x 8 x i1> [[MASK]], i64 0, i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vsmul_vx_i64m8_tum(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) {
- return __riscv_vsmul_vx_i64m8_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsmul_vx_i64m8_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vsmul_vv_i8mf8_tumu
// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsmul.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF]], <vscale x 1 x i8> [[OP1]], <vscale x 1 x i8> [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsmul.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF]], <vscale x 1 x i8> [[OP1]], <vscale x 1 x i8> [[OP2]], <vscale x 1 x i1> [[MASK]], i64 0, i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vsmul_vv_i8mf8_tumu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
- return __riscv_vsmul_vv_i8mf8_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsmul_vv_i8mf8_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vsmul_vx_i8mf8_tumu
// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i8 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsmul.mask.nxv1i8.i8.i64(<vscale x 1 x i8> [[MASKEDOFF]], <vscale x 1 x i8> [[OP1]], i8 [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsmul.mask.nxv1i8.i8.i64(<vscale x 1 x i8> [[MASKEDOFF]], <vscale x 1 x i8> [[OP1]], i8 [[OP2]], <vscale x 1 x i1> [[MASK]], i64 0, i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vsmul_vx_i8mf8_tumu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) {
- return __riscv_vsmul_vx_i8mf8_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsmul_vx_i8mf8_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vsmul_vv_i8mf4_tumu
// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsmul.mask.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[MASKEDOFF]], <vscale x 2 x i8> [[OP1]], <vscale x 2 x i8> [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsmul.mask.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[MASKEDOFF]], <vscale x 2 x i8> [[OP1]], <vscale x 2 x i8> [[OP2]], <vscale x 2 x i1> [[MASK]], i64 0, i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vsmul_vv_i8mf4_tumu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
- return __riscv_vsmul_vv_i8mf4_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsmul_vv_i8mf4_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vsmul_vx_i8mf4_tumu
// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], i8 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsmul.mask.nxv2i8.i8.i64(<vscale x 2 x i8> [[MASKEDOFF]], <vscale x 2 x i8> [[OP1]], i8 [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsmul.mask.nxv2i8.i8.i64(<vscale x 2 x i8> [[MASKEDOFF]], <vscale x 2 x i8> [[OP1]], i8 [[OP2]], <vscale x 2 x i1> [[MASK]], i64 0, i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vsmul_vx_i8mf4_tumu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) {
- return __riscv_vsmul_vx_i8mf4_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsmul_vx_i8mf4_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vsmul_vv_i8mf2_tumu
// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsmul.mask.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[MASKEDOFF]], <vscale x 4 x i8> [[OP1]], <vscale x 4 x i8> [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsmul.mask.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[MASKEDOFF]], <vscale x 4 x i8> [[OP1]], <vscale x 4 x i8> [[OP2]], <vscale x 4 x i1> [[MASK]], i64 0, i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vsmul_vv_i8mf2_tumu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
- return __riscv_vsmul_vv_i8mf2_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsmul_vv_i8mf2_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vsmul_vx_i8mf2_tumu
// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], i8 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsmul.mask.nxv4i8.i8.i64(<vscale x 4 x i8> [[MASKEDOFF]], <vscale x 4 x i8> [[OP1]], i8 [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsmul.mask.nxv4i8.i8.i64(<vscale x 4 x i8> [[MASKEDOFF]], <vscale x 4 x i8> [[OP1]], i8 [[OP2]], <vscale x 4 x i1> [[MASK]], i64 0, i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vsmul_vx_i8mf2_tumu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) {
- return __riscv_vsmul_vx_i8mf2_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsmul_vx_i8mf2_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vsmul_vv_i8m1_tumu
// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vsmul.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF]], <vscale x 8 x i8> [[OP1]], <vscale x 8 x i8> [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vsmul.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF]], <vscale x 8 x i8> [[OP1]], <vscale x 8 x i8> [[OP2]], <vscale x 8 x i1> [[MASK]], i64 0, i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vsmul_vv_i8m1_tumu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) {
- return __riscv_vsmul_vv_i8m1_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsmul_vv_i8m1_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vsmul_vx_i8m1_tumu
// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], i8 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vsmul.mask.nxv8i8.i8.i64(<vscale x 8 x i8> [[MASKEDOFF]], <vscale x 8 x i8> [[OP1]], i8 [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vsmul.mask.nxv8i8.i8.i64(<vscale x 8 x i8> [[MASKEDOFF]], <vscale x 8 x i8> [[OP1]], i8 [[OP2]], <vscale x 8 x i1> [[MASK]], i64 0, i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vsmul_vx_i8m1_tumu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) {
- return __riscv_vsmul_vx_i8m1_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsmul_vx_i8m1_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vsmul_vv_i8m2_tumu
// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vsmul.mask.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[MASKEDOFF]], <vscale x 16 x i8> [[OP1]], <vscale x 16 x i8> [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vsmul.mask.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[MASKEDOFF]], <vscale x 16 x i8> [[OP1]], <vscale x 16 x i8> [[OP2]], <vscale x 16 x i1> [[MASK]], i64 0, i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vsmul_vv_i8m2_tumu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) {
- return __riscv_vsmul_vv_i8m2_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsmul_vv_i8m2_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vsmul_vx_i8m2_tumu
// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], i8 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vsmul.mask.nxv16i8.i8.i64(<vscale x 16 x i8> [[MASKEDOFF]], <vscale x 16 x i8> [[OP1]], i8 [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vsmul.mask.nxv16i8.i8.i64(<vscale x 16 x i8> [[MASKEDOFF]], <vscale x 16 x i8> [[OP1]], i8 [[OP2]], <vscale x 16 x i1> [[MASK]], i64 0, i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vsmul_vx_i8m2_tumu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) {
- return __riscv_vsmul_vx_i8m2_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsmul_vx_i8m2_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vsmul_vv_i8m4_tumu
// CHECK-RV64-SAME: (<vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsmul.mask.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[MASKEDOFF]], <vscale x 32 x i8> [[OP1]], <vscale x 32 x i8> [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsmul.mask.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[MASKEDOFF]], <vscale x 32 x i8> [[OP1]], <vscale x 32 x i8> [[OP2]], <vscale x 32 x i1> [[MASK]], i64 0, i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vsmul_vv_i8m4_tumu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) {
- return __riscv_vsmul_vv_i8m4_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsmul_vv_i8m4_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vsmul_vx_i8m4_tumu
// CHECK-RV64-SAME: (<vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], i8 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsmul.mask.nxv32i8.i8.i64(<vscale x 32 x i8> [[MASKEDOFF]], <vscale x 32 x i8> [[OP1]], i8 [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsmul.mask.nxv32i8.i8.i64(<vscale x 32 x i8> [[MASKEDOFF]], <vscale x 32 x i8> [[OP1]], i8 [[OP2]], <vscale x 32 x i1> [[MASK]], i64 0, i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vsmul_vx_i8m4_tumu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) {
- return __riscv_vsmul_vx_i8m4_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsmul_vx_i8m4_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 64 x i8> @test_vsmul_vv_i8m8_tumu
// CHECK-RV64-SAME: (<vscale x 64 x i1> [[MASK:%.*]], <vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsmul.mask.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[MASKEDOFF]], <vscale x 64 x i8> [[OP1]], <vscale x 64 x i8> [[OP2]], <vscale x 64 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsmul.mask.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[MASKEDOFF]], <vscale x 64 x i8> [[OP1]], <vscale x 64 x i8> [[OP2]], <vscale x 64 x i1> [[MASK]], i64 0, i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vsmul_vv_i8m8_tumu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl) {
- return __riscv_vsmul_vv_i8m8_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsmul_vv_i8m8_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 64 x i8> @test_vsmul_vx_i8m8_tumu
// CHECK-RV64-SAME: (<vscale x 64 x i1> [[MASK:%.*]], <vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], i8 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsmul.mask.nxv64i8.i8.i64(<vscale x 64 x i8> [[MASKEDOFF]], <vscale x 64 x i8> [[OP1]], i8 [[OP2]], <vscale x 64 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsmul.mask.nxv64i8.i8.i64(<vscale x 64 x i8> [[MASKEDOFF]], <vscale x 64 x i8> [[OP1]], i8 [[OP2]], <vscale x 64 x i1> [[MASK]], i64 0, i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vsmul_vx_i8m8_tumu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl) {
- return __riscv_vsmul_vx_i8m8_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsmul_vx_i8m8_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i16> @test_vsmul_vv_i16mf4_tumu
// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsmul.mask.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[MASKEDOFF]], <vscale x 1 x i16> [[OP1]], <vscale x 1 x i16> [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsmul.mask.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[MASKEDOFF]], <vscale x 1 x i16> [[OP1]], <vscale x 1 x i16> [[OP2]], <vscale x 1 x i1> [[MASK]], i64 0, i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vsmul_vv_i16mf4_tumu(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
- return __riscv_vsmul_vv_i16mf4_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsmul_vv_i16mf4_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i16> @test_vsmul_vx_i16mf4_tumu
// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i16 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsmul.mask.nxv1i16.i16.i64(<vscale x 1 x i16> [[MASKEDOFF]], <vscale x 1 x i16> [[OP1]], i16 [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsmul.mask.nxv1i16.i16.i64(<vscale x 1 x i16> [[MASKEDOFF]], <vscale x 1 x i16> [[OP1]], i16 [[OP2]], <vscale x 1 x i1> [[MASK]], i64 0, i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vsmul_vx_i16mf4_tumu(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) {
- return __riscv_vsmul_vx_i16mf4_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsmul_vx_i16mf4_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i16> @test_vsmul_vv_i16mf2_tumu
// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsmul.mask.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF]], <vscale x 2 x i16> [[OP1]], <vscale x 2 x i16> [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsmul.mask.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF]], <vscale x 2 x i16> [[OP1]], <vscale x 2 x i16> [[OP2]], <vscale x 2 x i1> [[MASK]], i64 0, i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vsmul_vv_i16mf2_tumu(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
- return __riscv_vsmul_vv_i16mf2_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsmul_vv_i16mf2_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i16> @test_vsmul_vx_i16mf2_tumu
// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i16 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsmul.mask.nxv2i16.i16.i64(<vscale x 2 x i16> [[MASKEDOFF]], <vscale x 2 x i16> [[OP1]], i16 [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsmul.mask.nxv2i16.i16.i64(<vscale x 2 x i16> [[MASKEDOFF]], <vscale x 2 x i16> [[OP1]], i16 [[OP2]], <vscale x 2 x i1> [[MASK]], i64 0, i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vsmul_vx_i16mf2_tumu(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) {
- return __riscv_vsmul_vx_i16mf2_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsmul_vx_i16mf2_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vsmul_vv_i16m1_tumu
// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsmul.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF]], <vscale x 4 x i16> [[OP1]], <vscale x 4 x i16> [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsmul.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF]], <vscale x 4 x i16> [[OP1]], <vscale x 4 x i16> [[OP2]], <vscale x 4 x i1> [[MASK]], i64 0, i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vsmul_vv_i16m1_tumu(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) {
- return __riscv_vsmul_vv_i16m1_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsmul_vv_i16m1_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vsmul_vx_i16m1_tumu
// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i16 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsmul.mask.nxv4i16.i16.i64(<vscale x 4 x i16> [[MASKEDOFF]], <vscale x 4 x i16> [[OP1]], i16 [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsmul.mask.nxv4i16.i16.i64(<vscale x 4 x i16> [[MASKEDOFF]], <vscale x 4 x i16> [[OP1]], i16 [[OP2]], <vscale x 4 x i1> [[MASK]], i64 0, i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vsmul_vx_i16m1_tumu(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) {
- return __riscv_vsmul_vx_i16m1_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsmul_vx_i16m1_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i16> @test_vsmul_vv_i16m2_tumu
// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsmul.mask.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[MASKEDOFF]], <vscale x 8 x i16> [[OP1]], <vscale x 8 x i16> [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsmul.mask.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[MASKEDOFF]], <vscale x 8 x i16> [[OP1]], <vscale x 8 x i16> [[OP2]], <vscale x 8 x i1> [[MASK]], i64 0, i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vsmul_vv_i16m2_tumu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) {
- return __riscv_vsmul_vv_i16m2_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsmul_vv_i16m2_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i16> @test_vsmul_vx_i16m2_tumu
// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i16 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsmul.mask.nxv8i16.i16.i64(<vscale x 8 x i16> [[MASKEDOFF]], <vscale x 8 x i16> [[OP1]], i16 [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsmul.mask.nxv8i16.i16.i64(<vscale x 8 x i16> [[MASKEDOFF]], <vscale x 8 x i16> [[OP1]], i16 [[OP2]], <vscale x 8 x i1> [[MASK]], i64 0, i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vsmul_vx_i16m2_tumu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) {
- return __riscv_vsmul_vx_i16m2_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsmul_vx_i16m2_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i16> @test_vsmul_vv_i16m4_tumu
// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsmul.mask.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[MASKEDOFF]], <vscale x 16 x i16> [[OP1]], <vscale x 16 x i16> [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsmul.mask.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[MASKEDOFF]], <vscale x 16 x i16> [[OP1]], <vscale x 16 x i16> [[OP2]], <vscale x 16 x i1> [[MASK]], i64 0, i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vsmul_vv_i16m4_tumu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) {
- return __riscv_vsmul_vv_i16m4_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsmul_vv_i16m4_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i16> @test_vsmul_vx_i16m4_tumu
// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i16 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsmul.mask.nxv16i16.i16.i64(<vscale x 16 x i16> [[MASKEDOFF]], <vscale x 16 x i16> [[OP1]], i16 [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsmul.mask.nxv16i16.i16.i64(<vscale x 16 x i16> [[MASKEDOFF]], <vscale x 16 x i16> [[OP1]], i16 [[OP2]], <vscale x 16 x i1> [[MASK]], i64 0, i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vsmul_vx_i16m4_tumu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) {
- return __riscv_vsmul_vx_i16m4_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsmul_vx_i16m4_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i16> @test_vsmul_vv_i16m8_tumu
// CHECK-RV64-SAME: (<vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsmul.mask.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[MASKEDOFF]], <vscale x 32 x i16> [[OP1]], <vscale x 32 x i16> [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsmul.mask.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[MASKEDOFF]], <vscale x 32 x i16> [[OP1]], <vscale x 32 x i16> [[OP2]], <vscale x 32 x i1> [[MASK]], i64 0, i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vsmul_vv_i16m8_tumu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl) {
- return __riscv_vsmul_vv_i16m8_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsmul_vv_i16m8_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i16> @test_vsmul_vx_i16m8_tumu
// CHECK-RV64-SAME: (<vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i16 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsmul.mask.nxv32i16.i16.i64(<vscale x 32 x i16> [[MASKEDOFF]], <vscale x 32 x i16> [[OP1]], i16 [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsmul.mask.nxv32i16.i16.i64(<vscale x 32 x i16> [[MASKEDOFF]], <vscale x 32 x i16> [[OP1]], i16 [[OP2]], <vscale x 32 x i1> [[MASK]], i64 0, i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vsmul_vx_i16m8_tumu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) {
- return __riscv_vsmul_vx_i16m8_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsmul_vx_i16m8_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i32> @test_vsmul_vv_i32mf2_tumu
// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsmul.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[MASKEDOFF]], <vscale x 1 x i32> [[OP1]], <vscale x 1 x i32> [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsmul.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[MASKEDOFF]], <vscale x 1 x i32> [[OP1]], <vscale x 1 x i32> [[OP2]], <vscale x 1 x i1> [[MASK]], i64 0, i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vsmul_vv_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
- return __riscv_vsmul_vv_i32mf2_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsmul_vv_i32mf2_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i32> @test_vsmul_vx_i32mf2_tumu
// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsmul.mask.nxv1i32.i32.i64(<vscale x 1 x i32> [[MASKEDOFF]], <vscale x 1 x i32> [[OP1]], i32 [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsmul.mask.nxv1i32.i32.i64(<vscale x 1 x i32> [[MASKEDOFF]], <vscale x 1 x i32> [[OP1]], i32 [[OP2]], <vscale x 1 x i1> [[MASK]], i64 0, i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vsmul_vx_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) {
- return __riscv_vsmul_vx_i32mf2_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsmul_vx_i32mf2_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i32> @test_vsmul_vv_i32m1_tumu
// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsmul.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[MASKEDOFF]], <vscale x 2 x i32> [[OP1]], <vscale x 2 x i32> [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsmul.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[MASKEDOFF]], <vscale x 2 x i32> [[OP1]], <vscale x 2 x i32> [[OP2]], <vscale x 2 x i1> [[MASK]], i64 0, i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vsmul_vv_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) {
- return __riscv_vsmul_vv_i32m1_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsmul_vv_i32m1_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i32> @test_vsmul_vx_i32m1_tumu
// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i32 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsmul.mask.nxv2i32.i32.i64(<vscale x 2 x i32> [[MASKEDOFF]], <vscale x 2 x i32> [[OP1]], i32 [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsmul.mask.nxv2i32.i32.i64(<vscale x 2 x i32> [[MASKEDOFF]], <vscale x 2 x i32> [[OP1]], i32 [[OP2]], <vscale x 2 x i1> [[MASK]], i64 0, i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vsmul_vx_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) {
- return __riscv_vsmul_vx_i32m1_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsmul_vx_i32m1_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i32> @test_vsmul_vv_i32m2_tumu
// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsmul.mask.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[MASKEDOFF]], <vscale x 4 x i32> [[OP1]], <vscale x 4 x i32> [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsmul.mask.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[MASKEDOFF]], <vscale x 4 x i32> [[OP1]], <vscale x 4 x i32> [[OP2]], <vscale x 4 x i1> [[MASK]], i64 0, i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vsmul_vv_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) {
- return __riscv_vsmul_vv_i32m2_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsmul_vv_i32m2_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i32> @test_vsmul_vx_i32m2_tumu
// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i32 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsmul.mask.nxv4i32.i32.i64(<vscale x 4 x i32> [[MASKEDOFF]], <vscale x 4 x i32> [[OP1]], i32 [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsmul.mask.nxv4i32.i32.i64(<vscale x 4 x i32> [[MASKEDOFF]], <vscale x 4 x i32> [[OP1]], i32 [[OP2]], <vscale x 4 x i1> [[MASK]], i64 0, i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vsmul_vx_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) {
- return __riscv_vsmul_vx_i32m2_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsmul_vx_i32m2_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i32> @test_vsmul_vv_i32m4_tumu
// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsmul.mask.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[MASKEDOFF]], <vscale x 8 x i32> [[OP1]], <vscale x 8 x i32> [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsmul.mask.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[MASKEDOFF]], <vscale x 8 x i32> [[OP1]], <vscale x 8 x i32> [[OP2]], <vscale x 8 x i1> [[MASK]], i64 0, i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vsmul_vv_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) {
- return __riscv_vsmul_vv_i32m4_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsmul_vv_i32m4_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i32> @test_vsmul_vx_i32m4_tumu
// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i32 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsmul.mask.nxv8i32.i32.i64(<vscale x 8 x i32> [[MASKEDOFF]], <vscale x 8 x i32> [[OP1]], i32 [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsmul.mask.nxv8i32.i32.i64(<vscale x 8 x i32> [[MASKEDOFF]], <vscale x 8 x i32> [[OP1]], i32 [[OP2]], <vscale x 8 x i1> [[MASK]], i64 0, i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vsmul_vx_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) {
- return __riscv_vsmul_vx_i32m4_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsmul_vx_i32m4_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i32> @test_vsmul_vv_i32m8_tumu
// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsmul.mask.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[MASKEDOFF]], <vscale x 16 x i32> [[OP1]], <vscale x 16 x i32> [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsmul.mask.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[MASKEDOFF]], <vscale x 16 x i32> [[OP1]], <vscale x 16 x i32> [[OP2]], <vscale x 16 x i1> [[MASK]], i64 0, i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vsmul_vv_i32m8_tumu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl) {
- return __riscv_vsmul_vv_i32m8_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsmul_vv_i32m8_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i32> @test_vsmul_vx_i32m8_tumu
// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i32 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsmul.mask.nxv16i32.i32.i64(<vscale x 16 x i32> [[MASKEDOFF]], <vscale x 16 x i32> [[OP1]], i32 [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsmul.mask.nxv16i32.i32.i64(<vscale x 16 x i32> [[MASKEDOFF]], <vscale x 16 x i32> [[OP1]], i32 [[OP2]], <vscale x 16 x i1> [[MASK]], i64 0, i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vsmul_vx_i32m8_tumu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) {
- return __riscv_vsmul_vx_i32m8_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsmul_vx_i32m8_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i64> @test_vsmul_vv_i64m1_tumu
// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsmul.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[MASKEDOFF]], <vscale x 1 x i64> [[OP1]], <vscale x 1 x i64> [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsmul.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[MASKEDOFF]], <vscale x 1 x i64> [[OP1]], <vscale x 1 x i64> [[OP2]], <vscale x 1 x i1> [[MASK]], i64 0, i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vsmul_vv_i64m1_tumu(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl) {
- return __riscv_vsmul_vv_i64m1_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsmul_vv_i64m1_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i64> @test_vsmul_vx_i64m1_tumu
// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i64 noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsmul.mask.nxv1i64.i64.i64(<vscale x 1 x i64> [[MASKEDOFF]], <vscale x 1 x i64> [[OP1]], i64 [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsmul.mask.nxv1i64.i64.i64(<vscale x 1 x i64> [[MASKEDOFF]], <vscale x 1 x i64> [[OP1]], i64 [[OP2]], <vscale x 1 x i1> [[MASK]], i64 0, i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vsmul_vx_i64m1_tumu(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) {
- return __riscv_vsmul_vx_i64m1_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsmul_vx_i64m1_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i64> @test_vsmul_vv_i64m2_tumu
// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsmul.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[MASKEDOFF]], <vscale x 2 x i64> [[OP1]], <vscale x 2 x i64> [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsmul.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[MASKEDOFF]], <vscale x 2 x i64> [[OP1]], <vscale x 2 x i64> [[OP2]], <vscale x 2 x i1> [[MASK]], i64 0, i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vsmul_vv_i64m2_tumu(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl) {
- return __riscv_vsmul_vv_i64m2_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsmul_vv_i64m2_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i64> @test_vsmul_vx_i64m2_tumu
// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i64 noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsmul.mask.nxv2i64.i64.i64(<vscale x 2 x i64> [[MASKEDOFF]], <vscale x 2 x i64> [[OP1]], i64 [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsmul.mask.nxv2i64.i64.i64(<vscale x 2 x i64> [[MASKEDOFF]], <vscale x 2 x i64> [[OP1]], i64 [[OP2]], <vscale x 2 x i1> [[MASK]], i64 0, i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vsmul_vx_i64m2_tumu(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) {
- return __riscv_vsmul_vx_i64m2_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsmul_vx_i64m2_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i64> @test_vsmul_vv_i64m4_tumu
// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsmul.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[MASKEDOFF]], <vscale x 4 x i64> [[OP1]], <vscale x 4 x i64> [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsmul.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[MASKEDOFF]], <vscale x 4 x i64> [[OP1]], <vscale x 4 x i64> [[OP2]], <vscale x 4 x i1> [[MASK]], i64 0, i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vsmul_vv_i64m4_tumu(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl) {
- return __riscv_vsmul_vv_i64m4_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsmul_vv_i64m4_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i64> @test_vsmul_vx_i64m4_tumu
// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i64 noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsmul.mask.nxv4i64.i64.i64(<vscale x 4 x i64> [[MASKEDOFF]], <vscale x 4 x i64> [[OP1]], i64 [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsmul.mask.nxv4i64.i64.i64(<vscale x 4 x i64> [[MASKEDOFF]], <vscale x 4 x i64> [[OP1]], i64 [[OP2]], <vscale x 4 x i1> [[MASK]], i64 0, i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vsmul_vx_i64m4_tumu(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) {
- return __riscv_vsmul_vx_i64m4_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsmul_vx_i64m4_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i64> @test_vsmul_vv_i64m8_tumu
// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsmul.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[MASKEDOFF]], <vscale x 8 x i64> [[OP1]], <vscale x 8 x i64> [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsmul.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[MASKEDOFF]], <vscale x 8 x i64> [[OP1]], <vscale x 8 x i64> [[OP2]], <vscale x 8 x i1> [[MASK]], i64 0, i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vsmul_vv_i64m8_tumu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl) {
- return __riscv_vsmul_vv_i64m8_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsmul_vv_i64m8_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i64> @test_vsmul_vx_i64m8_tumu
// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsmul.mask.nxv8i64.i64.i64(<vscale x 8 x i64> [[MASKEDOFF]], <vscale x 8 x i64> [[OP1]], i64 [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsmul.mask.nxv8i64.i64.i64(<vscale x 8 x i64> [[MASKEDOFF]], <vscale x 8 x i64> [[OP1]], i64 [[OP2]], <vscale x 8 x i1> [[MASK]], i64 0, i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vsmul_vx_i64m8_tumu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) {
- return __riscv_vsmul_vx_i64m8_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsmul_vx_i64m8_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vsmul_vv_i8mf8_mu
// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsmul.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF]], <vscale x 1 x i8> [[OP1]], <vscale x 1 x i8> [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsmul.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF]], <vscale x 1 x i8> [[OP1]], <vscale x 1 x i8> [[OP2]], <vscale x 1 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vsmul_vv_i8mf8_mu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
- return __riscv_vsmul_vv_i8mf8_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsmul_vv_i8mf8_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vsmul_vx_i8mf8_mu
// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i8 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsmul.mask.nxv1i8.i8.i64(<vscale x 1 x i8> [[MASKEDOFF]], <vscale x 1 x i8> [[OP1]], i8 [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsmul.mask.nxv1i8.i8.i64(<vscale x 1 x i8> [[MASKEDOFF]], <vscale x 1 x i8> [[OP1]], i8 [[OP2]], <vscale x 1 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vsmul_vx_i8mf8_mu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) {
- return __riscv_vsmul_vx_i8mf8_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsmul_vx_i8mf8_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vsmul_vv_i8mf4_mu
// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsmul.mask.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[MASKEDOFF]], <vscale x 2 x i8> [[OP1]], <vscale x 2 x i8> [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsmul.mask.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[MASKEDOFF]], <vscale x 2 x i8> [[OP1]], <vscale x 2 x i8> [[OP2]], <vscale x 2 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vsmul_vv_i8mf4_mu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
- return __riscv_vsmul_vv_i8mf4_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsmul_vv_i8mf4_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vsmul_vx_i8mf4_mu
// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], i8 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsmul.mask.nxv2i8.i8.i64(<vscale x 2 x i8> [[MASKEDOFF]], <vscale x 2 x i8> [[OP1]], i8 [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsmul.mask.nxv2i8.i8.i64(<vscale x 2 x i8> [[MASKEDOFF]], <vscale x 2 x i8> [[OP1]], i8 [[OP2]], <vscale x 2 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vsmul_vx_i8mf4_mu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) {
- return __riscv_vsmul_vx_i8mf4_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsmul_vx_i8mf4_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vsmul_vv_i8mf2_mu
// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsmul.mask.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[MASKEDOFF]], <vscale x 4 x i8> [[OP1]], <vscale x 4 x i8> [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsmul.mask.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[MASKEDOFF]], <vscale x 4 x i8> [[OP1]], <vscale x 4 x i8> [[OP2]], <vscale x 4 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vsmul_vv_i8mf2_mu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
- return __riscv_vsmul_vv_i8mf2_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsmul_vv_i8mf2_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vsmul_vx_i8mf2_mu
// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], i8 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsmul.mask.nxv4i8.i8.i64(<vscale x 4 x i8> [[MASKEDOFF]], <vscale x 4 x i8> [[OP1]], i8 [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsmul.mask.nxv4i8.i8.i64(<vscale x 4 x i8> [[MASKEDOFF]], <vscale x 4 x i8> [[OP1]], i8 [[OP2]], <vscale x 4 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vsmul_vx_i8mf2_mu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) {
- return __riscv_vsmul_vx_i8mf2_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsmul_vx_i8mf2_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vsmul_vv_i8m1_mu
// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vsmul.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF]], <vscale x 8 x i8> [[OP1]], <vscale x 8 x i8> [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vsmul.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF]], <vscale x 8 x i8> [[OP1]], <vscale x 8 x i8> [[OP2]], <vscale x 8 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vsmul_vv_i8m1_mu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) {
- return __riscv_vsmul_vv_i8m1_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsmul_vv_i8m1_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vsmul_vx_i8m1_mu
// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], i8 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vsmul.mask.nxv8i8.i8.i64(<vscale x 8 x i8> [[MASKEDOFF]], <vscale x 8 x i8> [[OP1]], i8 [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vsmul.mask.nxv8i8.i8.i64(<vscale x 8 x i8> [[MASKEDOFF]], <vscale x 8 x i8> [[OP1]], i8 [[OP2]], <vscale x 8 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vsmul_vx_i8m1_mu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) {
- return __riscv_vsmul_vx_i8m1_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsmul_vx_i8m1_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vsmul_vv_i8m2_mu
// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vsmul.mask.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[MASKEDOFF]], <vscale x 16 x i8> [[OP1]], <vscale x 16 x i8> [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vsmul.mask.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[MASKEDOFF]], <vscale x 16 x i8> [[OP1]], <vscale x 16 x i8> [[OP2]], <vscale x 16 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vsmul_vv_i8m2_mu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) {
- return __riscv_vsmul_vv_i8m2_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsmul_vv_i8m2_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vsmul_vx_i8m2_mu
// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], i8 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vsmul.mask.nxv16i8.i8.i64(<vscale x 16 x i8> [[MASKEDOFF]], <vscale x 16 x i8> [[OP1]], i8 [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vsmul.mask.nxv16i8.i8.i64(<vscale x 16 x i8> [[MASKEDOFF]], <vscale x 16 x i8> [[OP1]], i8 [[OP2]], <vscale x 16 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vsmul_vx_i8m2_mu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) {
- return __riscv_vsmul_vx_i8m2_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsmul_vx_i8m2_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vsmul_vv_i8m4_mu
// CHECK-RV64-SAME: (<vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsmul.mask.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[MASKEDOFF]], <vscale x 32 x i8> [[OP1]], <vscale x 32 x i8> [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsmul.mask.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[MASKEDOFF]], <vscale x 32 x i8> [[OP1]], <vscale x 32 x i8> [[OP2]], <vscale x 32 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vsmul_vv_i8m4_mu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) {
- return __riscv_vsmul_vv_i8m4_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsmul_vv_i8m4_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vsmul_vx_i8m4_mu
// CHECK-RV64-SAME: (<vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], i8 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsmul.mask.nxv32i8.i8.i64(<vscale x 32 x i8> [[MASKEDOFF]], <vscale x 32 x i8> [[OP1]], i8 [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsmul.mask.nxv32i8.i8.i64(<vscale x 32 x i8> [[MASKEDOFF]], <vscale x 32 x i8> [[OP1]], i8 [[OP2]], <vscale x 32 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vsmul_vx_i8m4_mu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) {
- return __riscv_vsmul_vx_i8m4_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsmul_vx_i8m4_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 64 x i8> @test_vsmul_vv_i8m8_mu
// CHECK-RV64-SAME: (<vscale x 64 x i1> [[MASK:%.*]], <vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsmul.mask.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[MASKEDOFF]], <vscale x 64 x i8> [[OP1]], <vscale x 64 x i8> [[OP2]], <vscale x 64 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsmul.mask.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[MASKEDOFF]], <vscale x 64 x i8> [[OP1]], <vscale x 64 x i8> [[OP2]], <vscale x 64 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vsmul_vv_i8m8_mu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl) {
- return __riscv_vsmul_vv_i8m8_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsmul_vv_i8m8_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 64 x i8> @test_vsmul_vx_i8m8_mu
// CHECK-RV64-SAME: (<vscale x 64 x i1> [[MASK:%.*]], <vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], i8 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsmul.mask.nxv64i8.i8.i64(<vscale x 64 x i8> [[MASKEDOFF]], <vscale x 64 x i8> [[OP1]], i8 [[OP2]], <vscale x 64 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsmul.mask.nxv64i8.i8.i64(<vscale x 64 x i8> [[MASKEDOFF]], <vscale x 64 x i8> [[OP1]], i8 [[OP2]], <vscale x 64 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vsmul_vx_i8m8_mu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl) {
- return __riscv_vsmul_vx_i8m8_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsmul_vx_i8m8_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i16> @test_vsmul_vv_i16mf4_mu
// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsmul.mask.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[MASKEDOFF]], <vscale x 1 x i16> [[OP1]], <vscale x 1 x i16> [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsmul.mask.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[MASKEDOFF]], <vscale x 1 x i16> [[OP1]], <vscale x 1 x i16> [[OP2]], <vscale x 1 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vsmul_vv_i16mf4_mu(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
- return __riscv_vsmul_vv_i16mf4_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsmul_vv_i16mf4_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i16> @test_vsmul_vx_i16mf4_mu
// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i16 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsmul.mask.nxv1i16.i16.i64(<vscale x 1 x i16> [[MASKEDOFF]], <vscale x 1 x i16> [[OP1]], i16 [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsmul.mask.nxv1i16.i16.i64(<vscale x 1 x i16> [[MASKEDOFF]], <vscale x 1 x i16> [[OP1]], i16 [[OP2]], <vscale x 1 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vsmul_vx_i16mf4_mu(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) {
- return __riscv_vsmul_vx_i16mf4_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsmul_vx_i16mf4_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i16> @test_vsmul_vv_i16mf2_mu
// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsmul.mask.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF]], <vscale x 2 x i16> [[OP1]], <vscale x 2 x i16> [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsmul.mask.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF]], <vscale x 2 x i16> [[OP1]], <vscale x 2 x i16> [[OP2]], <vscale x 2 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vsmul_vv_i16mf2_mu(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
- return __riscv_vsmul_vv_i16mf2_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsmul_vv_i16mf2_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i16> @test_vsmul_vx_i16mf2_mu
// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i16 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsmul.mask.nxv2i16.i16.i64(<vscale x 2 x i16> [[MASKEDOFF]], <vscale x 2 x i16> [[OP1]], i16 [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsmul.mask.nxv2i16.i16.i64(<vscale x 2 x i16> [[MASKEDOFF]], <vscale x 2 x i16> [[OP1]], i16 [[OP2]], <vscale x 2 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vsmul_vx_i16mf2_mu(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) {
- return __riscv_vsmul_vx_i16mf2_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsmul_vx_i16mf2_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vsmul_vv_i16m1_mu
// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsmul.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF]], <vscale x 4 x i16> [[OP1]], <vscale x 4 x i16> [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsmul.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF]], <vscale x 4 x i16> [[OP1]], <vscale x 4 x i16> [[OP2]], <vscale x 4 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vsmul_vv_i16m1_mu(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) {
- return __riscv_vsmul_vv_i16m1_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsmul_vv_i16m1_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vsmul_vx_i16m1_mu
// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i16 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsmul.mask.nxv4i16.i16.i64(<vscale x 4 x i16> [[MASKEDOFF]], <vscale x 4 x i16> [[OP1]], i16 [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsmul.mask.nxv4i16.i16.i64(<vscale x 4 x i16> [[MASKEDOFF]], <vscale x 4 x i16> [[OP1]], i16 [[OP2]], <vscale x 4 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vsmul_vx_i16m1_mu(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) {
- return __riscv_vsmul_vx_i16m1_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsmul_vx_i16m1_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i16> @test_vsmul_vv_i16m2_mu
// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsmul.mask.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[MASKEDOFF]], <vscale x 8 x i16> [[OP1]], <vscale x 8 x i16> [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsmul.mask.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[MASKEDOFF]], <vscale x 8 x i16> [[OP1]], <vscale x 8 x i16> [[OP2]], <vscale x 8 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vsmul_vv_i16m2_mu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) {
- return __riscv_vsmul_vv_i16m2_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsmul_vv_i16m2_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i16> @test_vsmul_vx_i16m2_mu
// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i16 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsmul.mask.nxv8i16.i16.i64(<vscale x 8 x i16> [[MASKEDOFF]], <vscale x 8 x i16> [[OP1]], i16 [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsmul.mask.nxv8i16.i16.i64(<vscale x 8 x i16> [[MASKEDOFF]], <vscale x 8 x i16> [[OP1]], i16 [[OP2]], <vscale x 8 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vsmul_vx_i16m2_mu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) {
- return __riscv_vsmul_vx_i16m2_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsmul_vx_i16m2_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i16> @test_vsmul_vv_i16m4_mu
// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsmul.mask.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[MASKEDOFF]], <vscale x 16 x i16> [[OP1]], <vscale x 16 x i16> [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsmul.mask.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[MASKEDOFF]], <vscale x 16 x i16> [[OP1]], <vscale x 16 x i16> [[OP2]], <vscale x 16 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vsmul_vv_i16m4_mu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) {
- return __riscv_vsmul_vv_i16m4_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsmul_vv_i16m4_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i16> @test_vsmul_vx_i16m4_mu
// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i16 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsmul.mask.nxv16i16.i16.i64(<vscale x 16 x i16> [[MASKEDOFF]], <vscale x 16 x i16> [[OP1]], i16 [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsmul.mask.nxv16i16.i16.i64(<vscale x 16 x i16> [[MASKEDOFF]], <vscale x 16 x i16> [[OP1]], i16 [[OP2]], <vscale x 16 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vsmul_vx_i16m4_mu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) {
- return __riscv_vsmul_vx_i16m4_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsmul_vx_i16m4_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i16> @test_vsmul_vv_i16m8_mu
// CHECK-RV64-SAME: (<vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsmul.mask.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[MASKEDOFF]], <vscale x 32 x i16> [[OP1]], <vscale x 32 x i16> [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsmul.mask.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[MASKEDOFF]], <vscale x 32 x i16> [[OP1]], <vscale x 32 x i16> [[OP2]], <vscale x 32 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vsmul_vv_i16m8_mu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl) {
- return __riscv_vsmul_vv_i16m8_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsmul_vv_i16m8_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i16> @test_vsmul_vx_i16m8_mu
// CHECK-RV64-SAME: (<vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i16 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsmul.mask.nxv32i16.i16.i64(<vscale x 32 x i16> [[MASKEDOFF]], <vscale x 32 x i16> [[OP1]], i16 [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsmul.mask.nxv32i16.i16.i64(<vscale x 32 x i16> [[MASKEDOFF]], <vscale x 32 x i16> [[OP1]], i16 [[OP2]], <vscale x 32 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vsmul_vx_i16m8_mu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) {
- return __riscv_vsmul_vx_i16m8_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsmul_vx_i16m8_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i32> @test_vsmul_vv_i32mf2_mu
// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsmul.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[MASKEDOFF]], <vscale x 1 x i32> [[OP1]], <vscale x 1 x i32> [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsmul.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[MASKEDOFF]], <vscale x 1 x i32> [[OP1]], <vscale x 1 x i32> [[OP2]], <vscale x 1 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vsmul_vv_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
- return __riscv_vsmul_vv_i32mf2_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsmul_vv_i32mf2_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i32> @test_vsmul_vx_i32mf2_mu
// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsmul.mask.nxv1i32.i32.i64(<vscale x 1 x i32> [[MASKEDOFF]], <vscale x 1 x i32> [[OP1]], i32 [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsmul.mask.nxv1i32.i32.i64(<vscale x 1 x i32> [[MASKEDOFF]], <vscale x 1 x i32> [[OP1]], i32 [[OP2]], <vscale x 1 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vsmul_vx_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) {
- return __riscv_vsmul_vx_i32mf2_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsmul_vx_i32mf2_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i32> @test_vsmul_vv_i32m1_mu
// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsmul.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[MASKEDOFF]], <vscale x 2 x i32> [[OP1]], <vscale x 2 x i32> [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsmul.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[MASKEDOFF]], <vscale x 2 x i32> [[OP1]], <vscale x 2 x i32> [[OP2]], <vscale x 2 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vsmul_vv_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) {
- return __riscv_vsmul_vv_i32m1_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsmul_vv_i32m1_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i32> @test_vsmul_vx_i32m1_mu
// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i32 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsmul.mask.nxv2i32.i32.i64(<vscale x 2 x i32> [[MASKEDOFF]], <vscale x 2 x i32> [[OP1]], i32 [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsmul.mask.nxv2i32.i32.i64(<vscale x 2 x i32> [[MASKEDOFF]], <vscale x 2 x i32> [[OP1]], i32 [[OP2]], <vscale x 2 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vsmul_vx_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) {
- return __riscv_vsmul_vx_i32m1_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsmul_vx_i32m1_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i32> @test_vsmul_vv_i32m2_mu
// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsmul.mask.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[MASKEDOFF]], <vscale x 4 x i32> [[OP1]], <vscale x 4 x i32> [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsmul.mask.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[MASKEDOFF]], <vscale x 4 x i32> [[OP1]], <vscale x 4 x i32> [[OP2]], <vscale x 4 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vsmul_vv_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) {
- return __riscv_vsmul_vv_i32m2_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsmul_vv_i32m2_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i32> @test_vsmul_vx_i32m2_mu
// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i32 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsmul.mask.nxv4i32.i32.i64(<vscale x 4 x i32> [[MASKEDOFF]], <vscale x 4 x i32> [[OP1]], i32 [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsmul.mask.nxv4i32.i32.i64(<vscale x 4 x i32> [[MASKEDOFF]], <vscale x 4 x i32> [[OP1]], i32 [[OP2]], <vscale x 4 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vsmul_vx_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) {
- return __riscv_vsmul_vx_i32m2_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsmul_vx_i32m2_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i32> @test_vsmul_vv_i32m4_mu
// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsmul.mask.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[MASKEDOFF]], <vscale x 8 x i32> [[OP1]], <vscale x 8 x i32> [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsmul.mask.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[MASKEDOFF]], <vscale x 8 x i32> [[OP1]], <vscale x 8 x i32> [[OP2]], <vscale x 8 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vsmul_vv_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) {
- return __riscv_vsmul_vv_i32m4_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsmul_vv_i32m4_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i32> @test_vsmul_vx_i32m4_mu
// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i32 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsmul.mask.nxv8i32.i32.i64(<vscale x 8 x i32> [[MASKEDOFF]], <vscale x 8 x i32> [[OP1]], i32 [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsmul.mask.nxv8i32.i32.i64(<vscale x 8 x i32> [[MASKEDOFF]], <vscale x 8 x i32> [[OP1]], i32 [[OP2]], <vscale x 8 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vsmul_vx_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) {
- return __riscv_vsmul_vx_i32m4_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsmul_vx_i32m4_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i32> @test_vsmul_vv_i32m8_mu
// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsmul.mask.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[MASKEDOFF]], <vscale x 16 x i32> [[OP1]], <vscale x 16 x i32> [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsmul.mask.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[MASKEDOFF]], <vscale x 16 x i32> [[OP1]], <vscale x 16 x i32> [[OP2]], <vscale x 16 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vsmul_vv_i32m8_mu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl) {
- return __riscv_vsmul_vv_i32m8_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsmul_vv_i32m8_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i32> @test_vsmul_vx_i32m8_mu
// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i32 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsmul.mask.nxv16i32.i32.i64(<vscale x 16 x i32> [[MASKEDOFF]], <vscale x 16 x i32> [[OP1]], i32 [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsmul.mask.nxv16i32.i32.i64(<vscale x 16 x i32> [[MASKEDOFF]], <vscale x 16 x i32> [[OP1]], i32 [[OP2]], <vscale x 16 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vsmul_vx_i32m8_mu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) {
- return __riscv_vsmul_vx_i32m8_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsmul_vx_i32m8_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i64> @test_vsmul_vv_i64m1_mu
// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsmul.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[MASKEDOFF]], <vscale x 1 x i64> [[OP1]], <vscale x 1 x i64> [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsmul.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[MASKEDOFF]], <vscale x 1 x i64> [[OP1]], <vscale x 1 x i64> [[OP2]], <vscale x 1 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vsmul_vv_i64m1_mu(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl) {
- return __riscv_vsmul_vv_i64m1_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsmul_vv_i64m1_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i64> @test_vsmul_vx_i64m1_mu
// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i64 noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsmul.mask.nxv1i64.i64.i64(<vscale x 1 x i64> [[MASKEDOFF]], <vscale x 1 x i64> [[OP1]], i64 [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsmul.mask.nxv1i64.i64.i64(<vscale x 1 x i64> [[MASKEDOFF]], <vscale x 1 x i64> [[OP1]], i64 [[OP2]], <vscale x 1 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vsmul_vx_i64m1_mu(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) {
- return __riscv_vsmul_vx_i64m1_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsmul_vx_i64m1_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i64> @test_vsmul_vv_i64m2_mu
// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsmul.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[MASKEDOFF]], <vscale x 2 x i64> [[OP1]], <vscale x 2 x i64> [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsmul.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[MASKEDOFF]], <vscale x 2 x i64> [[OP1]], <vscale x 2 x i64> [[OP2]], <vscale x 2 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vsmul_vv_i64m2_mu(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl) {
- return __riscv_vsmul_vv_i64m2_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsmul_vv_i64m2_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i64> @test_vsmul_vx_i64m2_mu
// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i64 noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsmul.mask.nxv2i64.i64.i64(<vscale x 2 x i64> [[MASKEDOFF]], <vscale x 2 x i64> [[OP1]], i64 [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsmul.mask.nxv2i64.i64.i64(<vscale x 2 x i64> [[MASKEDOFF]], <vscale x 2 x i64> [[OP1]], i64 [[OP2]], <vscale x 2 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vsmul_vx_i64m2_mu(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) {
- return __riscv_vsmul_vx_i64m2_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsmul_vx_i64m2_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i64> @test_vsmul_vv_i64m4_mu
// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsmul.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[MASKEDOFF]], <vscale x 4 x i64> [[OP1]], <vscale x 4 x i64> [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsmul.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[MASKEDOFF]], <vscale x 4 x i64> [[OP1]], <vscale x 4 x i64> [[OP2]], <vscale x 4 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vsmul_vv_i64m4_mu(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl) {
- return __riscv_vsmul_vv_i64m4_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsmul_vv_i64m4_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i64> @test_vsmul_vx_i64m4_mu
// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i64 noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsmul.mask.nxv4i64.i64.i64(<vscale x 4 x i64> [[MASKEDOFF]], <vscale x 4 x i64> [[OP1]], i64 [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsmul.mask.nxv4i64.i64.i64(<vscale x 4 x i64> [[MASKEDOFF]], <vscale x 4 x i64> [[OP1]], i64 [[OP2]], <vscale x 4 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vsmul_vx_i64m4_mu(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) {
- return __riscv_vsmul_vx_i64m4_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsmul_vx_i64m4_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i64> @test_vsmul_vv_i64m8_mu
// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsmul.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[MASKEDOFF]], <vscale x 8 x i64> [[OP1]], <vscale x 8 x i64> [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsmul.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[MASKEDOFF]], <vscale x 8 x i64> [[OP1]], <vscale x 8 x i64> [[OP2]], <vscale x 8 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vsmul_vv_i64m8_mu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl) {
- return __riscv_vsmul_vv_i64m8_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsmul_vv_i64m8_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i64> @test_vsmul_vx_i64m8_mu
// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsmul.mask.nxv8i64.i64.i64(<vscale x 8 x i64> [[MASKEDOFF]], <vscale x 8 x i64> [[OP1]], i64 [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsmul.mask.nxv8i64.i64.i64(<vscale x 8 x i64> [[MASKEDOFF]], <vscale x 8 x i64> [[OP1]], i64 [[OP2]], <vscale x 8 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vsmul_vx_i64m8_mu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) {
- return __riscv_vsmul_vx_i64m8_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsmul_vx_i64m8_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
}
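As a quick illustration of the calling convention these tests exercise (a minimal sketch, not part of the generated checks; it assumes only riscv_vector.h and the __RISCV_VXRM_RNU enumerator already used above), callers of the updated fixed-point intrinsics now pass the vxrm rounding mode immediately before vl:

#include <riscv_vector.h>

// Illustrative only: mask-undisturbed fixed-point multiply, with the
// rounding mode supplied explicitly, matching the updated signature above.
vint32m1_t scale_q31_mu(vbool32_t mask, vint32m1_t maskedoff,
                        vint32m1_t a, vint32m1_t b, size_t vl) {
  return __riscv_vsmul_vv_i32m1_mu(mask, maskedoff, a, b, __RISCV_VXRM_RNU, vl);
}

// Illustrative only: tail-undisturbed scaling right shift, same pattern.
vint16m1_t ssra_round_tu(vint16m1_t maskedoff, vint16m1_t v,
                         size_t shift, size_t vl) {
  return __riscv_vssra_vx_i16m1_tu(maskedoff, v, shift, __RISCV_VXRM_RNU, vl);
}
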
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vssra_vv_i8mf8_tu
// CHECK-RV64-SAME: (<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vssra.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF]], <vscale x 1 x i8> [[OP1]], <vscale x 1 x i8> [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vssra.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF]], <vscale x 1 x i8> [[OP1]], <vscale x 1 x i8> [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vssra_vv_i8mf8_tu(vint8mf8_t maskedoff, vint8mf8_t op1, vuint8mf8_t shift, size_t vl) {
- return __riscv_vssra_vv_i8mf8_tu(maskedoff, op1, shift, vl);
+ return __riscv_vssra_vv_i8mf8_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vssra_vx_i8mf8_tu
// CHECK-RV64-SAME: (<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vssra.nxv1i8.i64.i64(<vscale x 1 x i8> [[MASKEDOFF]], <vscale x 1 x i8> [[OP1]], i64 [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vssra.nxv1i8.i64.i64(<vscale x 1 x i8> [[MASKEDOFF]], <vscale x 1 x i8> [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vssra_vx_i8mf8_tu(vint8mf8_t maskedoff, vint8mf8_t op1, size_t shift, size_t vl) {
- return __riscv_vssra_vx_i8mf8_tu(maskedoff, op1, shift, vl);
+ return __riscv_vssra_vx_i8mf8_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vssra_vv_i8mf4_tu
// CHECK-RV64-SAME: (<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vssra.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[MASKEDOFF]], <vscale x 2 x i8> [[OP1]], <vscale x 2 x i8> [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vssra.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[MASKEDOFF]], <vscale x 2 x i8> [[OP1]], <vscale x 2 x i8> [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vssra_vv_i8mf4_tu(vint8mf4_t maskedoff, vint8mf4_t op1, vuint8mf4_t shift, size_t vl) {
- return __riscv_vssra_vv_i8mf4_tu(maskedoff, op1, shift, vl);
+ return __riscv_vssra_vv_i8mf4_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vssra_vx_i8mf4_tu
// CHECK-RV64-SAME: (<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vssra.nxv2i8.i64.i64(<vscale x 2 x i8> [[MASKEDOFF]], <vscale x 2 x i8> [[OP1]], i64 [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vssra.nxv2i8.i64.i64(<vscale x 2 x i8> [[MASKEDOFF]], <vscale x 2 x i8> [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vssra_vx_i8mf4_tu(vint8mf4_t maskedoff, vint8mf4_t op1, size_t shift, size_t vl) {
- return __riscv_vssra_vx_i8mf4_tu(maskedoff, op1, shift, vl);
+ return __riscv_vssra_vx_i8mf4_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vssra_vv_i8mf2_tu
// CHECK-RV64-SAME: (<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vssra.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[MASKEDOFF]], <vscale x 4 x i8> [[OP1]], <vscale x 4 x i8> [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vssra.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[MASKEDOFF]], <vscale x 4 x i8> [[OP1]], <vscale x 4 x i8> [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vssra_vv_i8mf2_tu(vint8mf2_t maskedoff, vint8mf2_t op1, vuint8mf2_t shift, size_t vl) {
- return __riscv_vssra_vv_i8mf2_tu(maskedoff, op1, shift, vl);
+ return __riscv_vssra_vv_i8mf2_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vssra_vx_i8mf2_tu
// CHECK-RV64-SAME: (<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vssra.nxv4i8.i64.i64(<vscale x 4 x i8> [[MASKEDOFF]], <vscale x 4 x i8> [[OP1]], i64 [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vssra.nxv4i8.i64.i64(<vscale x 4 x i8> [[MASKEDOFF]], <vscale x 4 x i8> [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vssra_vx_i8mf2_tu(vint8mf2_t maskedoff, vint8mf2_t op1, size_t shift, size_t vl) {
- return __riscv_vssra_vx_i8mf2_tu(maskedoff, op1, shift, vl);
+ return __riscv_vssra_vx_i8mf2_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vssra_vv_i8m1_tu
// CHECK-RV64-SAME: (<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vssra.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF]], <vscale x 8 x i8> [[OP1]], <vscale x 8 x i8> [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vssra.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF]], <vscale x 8 x i8> [[OP1]], <vscale x 8 x i8> [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vssra_vv_i8m1_tu(vint8m1_t maskedoff, vint8m1_t op1, vuint8m1_t shift, size_t vl) {
- return __riscv_vssra_vv_i8m1_tu(maskedoff, op1, shift, vl);
+ return __riscv_vssra_vv_i8m1_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vssra_vx_i8m1_tu
// CHECK-RV64-SAME: (<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vssra.nxv8i8.i64.i64(<vscale x 8 x i8> [[MASKEDOFF]], <vscale x 8 x i8> [[OP1]], i64 [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vssra.nxv8i8.i64.i64(<vscale x 8 x i8> [[MASKEDOFF]], <vscale x 8 x i8> [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vssra_vx_i8m1_tu(vint8m1_t maskedoff, vint8m1_t op1, size_t shift, size_t vl) {
- return __riscv_vssra_vx_i8m1_tu(maskedoff, op1, shift, vl);
+ return __riscv_vssra_vx_i8m1_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vssra_vv_i8m2_tu
// CHECK-RV64-SAME: (<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vssra.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[MASKEDOFF]], <vscale x 16 x i8> [[OP1]], <vscale x 16 x i8> [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vssra.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[MASKEDOFF]], <vscale x 16 x i8> [[OP1]], <vscale x 16 x i8> [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vssra_vv_i8m2_tu(vint8m2_t maskedoff, vint8m2_t op1, vuint8m2_t shift, size_t vl) {
- return __riscv_vssra_vv_i8m2_tu(maskedoff, op1, shift, vl);
+ return __riscv_vssra_vv_i8m2_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vssra_vx_i8m2_tu
// CHECK-RV64-SAME: (<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vssra.nxv16i8.i64.i64(<vscale x 16 x i8> [[MASKEDOFF]], <vscale x 16 x i8> [[OP1]], i64 [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vssra.nxv16i8.i64.i64(<vscale x 16 x i8> [[MASKEDOFF]], <vscale x 16 x i8> [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vssra_vx_i8m2_tu(vint8m2_t maskedoff, vint8m2_t op1, size_t shift, size_t vl) {
- return __riscv_vssra_vx_i8m2_tu(maskedoff, op1, shift, vl);
+ return __riscv_vssra_vx_i8m2_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vssra_vv_i8m4_tu
// CHECK-RV64-SAME: (<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vssra.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[MASKEDOFF]], <vscale x 32 x i8> [[OP1]], <vscale x 32 x i8> [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vssra.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[MASKEDOFF]], <vscale x 32 x i8> [[OP1]], <vscale x 32 x i8> [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vssra_vv_i8m4_tu(vint8m4_t maskedoff, vint8m4_t op1, vuint8m4_t shift, size_t vl) {
- return __riscv_vssra_vv_i8m4_tu(maskedoff, op1, shift, vl);
+ return __riscv_vssra_vv_i8m4_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vssra_vx_i8m4_tu
// CHECK-RV64-SAME: (<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vssra.nxv32i8.i64.i64(<vscale x 32 x i8> [[MASKEDOFF]], <vscale x 32 x i8> [[OP1]], i64 [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vssra.nxv32i8.i64.i64(<vscale x 32 x i8> [[MASKEDOFF]], <vscale x 32 x i8> [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vssra_vx_i8m4_tu(vint8m4_t maskedoff, vint8m4_t op1, size_t shift, size_t vl) {
- return __riscv_vssra_vx_i8m4_tu(maskedoff, op1, shift, vl);
+ return __riscv_vssra_vx_i8m4_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 64 x i8> @test_vssra_vv_i8m8_tu
// CHECK-RV64-SAME: (<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vssra.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[MASKEDOFF]], <vscale x 64 x i8> [[OP1]], <vscale x 64 x i8> [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vssra.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[MASKEDOFF]], <vscale x 64 x i8> [[OP1]], <vscale x 64 x i8> [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vssra_vv_i8m8_tu(vint8m8_t maskedoff, vint8m8_t op1, vuint8m8_t shift, size_t vl) {
- return __riscv_vssra_vv_i8m8_tu(maskedoff, op1, shift, vl);
+ return __riscv_vssra_vv_i8m8_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 64 x i8> @test_vssra_vx_i8m8_tu
// CHECK-RV64-SAME: (<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vssra.nxv64i8.i64.i64(<vscale x 64 x i8> [[MASKEDOFF]], <vscale x 64 x i8> [[OP1]], i64 [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vssra.nxv64i8.i64.i64(<vscale x 64 x i8> [[MASKEDOFF]], <vscale x 64 x i8> [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vssra_vx_i8m8_tu(vint8m8_t maskedoff, vint8m8_t op1, size_t shift, size_t vl) {
- return __riscv_vssra_vx_i8m8_tu(maskedoff, op1, shift, vl);
+ return __riscv_vssra_vx_i8m8_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i16> @test_vssra_vv_i16mf4_tu
// CHECK-RV64-SAME: (<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vssra.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[MASKEDOFF]], <vscale x 1 x i16> [[OP1]], <vscale x 1 x i16> [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vssra.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[MASKEDOFF]], <vscale x 1 x i16> [[OP1]], <vscale x 1 x i16> [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vssra_vv_i16mf4_tu(vint16mf4_t maskedoff, vint16mf4_t op1, vuint16mf4_t shift, size_t vl) {
- return __riscv_vssra_vv_i16mf4_tu(maskedoff, op1, shift, vl);
+ return __riscv_vssra_vv_i16mf4_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i16> @test_vssra_vx_i16mf4_tu
// CHECK-RV64-SAME: (<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vssra.nxv1i16.i64.i64(<vscale x 1 x i16> [[MASKEDOFF]], <vscale x 1 x i16> [[OP1]], i64 [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vssra.nxv1i16.i64.i64(<vscale x 1 x i16> [[MASKEDOFF]], <vscale x 1 x i16> [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vssra_vx_i16mf4_tu(vint16mf4_t maskedoff, vint16mf4_t op1, size_t shift, size_t vl) {
- return __riscv_vssra_vx_i16mf4_tu(maskedoff, op1, shift, vl);
+ return __riscv_vssra_vx_i16mf4_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i16> @test_vssra_vv_i16mf2_tu
// CHECK-RV64-SAME: (<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vssra.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF]], <vscale x 2 x i16> [[OP1]], <vscale x 2 x i16> [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vssra.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF]], <vscale x 2 x i16> [[OP1]], <vscale x 2 x i16> [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vssra_vv_i16mf2_tu(vint16mf2_t maskedoff, vint16mf2_t op1, vuint16mf2_t shift, size_t vl) {
- return __riscv_vssra_vv_i16mf2_tu(maskedoff, op1, shift, vl);
+ return __riscv_vssra_vv_i16mf2_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i16> @test_vssra_vx_i16mf2_tu
// CHECK-RV64-SAME: (<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vssra.nxv2i16.i64.i64(<vscale x 2 x i16> [[MASKEDOFF]], <vscale x 2 x i16> [[OP1]], i64 [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vssra.nxv2i16.i64.i64(<vscale x 2 x i16> [[MASKEDOFF]], <vscale x 2 x i16> [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vssra_vx_i16mf2_tu(vint16mf2_t maskedoff, vint16mf2_t op1, size_t shift, size_t vl) {
- return __riscv_vssra_vx_i16mf2_tu(maskedoff, op1, shift, vl);
+ return __riscv_vssra_vx_i16mf2_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vssra_vv_i16m1_tu
// CHECK-RV64-SAME: (<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vssra.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF]], <vscale x 4 x i16> [[OP1]], <vscale x 4 x i16> [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vssra.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF]], <vscale x 4 x i16> [[OP1]], <vscale x 4 x i16> [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vssra_vv_i16m1_tu(vint16m1_t maskedoff, vint16m1_t op1, vuint16m1_t shift, size_t vl) {
- return __riscv_vssra_vv_i16m1_tu(maskedoff, op1, shift, vl);
+ return __riscv_vssra_vv_i16m1_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vssra_vx_i16m1_tu
// CHECK-RV64-SAME: (<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vssra.nxv4i16.i64.i64(<vscale x 4 x i16> [[MASKEDOFF]], <vscale x 4 x i16> [[OP1]], i64 [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vssra.nxv4i16.i64.i64(<vscale x 4 x i16> [[MASKEDOFF]], <vscale x 4 x i16> [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vssra_vx_i16m1_tu(vint16m1_t maskedoff, vint16m1_t op1, size_t shift, size_t vl) {
- return __riscv_vssra_vx_i16m1_tu(maskedoff, op1, shift, vl);
+ return __riscv_vssra_vx_i16m1_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i16> @test_vssra_vv_i16m2_tu
// CHECK-RV64-SAME: (<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vssra.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[MASKEDOFF]], <vscale x 8 x i16> [[OP1]], <vscale x 8 x i16> [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vssra.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[MASKEDOFF]], <vscale x 8 x i16> [[OP1]], <vscale x 8 x i16> [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vssra_vv_i16m2_tu(vint16m2_t maskedoff, vint16m2_t op1, vuint16m2_t shift, size_t vl) {
- return __riscv_vssra_vv_i16m2_tu(maskedoff, op1, shift, vl);
+ return __riscv_vssra_vv_i16m2_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i16> @test_vssra_vx_i16m2_tu
// CHECK-RV64-SAME: (<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vssra.nxv8i16.i64.i64(<vscale x 8 x i16> [[MASKEDOFF]], <vscale x 8 x i16> [[OP1]], i64 [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vssra.nxv8i16.i64.i64(<vscale x 8 x i16> [[MASKEDOFF]], <vscale x 8 x i16> [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vssra_vx_i16m2_tu(vint16m2_t maskedoff, vint16m2_t op1, size_t shift, size_t vl) {
- return __riscv_vssra_vx_i16m2_tu(maskedoff, op1, shift, vl);
+ return __riscv_vssra_vx_i16m2_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i16> @test_vssra_vv_i16m4_tu
// CHECK-RV64-SAME: (<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vssra.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[MASKEDOFF]], <vscale x 16 x i16> [[OP1]], <vscale x 16 x i16> [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vssra.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[MASKEDOFF]], <vscale x 16 x i16> [[OP1]], <vscale x 16 x i16> [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vssra_vv_i16m4_tu(vint16m4_t maskedoff, vint16m4_t op1, vuint16m4_t shift, size_t vl) {
- return __riscv_vssra_vv_i16m4_tu(maskedoff, op1, shift, vl);
+ return __riscv_vssra_vv_i16m4_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i16> @test_vssra_vx_i16m4_tu
// CHECK-RV64-SAME: (<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vssra.nxv16i16.i64.i64(<vscale x 16 x i16> [[MASKEDOFF]], <vscale x 16 x i16> [[OP1]], i64 [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vssra.nxv16i16.i64.i64(<vscale x 16 x i16> [[MASKEDOFF]], <vscale x 16 x i16> [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vssra_vx_i16m4_tu(vint16m4_t maskedoff, vint16m4_t op1, size_t shift, size_t vl) {
- return __riscv_vssra_vx_i16m4_tu(maskedoff, op1, shift, vl);
+ return __riscv_vssra_vx_i16m4_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i16> @test_vssra_vv_i16m8_tu
// CHECK-RV64-SAME: (<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vssra.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[MASKEDOFF]], <vscale x 32 x i16> [[OP1]], <vscale x 32 x i16> [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vssra.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[MASKEDOFF]], <vscale x 32 x i16> [[OP1]], <vscale x 32 x i16> [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vssra_vv_i16m8_tu(vint16m8_t maskedoff, vint16m8_t op1, vuint16m8_t shift, size_t vl) {
- return __riscv_vssra_vv_i16m8_tu(maskedoff, op1, shift, vl);
+ return __riscv_vssra_vv_i16m8_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i16> @test_vssra_vx_i16m8_tu
// CHECK-RV64-SAME: (<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vssra.nxv32i16.i64.i64(<vscale x 32 x i16> [[MASKEDOFF]], <vscale x 32 x i16> [[OP1]], i64 [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vssra.nxv32i16.i64.i64(<vscale x 32 x i16> [[MASKEDOFF]], <vscale x 32 x i16> [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vssra_vx_i16m8_tu(vint16m8_t maskedoff, vint16m8_t op1, size_t shift, size_t vl) {
- return __riscv_vssra_vx_i16m8_tu(maskedoff, op1, shift, vl);
+ return __riscv_vssra_vx_i16m8_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i32> @test_vssra_vv_i32mf2_tu
// CHECK-RV64-SAME: (<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vssra.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[MASKEDOFF]], <vscale x 1 x i32> [[OP1]], <vscale x 1 x i32> [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vssra.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[MASKEDOFF]], <vscale x 1 x i32> [[OP1]], <vscale x 1 x i32> [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vssra_vv_i32mf2_tu(vint32mf2_t maskedoff, vint32mf2_t op1, vuint32mf2_t shift, size_t vl) {
- return __riscv_vssra_vv_i32mf2_tu(maskedoff, op1, shift, vl);
+ return __riscv_vssra_vv_i32mf2_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i32> @test_vssra_vx_i32mf2_tu
// CHECK-RV64-SAME: (<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vssra.nxv1i32.i64.i64(<vscale x 1 x i32> [[MASKEDOFF]], <vscale x 1 x i32> [[OP1]], i64 [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vssra.nxv1i32.i64.i64(<vscale x 1 x i32> [[MASKEDOFF]], <vscale x 1 x i32> [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vssra_vx_i32mf2_tu(vint32mf2_t maskedoff, vint32mf2_t op1, size_t shift, size_t vl) {
- return __riscv_vssra_vx_i32mf2_tu(maskedoff, op1, shift, vl);
+ return __riscv_vssra_vx_i32mf2_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i32> @test_vssra_vv_i32m1_tu
// CHECK-RV64-SAME: (<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vssra.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[MASKEDOFF]], <vscale x 2 x i32> [[OP1]], <vscale x 2 x i32> [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vssra.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[MASKEDOFF]], <vscale x 2 x i32> [[OP1]], <vscale x 2 x i32> [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vssra_vv_i32m1_tu(vint32m1_t maskedoff, vint32m1_t op1, vuint32m1_t shift, size_t vl) {
- return __riscv_vssra_vv_i32m1_tu(maskedoff, op1, shift, vl);
+ return __riscv_vssra_vv_i32m1_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i32> @test_vssra_vx_i32m1_tu
// CHECK-RV64-SAME: (<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vssra.nxv2i32.i64.i64(<vscale x 2 x i32> [[MASKEDOFF]], <vscale x 2 x i32> [[OP1]], i64 [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vssra.nxv2i32.i64.i64(<vscale x 2 x i32> [[MASKEDOFF]], <vscale x 2 x i32> [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vssra_vx_i32m1_tu(vint32m1_t maskedoff, vint32m1_t op1, size_t shift, size_t vl) {
- return __riscv_vssra_vx_i32m1_tu(maskedoff, op1, shift, vl);
+ return __riscv_vssra_vx_i32m1_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i32> @test_vssra_vv_i32m2_tu
// CHECK-RV64-SAME: (<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vssra.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[MASKEDOFF]], <vscale x 4 x i32> [[OP1]], <vscale x 4 x i32> [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vssra.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[MASKEDOFF]], <vscale x 4 x i32> [[OP1]], <vscale x 4 x i32> [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vssra_vv_i32m2_tu(vint32m2_t maskedoff, vint32m2_t op1, vuint32m2_t shift, size_t vl) {
- return __riscv_vssra_vv_i32m2_tu(maskedoff, op1, shift, vl);
+ return __riscv_vssra_vv_i32m2_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i32> @test_vssra_vx_i32m2_tu
// CHECK-RV64-SAME: (<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vssra.nxv4i32.i64.i64(<vscale x 4 x i32> [[MASKEDOFF]], <vscale x 4 x i32> [[OP1]], i64 [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vssra.nxv4i32.i64.i64(<vscale x 4 x i32> [[MASKEDOFF]], <vscale x 4 x i32> [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vssra_vx_i32m2_tu(vint32m2_t maskedoff, vint32m2_t op1, size_t shift, size_t vl) {
- return __riscv_vssra_vx_i32m2_tu(maskedoff, op1, shift, vl);
+ return __riscv_vssra_vx_i32m2_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i32> @test_vssra_vv_i32m4_tu
// CHECK-RV64-SAME: (<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vssra.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[MASKEDOFF]], <vscale x 8 x i32> [[OP1]], <vscale x 8 x i32> [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vssra.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[MASKEDOFF]], <vscale x 8 x i32> [[OP1]], <vscale x 8 x i32> [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vssra_vv_i32m4_tu(vint32m4_t maskedoff, vint32m4_t op1, vuint32m4_t shift, size_t vl) {
- return __riscv_vssra_vv_i32m4_tu(maskedoff, op1, shift, vl);
+ return __riscv_vssra_vv_i32m4_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i32> @test_vssra_vx_i32m4_tu
// CHECK-RV64-SAME: (<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vssra.nxv8i32.i64.i64(<vscale x 8 x i32> [[MASKEDOFF]], <vscale x 8 x i32> [[OP1]], i64 [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vssra.nxv8i32.i64.i64(<vscale x 8 x i32> [[MASKEDOFF]], <vscale x 8 x i32> [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vssra_vx_i32m4_tu(vint32m4_t maskedoff, vint32m4_t op1, size_t shift, size_t vl) {
- return __riscv_vssra_vx_i32m4_tu(maskedoff, op1, shift, vl);
+ return __riscv_vssra_vx_i32m4_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i32> @test_vssra_vv_i32m8_tu
// CHECK-RV64-SAME: (<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vssra.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[MASKEDOFF]], <vscale x 16 x i32> [[OP1]], <vscale x 16 x i32> [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vssra.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[MASKEDOFF]], <vscale x 16 x i32> [[OP1]], <vscale x 16 x i32> [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vssra_vv_i32m8_tu(vint32m8_t maskedoff, vint32m8_t op1, vuint32m8_t shift, size_t vl) {
- return __riscv_vssra_vv_i32m8_tu(maskedoff, op1, shift, vl);
+ return __riscv_vssra_vv_i32m8_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i32> @test_vssra_vx_i32m8_tu
// CHECK-RV64-SAME: (<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vssra.nxv16i32.i64.i64(<vscale x 16 x i32> [[MASKEDOFF]], <vscale x 16 x i32> [[OP1]], i64 [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vssra.nxv16i32.i64.i64(<vscale x 16 x i32> [[MASKEDOFF]], <vscale x 16 x i32> [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vssra_vx_i32m8_tu(vint32m8_t maskedoff, vint32m8_t op1, size_t shift, size_t vl) {
- return __riscv_vssra_vx_i32m8_tu(maskedoff, op1, shift, vl);
+ return __riscv_vssra_vx_i32m8_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i64> @test_vssra_vv_i64m1_tu
// CHECK-RV64-SAME: (<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vssra.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[MASKEDOFF]], <vscale x 1 x i64> [[OP1]], <vscale x 1 x i64> [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vssra.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[MASKEDOFF]], <vscale x 1 x i64> [[OP1]], <vscale x 1 x i64> [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vssra_vv_i64m1_tu(vint64m1_t maskedoff, vint64m1_t op1, vuint64m1_t shift, size_t vl) {
- return __riscv_vssra_vv_i64m1_tu(maskedoff, op1, shift, vl);
+ return __riscv_vssra_vv_i64m1_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i64> @test_vssra_vx_i64m1_tu
// CHECK-RV64-SAME: (<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vssra.nxv1i64.i64.i64(<vscale x 1 x i64> [[MASKEDOFF]], <vscale x 1 x i64> [[OP1]], i64 [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vssra.nxv1i64.i64.i64(<vscale x 1 x i64> [[MASKEDOFF]], <vscale x 1 x i64> [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vssra_vx_i64m1_tu(vint64m1_t maskedoff, vint64m1_t op1, size_t shift, size_t vl) {
- return __riscv_vssra_vx_i64m1_tu(maskedoff, op1, shift, vl);
+ return __riscv_vssra_vx_i64m1_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i64> @test_vssra_vv_i64m2_tu
// CHECK-RV64-SAME: (<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vssra.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[MASKEDOFF]], <vscale x 2 x i64> [[OP1]], <vscale x 2 x i64> [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vssra.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[MASKEDOFF]], <vscale x 2 x i64> [[OP1]], <vscale x 2 x i64> [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vssra_vv_i64m2_tu(vint64m2_t maskedoff, vint64m2_t op1, vuint64m2_t shift, size_t vl) {
- return __riscv_vssra_vv_i64m2_tu(maskedoff, op1, shift, vl);
+ return __riscv_vssra_vv_i64m2_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i64> @test_vssra_vx_i64m2_tu
// CHECK-RV64-SAME: (<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vssra.nxv2i64.i64.i64(<vscale x 2 x i64> [[MASKEDOFF]], <vscale x 2 x i64> [[OP1]], i64 [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vssra.nxv2i64.i64.i64(<vscale x 2 x i64> [[MASKEDOFF]], <vscale x 2 x i64> [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vssra_vx_i64m2_tu(vint64m2_t maskedoff, vint64m2_t op1, size_t shift, size_t vl) {
- return __riscv_vssra_vx_i64m2_tu(maskedoff, op1, shift, vl);
+ return __riscv_vssra_vx_i64m2_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i64> @test_vssra_vv_i64m4_tu
// CHECK-RV64-SAME: (<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vssra.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[MASKEDOFF]], <vscale x 4 x i64> [[OP1]], <vscale x 4 x i64> [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vssra.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[MASKEDOFF]], <vscale x 4 x i64> [[OP1]], <vscale x 4 x i64> [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vssra_vv_i64m4_tu(vint64m4_t maskedoff, vint64m4_t op1, vuint64m4_t shift, size_t vl) {
- return __riscv_vssra_vv_i64m4_tu(maskedoff, op1, shift, vl);
+ return __riscv_vssra_vv_i64m4_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i64> @test_vssra_vx_i64m4_tu
// CHECK-RV64-SAME: (<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vssra.nxv4i64.i64.i64(<vscale x 4 x i64> [[MASKEDOFF]], <vscale x 4 x i64> [[OP1]], i64 [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vssra.nxv4i64.i64.i64(<vscale x 4 x i64> [[MASKEDOFF]], <vscale x 4 x i64> [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vssra_vx_i64m4_tu(vint64m4_t maskedoff, vint64m4_t op1, size_t shift, size_t vl) {
- return __riscv_vssra_vx_i64m4_tu(maskedoff, op1, shift, vl);
+ return __riscv_vssra_vx_i64m4_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i64> @test_vssra_vv_i64m8_tu
// CHECK-RV64-SAME: (<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vssra.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[MASKEDOFF]], <vscale x 8 x i64> [[OP1]], <vscale x 8 x i64> [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vssra.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[MASKEDOFF]], <vscale x 8 x i64> [[OP1]], <vscale x 8 x i64> [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vssra_vv_i64m8_tu(vint64m8_t maskedoff, vint64m8_t op1, vuint64m8_t shift, size_t vl) {
- return __riscv_vssra_vv_i64m8_tu(maskedoff, op1, shift, vl);
+ return __riscv_vssra_vv_i64m8_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i64> @test_vssra_vx_i64m8_tu
// CHECK-RV64-SAME: (<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vssra.nxv8i64.i64.i64(<vscale x 8 x i64> [[MASKEDOFF]], <vscale x 8 x i64> [[OP1]], i64 [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vssra.nxv8i64.i64.i64(<vscale x 8 x i64> [[MASKEDOFF]], <vscale x 8 x i64> [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vssra_vx_i64m8_tu(vint64m8_t maskedoff, vint64m8_t op1, size_t shift, size_t vl) {
- return __riscv_vssra_vx_i64m8_tu(maskedoff, op1, shift, vl);
+ return __riscv_vssra_vx_i64m8_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vssra_vv_i8mf8_tum
// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vssra.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF]], <vscale x 1 x i8> [[OP1]], <vscale x 1 x i8> [[SHIFT]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vssra.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF]], <vscale x 1 x i8> [[OP1]], <vscale x 1 x i8> [[SHIFT]], <vscale x 1 x i1> [[MASK]], i64 0, i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vssra_vv_i8mf8_tum(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vuint8mf8_t shift, size_t vl) {
- return __riscv_vssra_vv_i8mf8_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssra_vv_i8mf8_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vssra_vx_i8mf8_tum
// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vssra.mask.nxv1i8.i64.i64(<vscale x 1 x i8> [[MASKEDOFF]], <vscale x 1 x i8> [[OP1]], i64 [[SHIFT]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vssra.mask.nxv1i8.i64.i64(<vscale x 1 x i8> [[MASKEDOFF]], <vscale x 1 x i8> [[OP1]], i64 [[SHIFT]], <vscale x 1 x i1> [[MASK]], i64 0, i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vssra_vx_i8mf8_tum(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, size_t shift, size_t vl) {
- return __riscv_vssra_vx_i8mf8_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssra_vx_i8mf8_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vssra_vv_i8mf4_tum
// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vssra.mask.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[MASKEDOFF]], <vscale x 2 x i8> [[OP1]], <vscale x 2 x i8> [[SHIFT]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vssra.mask.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[MASKEDOFF]], <vscale x 2 x i8> [[OP1]], <vscale x 2 x i8> [[SHIFT]], <vscale x 2 x i1> [[MASK]], i64 0, i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vssra_vv_i8mf4_tum(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vuint8mf4_t shift, size_t vl) {
- return __riscv_vssra_vv_i8mf4_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssra_vv_i8mf4_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vssra_vx_i8mf4_tum
// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vssra.mask.nxv2i8.i64.i64(<vscale x 2 x i8> [[MASKEDOFF]], <vscale x 2 x i8> [[OP1]], i64 [[SHIFT]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vssra.mask.nxv2i8.i64.i64(<vscale x 2 x i8> [[MASKEDOFF]], <vscale x 2 x i8> [[OP1]], i64 [[SHIFT]], <vscale x 2 x i1> [[MASK]], i64 0, i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vssra_vx_i8mf4_tum(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, size_t shift, size_t vl) {
- return __riscv_vssra_vx_i8mf4_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssra_vx_i8mf4_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vssra_vv_i8mf2_tum
// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vssra.mask.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[MASKEDOFF]], <vscale x 4 x i8> [[OP1]], <vscale x 4 x i8> [[SHIFT]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vssra.mask.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[MASKEDOFF]], <vscale x 4 x i8> [[OP1]], <vscale x 4 x i8> [[SHIFT]], <vscale x 4 x i1> [[MASK]], i64 0, i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vssra_vv_i8mf2_tum(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vuint8mf2_t shift, size_t vl) {
- return __riscv_vssra_vv_i8mf2_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssra_vv_i8mf2_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vssra_vx_i8mf2_tum
// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vssra.mask.nxv4i8.i64.i64(<vscale x 4 x i8> [[MASKEDOFF]], <vscale x 4 x i8> [[OP1]], i64 [[SHIFT]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vssra.mask.nxv4i8.i64.i64(<vscale x 4 x i8> [[MASKEDOFF]], <vscale x 4 x i8> [[OP1]], i64 [[SHIFT]], <vscale x 4 x i1> [[MASK]], i64 0, i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vssra_vx_i8mf2_tum(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, size_t shift, size_t vl) {
- return __riscv_vssra_vx_i8mf2_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssra_vx_i8mf2_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vssra_vv_i8m1_tum
// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vssra.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF]], <vscale x 8 x i8> [[OP1]], <vscale x 8 x i8> [[SHIFT]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vssra.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF]], <vscale x 8 x i8> [[OP1]], <vscale x 8 x i8> [[SHIFT]], <vscale x 8 x i1> [[MASK]], i64 0, i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vssra_vv_i8m1_tum(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vuint8m1_t shift, size_t vl) {
- return __riscv_vssra_vv_i8m1_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssra_vv_i8m1_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vssra_vx_i8m1_tum
// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vssra.mask.nxv8i8.i64.i64(<vscale x 8 x i8> [[MASKEDOFF]], <vscale x 8 x i8> [[OP1]], i64 [[SHIFT]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vssra.mask.nxv8i8.i64.i64(<vscale x 8 x i8> [[MASKEDOFF]], <vscale x 8 x i8> [[OP1]], i64 [[SHIFT]], <vscale x 8 x i1> [[MASK]], i64 0, i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vssra_vx_i8m1_tum(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, size_t shift, size_t vl) {
- return __riscv_vssra_vx_i8m1_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssra_vx_i8m1_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vssra_vv_i8m2_tum
// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vssra.mask.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[MASKEDOFF]], <vscale x 16 x i8> [[OP1]], <vscale x 16 x i8> [[SHIFT]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vssra.mask.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[MASKEDOFF]], <vscale x 16 x i8> [[OP1]], <vscale x 16 x i8> [[SHIFT]], <vscale x 16 x i1> [[MASK]], i64 0, i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vssra_vv_i8m2_tum(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vuint8m2_t shift, size_t vl) {
- return __riscv_vssra_vv_i8m2_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssra_vv_i8m2_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vssra_vx_i8m2_tum
// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vssra.mask.nxv16i8.i64.i64(<vscale x 16 x i8> [[MASKEDOFF]], <vscale x 16 x i8> [[OP1]], i64 [[SHIFT]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vssra.mask.nxv16i8.i64.i64(<vscale x 16 x i8> [[MASKEDOFF]], <vscale x 16 x i8> [[OP1]], i64 [[SHIFT]], <vscale x 16 x i1> [[MASK]], i64 0, i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vssra_vx_i8m2_tum(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, size_t shift, size_t vl) {
- return __riscv_vssra_vx_i8m2_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssra_vx_i8m2_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vssra_vv_i8m4_tum
// CHECK-RV64-SAME: (<vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vssra.mask.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[MASKEDOFF]], <vscale x 32 x i8> [[OP1]], <vscale x 32 x i8> [[SHIFT]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vssra.mask.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[MASKEDOFF]], <vscale x 32 x i8> [[OP1]], <vscale x 32 x i8> [[SHIFT]], <vscale x 32 x i1> [[MASK]], i64 0, i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vssra_vv_i8m4_tum(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vuint8m4_t shift, size_t vl) {
- return __riscv_vssra_vv_i8m4_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssra_vv_i8m4_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vssra_vx_i8m4_tum
// CHECK-RV64-SAME: (<vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vssra.mask.nxv32i8.i64.i64(<vscale x 32 x i8> [[MASKEDOFF]], <vscale x 32 x i8> [[OP1]], i64 [[SHIFT]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vssra.mask.nxv32i8.i64.i64(<vscale x 32 x i8> [[MASKEDOFF]], <vscale x 32 x i8> [[OP1]], i64 [[SHIFT]], <vscale x 32 x i1> [[MASK]], i64 0, i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vssra_vx_i8m4_tum(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, size_t shift, size_t vl) {
- return __riscv_vssra_vx_i8m4_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssra_vx_i8m4_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 64 x i8> @test_vssra_vv_i8m8_tum
// CHECK-RV64-SAME: (<vscale x 64 x i1> [[MASK:%.*]], <vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vssra.mask.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[MASKEDOFF]], <vscale x 64 x i8> [[OP1]], <vscale x 64 x i8> [[SHIFT]], <vscale x 64 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vssra.mask.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[MASKEDOFF]], <vscale x 64 x i8> [[OP1]], <vscale x 64 x i8> [[SHIFT]], <vscale x 64 x i1> [[MASK]], i64 0, i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vssra_vv_i8m8_tum(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vuint8m8_t shift, size_t vl) {
- return __riscv_vssra_vv_i8m8_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssra_vv_i8m8_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 64 x i8> @test_vssra_vx_i8m8_tum
// CHECK-RV64-SAME: (<vscale x 64 x i1> [[MASK:%.*]], <vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vssra.mask.nxv64i8.i64.i64(<vscale x 64 x i8> [[MASKEDOFF]], <vscale x 64 x i8> [[OP1]], i64 [[SHIFT]], <vscale x 64 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vssra.mask.nxv64i8.i64.i64(<vscale x 64 x i8> [[MASKEDOFF]], <vscale x 64 x i8> [[OP1]], i64 [[SHIFT]], <vscale x 64 x i1> [[MASK]], i64 0, i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vssra_vx_i8m8_tum(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, size_t shift, size_t vl) {
- return __riscv_vssra_vx_i8m8_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssra_vx_i8m8_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i16> @test_vssra_vv_i16mf4_tum
// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vssra.mask.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[MASKEDOFF]], <vscale x 1 x i16> [[OP1]], <vscale x 1 x i16> [[SHIFT]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vssra.mask.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[MASKEDOFF]], <vscale x 1 x i16> [[OP1]], <vscale x 1 x i16> [[SHIFT]], <vscale x 1 x i1> [[MASK]], i64 0, i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vssra_vv_i16mf4_tum(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vuint16mf4_t shift, size_t vl) {
- return __riscv_vssra_vv_i16mf4_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssra_vv_i16mf4_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i16> @test_vssra_vx_i16mf4_tum
// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vssra.mask.nxv1i16.i64.i64(<vscale x 1 x i16> [[MASKEDOFF]], <vscale x 1 x i16> [[OP1]], i64 [[SHIFT]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vssra.mask.nxv1i16.i64.i64(<vscale x 1 x i16> [[MASKEDOFF]], <vscale x 1 x i16> [[OP1]], i64 [[SHIFT]], <vscale x 1 x i1> [[MASK]], i64 0, i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vssra_vx_i16mf4_tum(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, size_t shift, size_t vl) {
- return __riscv_vssra_vx_i16mf4_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssra_vx_i16mf4_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i16> @test_vssra_vv_i16mf2_tum
// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vssra.mask.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF]], <vscale x 2 x i16> [[OP1]], <vscale x 2 x i16> [[SHIFT]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vssra.mask.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF]], <vscale x 2 x i16> [[OP1]], <vscale x 2 x i16> [[SHIFT]], <vscale x 2 x i1> [[MASK]], i64 0, i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vssra_vv_i16mf2_tum(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vuint16mf2_t shift, size_t vl) {
- return __riscv_vssra_vv_i16mf2_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssra_vv_i16mf2_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i16> @test_vssra_vx_i16mf2_tum
// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vssra.mask.nxv2i16.i64.i64(<vscale x 2 x i16> [[MASKEDOFF]], <vscale x 2 x i16> [[OP1]], i64 [[SHIFT]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vssra.mask.nxv2i16.i64.i64(<vscale x 2 x i16> [[MASKEDOFF]], <vscale x 2 x i16> [[OP1]], i64 [[SHIFT]], <vscale x 2 x i1> [[MASK]], i64 0, i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vssra_vx_i16mf2_tum(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, size_t shift, size_t vl) {
- return __riscv_vssra_vx_i16mf2_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssra_vx_i16mf2_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vssra_vv_i16m1_tum
// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vssra.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF]], <vscale x 4 x i16> [[OP1]], <vscale x 4 x i16> [[SHIFT]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vssra.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF]], <vscale x 4 x i16> [[OP1]], <vscale x 4 x i16> [[SHIFT]], <vscale x 4 x i1> [[MASK]], i64 0, i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vssra_vv_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vuint16m1_t shift, size_t vl) {
- return __riscv_vssra_vv_i16m1_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssra_vv_i16m1_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vssra_vx_i16m1_tum
// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vssra.mask.nxv4i16.i64.i64(<vscale x 4 x i16> [[MASKEDOFF]], <vscale x 4 x i16> [[OP1]], i64 [[SHIFT]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vssra.mask.nxv4i16.i64.i64(<vscale x 4 x i16> [[MASKEDOFF]], <vscale x 4 x i16> [[OP1]], i64 [[SHIFT]], <vscale x 4 x i1> [[MASK]], i64 0, i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vssra_vx_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, size_t shift, size_t vl) {
- return __riscv_vssra_vx_i16m1_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssra_vx_i16m1_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i16> @test_vssra_vv_i16m2_tum
// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vssra.mask.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[MASKEDOFF]], <vscale x 8 x i16> [[OP1]], <vscale x 8 x i16> [[SHIFT]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vssra.mask.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[MASKEDOFF]], <vscale x 8 x i16> [[OP1]], <vscale x 8 x i16> [[SHIFT]], <vscale x 8 x i1> [[MASK]], i64 0, i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vssra_vv_i16m2_tum(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vuint16m2_t shift, size_t vl) {
- return __riscv_vssra_vv_i16m2_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssra_vv_i16m2_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i16> @test_vssra_vx_i16m2_tum
// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vssra.mask.nxv8i16.i64.i64(<vscale x 8 x i16> [[MASKEDOFF]], <vscale x 8 x i16> [[OP1]], i64 [[SHIFT]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vssra.mask.nxv8i16.i64.i64(<vscale x 8 x i16> [[MASKEDOFF]], <vscale x 8 x i16> [[OP1]], i64 [[SHIFT]], <vscale x 8 x i1> [[MASK]], i64 0, i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vssra_vx_i16m2_tum(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, size_t shift, size_t vl) {
- return __riscv_vssra_vx_i16m2_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssra_vx_i16m2_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i16> @test_vssra_vv_i16m4_tum
// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vssra.mask.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[MASKEDOFF]], <vscale x 16 x i16> [[OP1]], <vscale x 16 x i16> [[SHIFT]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vssra.mask.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[MASKEDOFF]], <vscale x 16 x i16> [[OP1]], <vscale x 16 x i16> [[SHIFT]], <vscale x 16 x i1> [[MASK]], i64 0, i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vssra_vv_i16m4_tum(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vuint16m4_t shift, size_t vl) {
- return __riscv_vssra_vv_i16m4_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssra_vv_i16m4_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i16> @test_vssra_vx_i16m4_tum
// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vssra.mask.nxv16i16.i64.i64(<vscale x 16 x i16> [[MASKEDOFF]], <vscale x 16 x i16> [[OP1]], i64 [[SHIFT]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vssra.mask.nxv16i16.i64.i64(<vscale x 16 x i16> [[MASKEDOFF]], <vscale x 16 x i16> [[OP1]], i64 [[SHIFT]], <vscale x 16 x i1> [[MASK]], i64 0, i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vssra_vx_i16m4_tum(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, size_t shift, size_t vl) {
- return __riscv_vssra_vx_i16m4_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssra_vx_i16m4_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i16> @test_vssra_vv_i16m8_tum
// CHECK-RV64-SAME: (<vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vssra.mask.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[MASKEDOFF]], <vscale x 32 x i16> [[OP1]], <vscale x 32 x i16> [[SHIFT]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vssra.mask.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[MASKEDOFF]], <vscale x 32 x i16> [[OP1]], <vscale x 32 x i16> [[SHIFT]], <vscale x 32 x i1> [[MASK]], i64 0, i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vssra_vv_i16m8_tum(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vuint16m8_t shift, size_t vl) {
- return __riscv_vssra_vv_i16m8_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssra_vv_i16m8_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i16> @test_vssra_vx_i16m8_tum
// CHECK-RV64-SAME: (<vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vssra.mask.nxv32i16.i64.i64(<vscale x 32 x i16> [[MASKEDOFF]], <vscale x 32 x i16> [[OP1]], i64 [[SHIFT]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vssra.mask.nxv32i16.i64.i64(<vscale x 32 x i16> [[MASKEDOFF]], <vscale x 32 x i16> [[OP1]], i64 [[SHIFT]], <vscale x 32 x i1> [[MASK]], i64 0, i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vssra_vx_i16m8_tum(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, size_t shift, size_t vl) {
- return __riscv_vssra_vx_i16m8_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssra_vx_i16m8_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i32> @test_vssra_vv_i32mf2_tum
// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vssra.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[MASKEDOFF]], <vscale x 1 x i32> [[OP1]], <vscale x 1 x i32> [[SHIFT]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vssra.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[MASKEDOFF]], <vscale x 1 x i32> [[OP1]], <vscale x 1 x i32> [[SHIFT]], <vscale x 1 x i1> [[MASK]], i64 0, i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vssra_vv_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vuint32mf2_t shift, size_t vl) {
- return __riscv_vssra_vv_i32mf2_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssra_vv_i32mf2_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i32> @test_vssra_vx_i32mf2_tum
// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vssra.mask.nxv1i32.i64.i64(<vscale x 1 x i32> [[MASKEDOFF]], <vscale x 1 x i32> [[OP1]], i64 [[SHIFT]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vssra.mask.nxv1i32.i64.i64(<vscale x 1 x i32> [[MASKEDOFF]], <vscale x 1 x i32> [[OP1]], i64 [[SHIFT]], <vscale x 1 x i1> [[MASK]], i64 0, i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vssra_vx_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, size_t shift, size_t vl) {
- return __riscv_vssra_vx_i32mf2_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssra_vx_i32mf2_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i32> @test_vssra_vv_i32m1_tum
// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vssra.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[MASKEDOFF]], <vscale x 2 x i32> [[OP1]], <vscale x 2 x i32> [[SHIFT]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vssra.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[MASKEDOFF]], <vscale x 2 x i32> [[OP1]], <vscale x 2 x i32> [[SHIFT]], <vscale x 2 x i1> [[MASK]], i64 0, i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vssra_vv_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vuint32m1_t shift, size_t vl) {
- return __riscv_vssra_vv_i32m1_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssra_vv_i32m1_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i32> @test_vssra_vx_i32m1_tum
// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vssra.mask.nxv2i32.i64.i64(<vscale x 2 x i32> [[MASKEDOFF]], <vscale x 2 x i32> [[OP1]], i64 [[SHIFT]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vssra.mask.nxv2i32.i64.i64(<vscale x 2 x i32> [[MASKEDOFF]], <vscale x 2 x i32> [[OP1]], i64 [[SHIFT]], <vscale x 2 x i1> [[MASK]], i64 0, i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vssra_vx_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, size_t shift, size_t vl) {
- return __riscv_vssra_vx_i32m1_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssra_vx_i32m1_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i32> @test_vssra_vv_i32m2_tum
// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vssra.mask.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[MASKEDOFF]], <vscale x 4 x i32> [[OP1]], <vscale x 4 x i32> [[SHIFT]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vssra.mask.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[MASKEDOFF]], <vscale x 4 x i32> [[OP1]], <vscale x 4 x i32> [[SHIFT]], <vscale x 4 x i1> [[MASK]], i64 0, i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vssra_vv_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vuint32m2_t shift, size_t vl) {
- return __riscv_vssra_vv_i32m2_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssra_vv_i32m2_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i32> @test_vssra_vx_i32m2_tum
// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vssra.mask.nxv4i32.i64.i64(<vscale x 4 x i32> [[MASKEDOFF]], <vscale x 4 x i32> [[OP1]], i64 [[SHIFT]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vssra.mask.nxv4i32.i64.i64(<vscale x 4 x i32> [[MASKEDOFF]], <vscale x 4 x i32> [[OP1]], i64 [[SHIFT]], <vscale x 4 x i1> [[MASK]], i64 0, i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vssra_vx_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, size_t shift, size_t vl) {
- return __riscv_vssra_vx_i32m2_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssra_vx_i32m2_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i32> @test_vssra_vv_i32m4_tum
// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vssra.mask.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[MASKEDOFF]], <vscale x 8 x i32> [[OP1]], <vscale x 8 x i32> [[SHIFT]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vssra.mask.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[MASKEDOFF]], <vscale x 8 x i32> [[OP1]], <vscale x 8 x i32> [[SHIFT]], <vscale x 8 x i1> [[MASK]], i64 0, i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vssra_vv_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vuint32m4_t shift, size_t vl) {
- return __riscv_vssra_vv_i32m4_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssra_vv_i32m4_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i32> @test_vssra_vx_i32m4_tum
// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vssra.mask.nxv8i32.i64.i64(<vscale x 8 x i32> [[MASKEDOFF]], <vscale x 8 x i32> [[OP1]], i64 [[SHIFT]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vssra.mask.nxv8i32.i64.i64(<vscale x 8 x i32> [[MASKEDOFF]], <vscale x 8 x i32> [[OP1]], i64 [[SHIFT]], <vscale x 8 x i1> [[MASK]], i64 0, i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vssra_vx_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, size_t shift, size_t vl) {
- return __riscv_vssra_vx_i32m4_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssra_vx_i32m4_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i32> @test_vssra_vv_i32m8_tum
// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vssra.mask.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[MASKEDOFF]], <vscale x 16 x i32> [[OP1]], <vscale x 16 x i32> [[SHIFT]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vssra.mask.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[MASKEDOFF]], <vscale x 16 x i32> [[OP1]], <vscale x 16 x i32> [[SHIFT]], <vscale x 16 x i1> [[MASK]], i64 0, i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vssra_vv_i32m8_tum(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vuint32m8_t shift, size_t vl) {
- return __riscv_vssra_vv_i32m8_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssra_vv_i32m8_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i32> @test_vssra_vx_i32m8_tum
// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vssra.mask.nxv16i32.i64.i64(<vscale x 16 x i32> [[MASKEDOFF]], <vscale x 16 x i32> [[OP1]], i64 [[SHIFT]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vssra.mask.nxv16i32.i64.i64(<vscale x 16 x i32> [[MASKEDOFF]], <vscale x 16 x i32> [[OP1]], i64 [[SHIFT]], <vscale x 16 x i1> [[MASK]], i64 0, i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vssra_vx_i32m8_tum(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, size_t shift, size_t vl) {
- return __riscv_vssra_vx_i32m8_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssra_vx_i32m8_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i64> @test_vssra_vv_i64m1_tum
// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vssra.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[MASKEDOFF]], <vscale x 1 x i64> [[OP1]], <vscale x 1 x i64> [[SHIFT]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vssra.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[MASKEDOFF]], <vscale x 1 x i64> [[OP1]], <vscale x 1 x i64> [[SHIFT]], <vscale x 1 x i1> [[MASK]], i64 0, i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vssra_vv_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vuint64m1_t shift, size_t vl) {
- return __riscv_vssra_vv_i64m1_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssra_vv_i64m1_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i64> @test_vssra_vx_i64m1_tum
// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vssra.mask.nxv1i64.i64.i64(<vscale x 1 x i64> [[MASKEDOFF]], <vscale x 1 x i64> [[OP1]], i64 [[SHIFT]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vssra.mask.nxv1i64.i64.i64(<vscale x 1 x i64> [[MASKEDOFF]], <vscale x 1 x i64> [[OP1]], i64 [[SHIFT]], <vscale x 1 x i1> [[MASK]], i64 0, i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vssra_vx_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, size_t shift, size_t vl) {
- return __riscv_vssra_vx_i64m1_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssra_vx_i64m1_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i64> @test_vssra_vv_i64m2_tum
// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vssra.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[MASKEDOFF]], <vscale x 2 x i64> [[OP1]], <vscale x 2 x i64> [[SHIFT]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vssra.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[MASKEDOFF]], <vscale x 2 x i64> [[OP1]], <vscale x 2 x i64> [[SHIFT]], <vscale x 2 x i1> [[MASK]], i64 0, i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vssra_vv_i64m2_tum(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vuint64m2_t shift, size_t vl) {
- return __riscv_vssra_vv_i64m2_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssra_vv_i64m2_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i64> @test_vssra_vx_i64m2_tum
// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vssra.mask.nxv2i64.i64.i64(<vscale x 2 x i64> [[MASKEDOFF]], <vscale x 2 x i64> [[OP1]], i64 [[SHIFT]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vssra.mask.nxv2i64.i64.i64(<vscale x 2 x i64> [[MASKEDOFF]], <vscale x 2 x i64> [[OP1]], i64 [[SHIFT]], <vscale x 2 x i1> [[MASK]], i64 0, i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vssra_vx_i64m2_tum(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, size_t shift, size_t vl) {
- return __riscv_vssra_vx_i64m2_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssra_vx_i64m2_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i64> @test_vssra_vv_i64m4_tum
// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vssra.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[MASKEDOFF]], <vscale x 4 x i64> [[OP1]], <vscale x 4 x i64> [[SHIFT]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vssra.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[MASKEDOFF]], <vscale x 4 x i64> [[OP1]], <vscale x 4 x i64> [[SHIFT]], <vscale x 4 x i1> [[MASK]], i64 0, i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vssra_vv_i64m4_tum(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vuint64m4_t shift, size_t vl) {
- return __riscv_vssra_vv_i64m4_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssra_vv_i64m4_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i64> @test_vssra_vx_i64m4_tum
// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vssra.mask.nxv4i64.i64.i64(<vscale x 4 x i64> [[MASKEDOFF]], <vscale x 4 x i64> [[OP1]], i64 [[SHIFT]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vssra.mask.nxv4i64.i64.i64(<vscale x 4 x i64> [[MASKEDOFF]], <vscale x 4 x i64> [[OP1]], i64 [[SHIFT]], <vscale x 4 x i1> [[MASK]], i64 0, i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vssra_vx_i64m4_tum(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, size_t shift, size_t vl) {
- return __riscv_vssra_vx_i64m4_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssra_vx_i64m4_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i64> @test_vssra_vv_i64m8_tum
// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vssra.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[MASKEDOFF]], <vscale x 8 x i64> [[OP1]], <vscale x 8 x i64> [[SHIFT]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vssra.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[MASKEDOFF]], <vscale x 8 x i64> [[OP1]], <vscale x 8 x i64> [[SHIFT]], <vscale x 8 x i1> [[MASK]], i64 0, i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vssra_vv_i64m8_tum(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vuint64m8_t shift, size_t vl) {
- return __riscv_vssra_vv_i64m8_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssra_vv_i64m8_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i64> @test_vssra_vx_i64m8_tum
// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vssra.mask.nxv8i64.i64.i64(<vscale x 8 x i64> [[MASKEDOFF]], <vscale x 8 x i64> [[OP1]], i64 [[SHIFT]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vssra.mask.nxv8i64.i64.i64(<vscale x 8 x i64> [[MASKEDOFF]], <vscale x 8 x i64> [[OP1]], i64 [[SHIFT]], <vscale x 8 x i1> [[MASK]], i64 0, i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vssra_vx_i64m8_tum(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, size_t shift, size_t vl) {
- return __riscv_vssra_vx_i64m8_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssra_vx_i64m8_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vssra_vv_i8mf8_tumu
// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vssra.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF]], <vscale x 1 x i8> [[OP1]], <vscale x 1 x i8> [[SHIFT]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vssra.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF]], <vscale x 1 x i8> [[OP1]], <vscale x 1 x i8> [[SHIFT]], <vscale x 1 x i1> [[MASK]], i64 0, i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vssra_vv_i8mf8_tumu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vuint8mf8_t shift, size_t vl) {
- return __riscv_vssra_vv_i8mf8_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssra_vv_i8mf8_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vssra_vx_i8mf8_tumu
// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vssra.mask.nxv1i8.i64.i64(<vscale x 1 x i8> [[MASKEDOFF]], <vscale x 1 x i8> [[OP1]], i64 [[SHIFT]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vssra.mask.nxv1i8.i64.i64(<vscale x 1 x i8> [[MASKEDOFF]], <vscale x 1 x i8> [[OP1]], i64 [[SHIFT]], <vscale x 1 x i1> [[MASK]], i64 0, i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vssra_vx_i8mf8_tumu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, size_t shift, size_t vl) {
- return __riscv_vssra_vx_i8mf8_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssra_vx_i8mf8_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vssra_vv_i8mf4_tumu
// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vssra.mask.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[MASKEDOFF]], <vscale x 2 x i8> [[OP1]], <vscale x 2 x i8> [[SHIFT]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vssra.mask.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[MASKEDOFF]], <vscale x 2 x i8> [[OP1]], <vscale x 2 x i8> [[SHIFT]], <vscale x 2 x i1> [[MASK]], i64 0, i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vssra_vv_i8mf4_tumu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vuint8mf4_t shift, size_t vl) {
- return __riscv_vssra_vv_i8mf4_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssra_vv_i8mf4_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vssra_vx_i8mf4_tumu
// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vssra.mask.nxv2i8.i64.i64(<vscale x 2 x i8> [[MASKEDOFF]], <vscale x 2 x i8> [[OP1]], i64 [[SHIFT]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vssra.mask.nxv2i8.i64.i64(<vscale x 2 x i8> [[MASKEDOFF]], <vscale x 2 x i8> [[OP1]], i64 [[SHIFT]], <vscale x 2 x i1> [[MASK]], i64 0, i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vssra_vx_i8mf4_tumu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, size_t shift, size_t vl) {
- return __riscv_vssra_vx_i8mf4_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssra_vx_i8mf4_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vssra_vv_i8mf2_tumu
// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vssra.mask.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[MASKEDOFF]], <vscale x 4 x i8> [[OP1]], <vscale x 4 x i8> [[SHIFT]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vssra.mask.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[MASKEDOFF]], <vscale x 4 x i8> [[OP1]], <vscale x 4 x i8> [[SHIFT]], <vscale x 4 x i1> [[MASK]], i64 0, i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vssra_vv_i8mf2_tumu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vuint8mf2_t shift, size_t vl) {
- return __riscv_vssra_vv_i8mf2_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssra_vv_i8mf2_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vssra_vx_i8mf2_tumu
// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vssra.mask.nxv4i8.i64.i64(<vscale x 4 x i8> [[MASKEDOFF]], <vscale x 4 x i8> [[OP1]], i64 [[SHIFT]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vssra.mask.nxv4i8.i64.i64(<vscale x 4 x i8> [[MASKEDOFF]], <vscale x 4 x i8> [[OP1]], i64 [[SHIFT]], <vscale x 4 x i1> [[MASK]], i64 0, i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vssra_vx_i8mf2_tumu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, size_t shift, size_t vl) {
- return __riscv_vssra_vx_i8mf2_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssra_vx_i8mf2_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vssra_vv_i8m1_tumu
// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vssra.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF]], <vscale x 8 x i8> [[OP1]], <vscale x 8 x i8> [[SHIFT]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vssra.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF]], <vscale x 8 x i8> [[OP1]], <vscale x 8 x i8> [[SHIFT]], <vscale x 8 x i1> [[MASK]], i64 0, i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vssra_vv_i8m1_tumu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vuint8m1_t shift, size_t vl) {
- return __riscv_vssra_vv_i8m1_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssra_vv_i8m1_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vssra_vx_i8m1_tumu
// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vssra.mask.nxv8i8.i64.i64(<vscale x 8 x i8> [[MASKEDOFF]], <vscale x 8 x i8> [[OP1]], i64 [[SHIFT]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vssra.mask.nxv8i8.i64.i64(<vscale x 8 x i8> [[MASKEDOFF]], <vscale x 8 x i8> [[OP1]], i64 [[SHIFT]], <vscale x 8 x i1> [[MASK]], i64 0, i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vssra_vx_i8m1_tumu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, size_t shift, size_t vl) {
- return __riscv_vssra_vx_i8m1_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssra_vx_i8m1_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vssra_vv_i8m2_tumu
// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vssra.mask.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[MASKEDOFF]], <vscale x 16 x i8> [[OP1]], <vscale x 16 x i8> [[SHIFT]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vssra.mask.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[MASKEDOFF]], <vscale x 16 x i8> [[OP1]], <vscale x 16 x i8> [[SHIFT]], <vscale x 16 x i1> [[MASK]], i64 0, i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vssra_vv_i8m2_tumu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vuint8m2_t shift, size_t vl) {
- return __riscv_vssra_vv_i8m2_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssra_vv_i8m2_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vssra_vx_i8m2_tumu
// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vssra.mask.nxv16i8.i64.i64(<vscale x 16 x i8> [[MASKEDOFF]], <vscale x 16 x i8> [[OP1]], i64 [[SHIFT]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vssra.mask.nxv16i8.i64.i64(<vscale x 16 x i8> [[MASKEDOFF]], <vscale x 16 x i8> [[OP1]], i64 [[SHIFT]], <vscale x 16 x i1> [[MASK]], i64 0, i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vssra_vx_i8m2_tumu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, size_t shift, size_t vl) {
- return __riscv_vssra_vx_i8m2_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssra_vx_i8m2_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vssra_vv_i8m4_tumu
// CHECK-RV64-SAME: (<vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vssra.mask.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[MASKEDOFF]], <vscale x 32 x i8> [[OP1]], <vscale x 32 x i8> [[SHIFT]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vssra.mask.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[MASKEDOFF]], <vscale x 32 x i8> [[OP1]], <vscale x 32 x i8> [[SHIFT]], <vscale x 32 x i1> [[MASK]], i64 0, i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vssra_vv_i8m4_tumu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vuint8m4_t shift, size_t vl) {
- return __riscv_vssra_vv_i8m4_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssra_vv_i8m4_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vssra_vx_i8m4_tumu
// CHECK-RV64-SAME: (<vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vssra.mask.nxv32i8.i64.i64(<vscale x 32 x i8> [[MASKEDOFF]], <vscale x 32 x i8> [[OP1]], i64 [[SHIFT]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vssra.mask.nxv32i8.i64.i64(<vscale x 32 x i8> [[MASKEDOFF]], <vscale x 32 x i8> [[OP1]], i64 [[SHIFT]], <vscale x 32 x i1> [[MASK]], i64 0, i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vssra_vx_i8m4_tumu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, size_t shift, size_t vl) {
- return __riscv_vssra_vx_i8m4_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssra_vx_i8m4_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 64 x i8> @test_vssra_vv_i8m8_tumu
// CHECK-RV64-SAME: (<vscale x 64 x i1> [[MASK:%.*]], <vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vssra.mask.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[MASKEDOFF]], <vscale x 64 x i8> [[OP1]], <vscale x 64 x i8> [[SHIFT]], <vscale x 64 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vssra.mask.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[MASKEDOFF]], <vscale x 64 x i8> [[OP1]], <vscale x 64 x i8> [[SHIFT]], <vscale x 64 x i1> [[MASK]], i64 0, i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vssra_vv_i8m8_tumu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vuint8m8_t shift, size_t vl) {
- return __riscv_vssra_vv_i8m8_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssra_vv_i8m8_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 64 x i8> @test_vssra_vx_i8m8_tumu
// CHECK-RV64-SAME: (<vscale x 64 x i1> [[MASK:%.*]], <vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vssra.mask.nxv64i8.i64.i64(<vscale x 64 x i8> [[MASKEDOFF]], <vscale x 64 x i8> [[OP1]], i64 [[SHIFT]], <vscale x 64 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vssra.mask.nxv64i8.i64.i64(<vscale x 64 x i8> [[MASKEDOFF]], <vscale x 64 x i8> [[OP1]], i64 [[SHIFT]], <vscale x 64 x i1> [[MASK]], i64 0, i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vssra_vx_i8m8_tumu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, size_t shift, size_t vl) {
- return __riscv_vssra_vx_i8m8_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssra_vx_i8m8_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i16> @test_vssra_vv_i16mf4_tumu
// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vssra.mask.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[MASKEDOFF]], <vscale x 1 x i16> [[OP1]], <vscale x 1 x i16> [[SHIFT]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vssra.mask.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[MASKEDOFF]], <vscale x 1 x i16> [[OP1]], <vscale x 1 x i16> [[SHIFT]], <vscale x 1 x i1> [[MASK]], i64 0, i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vssra_vv_i16mf4_tumu(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vuint16mf4_t shift, size_t vl) {
- return __riscv_vssra_vv_i16mf4_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssra_vv_i16mf4_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i16> @test_vssra_vx_i16mf4_tumu
// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vssra.mask.nxv1i16.i64.i64(<vscale x 1 x i16> [[MASKEDOFF]], <vscale x 1 x i16> [[OP1]], i64 [[SHIFT]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vssra.mask.nxv1i16.i64.i64(<vscale x 1 x i16> [[MASKEDOFF]], <vscale x 1 x i16> [[OP1]], i64 [[SHIFT]], <vscale x 1 x i1> [[MASK]], i64 0, i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vssra_vx_i16mf4_tumu(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, size_t shift, size_t vl) {
- return __riscv_vssra_vx_i16mf4_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssra_vx_i16mf4_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i16> @test_vssra_vv_i16mf2_tumu
// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vssra.mask.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF]], <vscale x 2 x i16> [[OP1]], <vscale x 2 x i16> [[SHIFT]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vssra.mask.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF]], <vscale x 2 x i16> [[OP1]], <vscale x 2 x i16> [[SHIFT]], <vscale x 2 x i1> [[MASK]], i64 0, i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vssra_vv_i16mf2_tumu(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vuint16mf2_t shift, size_t vl) {
- return __riscv_vssra_vv_i16mf2_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssra_vv_i16mf2_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i16> @test_vssra_vx_i16mf2_tumu
// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vssra.mask.nxv2i16.i64.i64(<vscale x 2 x i16> [[MASKEDOFF]], <vscale x 2 x i16> [[OP1]], i64 [[SHIFT]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vssra.mask.nxv2i16.i64.i64(<vscale x 2 x i16> [[MASKEDOFF]], <vscale x 2 x i16> [[OP1]], i64 [[SHIFT]], <vscale x 2 x i1> [[MASK]], i64 0, i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vssra_vx_i16mf2_tumu(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, size_t shift, size_t vl) {
- return __riscv_vssra_vx_i16mf2_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssra_vx_i16mf2_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vssra_vv_i16m1_tumu
// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vssra.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF]], <vscale x 4 x i16> [[OP1]], <vscale x 4 x i16> [[SHIFT]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vssra.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF]], <vscale x 4 x i16> [[OP1]], <vscale x 4 x i16> [[SHIFT]], <vscale x 4 x i1> [[MASK]], i64 0, i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vssra_vv_i16m1_tumu(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vuint16m1_t shift, size_t vl) {
- return __riscv_vssra_vv_i16m1_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssra_vv_i16m1_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vssra_vx_i16m1_tumu
// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vssra.mask.nxv4i16.i64.i64(<vscale x 4 x i16> [[MASKEDOFF]], <vscale x 4 x i16> [[OP1]], i64 [[SHIFT]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vssra.mask.nxv4i16.i64.i64(<vscale x 4 x i16> [[MASKEDOFF]], <vscale x 4 x i16> [[OP1]], i64 [[SHIFT]], <vscale x 4 x i1> [[MASK]], i64 0, i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vssra_vx_i16m1_tumu(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, size_t shift, size_t vl) {
- return __riscv_vssra_vx_i16m1_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssra_vx_i16m1_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i16> @test_vssra_vv_i16m2_tumu
// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vssra.mask.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[MASKEDOFF]], <vscale x 8 x i16> [[OP1]], <vscale x 8 x i16> [[SHIFT]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vssra.mask.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[MASKEDOFF]], <vscale x 8 x i16> [[OP1]], <vscale x 8 x i16> [[SHIFT]], <vscale x 8 x i1> [[MASK]], i64 0, i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vssra_vv_i16m2_tumu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vuint16m2_t shift, size_t vl) {
- return __riscv_vssra_vv_i16m2_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssra_vv_i16m2_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i16> @test_vssra_vx_i16m2_tumu
// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vssra.mask.nxv8i16.i64.i64(<vscale x 8 x i16> [[MASKEDOFF]], <vscale x 8 x i16> [[OP1]], i64 [[SHIFT]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vssra.mask.nxv8i16.i64.i64(<vscale x 8 x i16> [[MASKEDOFF]], <vscale x 8 x i16> [[OP1]], i64 [[SHIFT]], <vscale x 8 x i1> [[MASK]], i64 0, i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vssra_vx_i16m2_tumu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, size_t shift, size_t vl) {
- return __riscv_vssra_vx_i16m2_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssra_vx_i16m2_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i16> @test_vssra_vv_i16m4_tumu
// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vssra.mask.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[MASKEDOFF]], <vscale x 16 x i16> [[OP1]], <vscale x 16 x i16> [[SHIFT]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vssra.mask.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[MASKEDOFF]], <vscale x 16 x i16> [[OP1]], <vscale x 16 x i16> [[SHIFT]], <vscale x 16 x i1> [[MASK]], i64 0, i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vssra_vv_i16m4_tumu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vuint16m4_t shift, size_t vl) {
- return __riscv_vssra_vv_i16m4_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssra_vv_i16m4_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i16> @test_vssra_vx_i16m4_tumu
// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vssra.mask.nxv16i16.i64.i64(<vscale x 16 x i16> [[MASKEDOFF]], <vscale x 16 x i16> [[OP1]], i64 [[SHIFT]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vssra.mask.nxv16i16.i64.i64(<vscale x 16 x i16> [[MASKEDOFF]], <vscale x 16 x i16> [[OP1]], i64 [[SHIFT]], <vscale x 16 x i1> [[MASK]], i64 0, i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vssra_vx_i16m4_tumu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, size_t shift, size_t vl) {
- return __riscv_vssra_vx_i16m4_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssra_vx_i16m4_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i16> @test_vssra_vv_i16m8_tumu
// CHECK-RV64-SAME: (<vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vssra.mask.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[MASKEDOFF]], <vscale x 32 x i16> [[OP1]], <vscale x 32 x i16> [[SHIFT]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vssra.mask.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[MASKEDOFF]], <vscale x 32 x i16> [[OP1]], <vscale x 32 x i16> [[SHIFT]], <vscale x 32 x i1> [[MASK]], i64 0, i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vssra_vv_i16m8_tumu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vuint16m8_t shift, size_t vl) {
- return __riscv_vssra_vv_i16m8_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssra_vv_i16m8_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i16> @test_vssra_vx_i16m8_tumu
// CHECK-RV64-SAME: (<vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vssra.mask.nxv32i16.i64.i64(<vscale x 32 x i16> [[MASKEDOFF]], <vscale x 32 x i16> [[OP1]], i64 [[SHIFT]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vssra.mask.nxv32i16.i64.i64(<vscale x 32 x i16> [[MASKEDOFF]], <vscale x 32 x i16> [[OP1]], i64 [[SHIFT]], <vscale x 32 x i1> [[MASK]], i64 0, i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vssra_vx_i16m8_tumu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, size_t shift, size_t vl) {
- return __riscv_vssra_vx_i16m8_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssra_vx_i16m8_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i32> @test_vssra_vv_i32mf2_tumu
// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vssra.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[MASKEDOFF]], <vscale x 1 x i32> [[OP1]], <vscale x 1 x i32> [[SHIFT]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vssra.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[MASKEDOFF]], <vscale x 1 x i32> [[OP1]], <vscale x 1 x i32> [[SHIFT]], <vscale x 1 x i1> [[MASK]], i64 0, i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vssra_vv_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vuint32mf2_t shift, size_t vl) {
- return __riscv_vssra_vv_i32mf2_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssra_vv_i32mf2_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i32> @test_vssra_vx_i32mf2_tumu
// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vssra.mask.nxv1i32.i64.i64(<vscale x 1 x i32> [[MASKEDOFF]], <vscale x 1 x i32> [[OP1]], i64 [[SHIFT]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vssra.mask.nxv1i32.i64.i64(<vscale x 1 x i32> [[MASKEDOFF]], <vscale x 1 x i32> [[OP1]], i64 [[SHIFT]], <vscale x 1 x i1> [[MASK]], i64 0, i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vssra_vx_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, size_t shift, size_t vl) {
- return __riscv_vssra_vx_i32mf2_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssra_vx_i32mf2_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i32> @test_vssra_vv_i32m1_tumu
// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vssra.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[MASKEDOFF]], <vscale x 2 x i32> [[OP1]], <vscale x 2 x i32> [[SHIFT]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vssra.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[MASKEDOFF]], <vscale x 2 x i32> [[OP1]], <vscale x 2 x i32> [[SHIFT]], <vscale x 2 x i1> [[MASK]], i64 0, i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vssra_vv_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vuint32m1_t shift, size_t vl) {
- return __riscv_vssra_vv_i32m1_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssra_vv_i32m1_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i32> @test_vssra_vx_i32m1_tumu
// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vssra.mask.nxv2i32.i64.i64(<vscale x 2 x i32> [[MASKEDOFF]], <vscale x 2 x i32> [[OP1]], i64 [[SHIFT]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vssra.mask.nxv2i32.i64.i64(<vscale x 2 x i32> [[MASKEDOFF]], <vscale x 2 x i32> [[OP1]], i64 [[SHIFT]], <vscale x 2 x i1> [[MASK]], i64 0, i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vssra_vx_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, size_t shift, size_t vl) {
- return __riscv_vssra_vx_i32m1_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssra_vx_i32m1_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i32> @test_vssra_vv_i32m2_tumu
// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vssra.mask.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[MASKEDOFF]], <vscale x 4 x i32> [[OP1]], <vscale x 4 x i32> [[SHIFT]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vssra.mask.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[MASKEDOFF]], <vscale x 4 x i32> [[OP1]], <vscale x 4 x i32> [[SHIFT]], <vscale x 4 x i1> [[MASK]], i64 0, i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vssra_vv_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vuint32m2_t shift, size_t vl) {
- return __riscv_vssra_vv_i32m2_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssra_vv_i32m2_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i32> @test_vssra_vx_i32m2_tumu
// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vssra.mask.nxv4i32.i64.i64(<vscale x 4 x i32> [[MASKEDOFF]], <vscale x 4 x i32> [[OP1]], i64 [[SHIFT]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vssra.mask.nxv4i32.i64.i64(<vscale x 4 x i32> [[MASKEDOFF]], <vscale x 4 x i32> [[OP1]], i64 [[SHIFT]], <vscale x 4 x i1> [[MASK]], i64 0, i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vssra_vx_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, size_t shift, size_t vl) {
- return __riscv_vssra_vx_i32m2_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssra_vx_i32m2_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i32> @test_vssra_vv_i32m4_tumu
// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vssra.mask.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[MASKEDOFF]], <vscale x 8 x i32> [[OP1]], <vscale x 8 x i32> [[SHIFT]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vssra.mask.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[MASKEDOFF]], <vscale x 8 x i32> [[OP1]], <vscale x 8 x i32> [[SHIFT]], <vscale x 8 x i1> [[MASK]], i64 0, i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vssra_vv_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vuint32m4_t shift, size_t vl) {
- return __riscv_vssra_vv_i32m4_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssra_vv_i32m4_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i32> @test_vssra_vx_i32m4_tumu
// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vssra.mask.nxv8i32.i64.i64(<vscale x 8 x i32> [[MASKEDOFF]], <vscale x 8 x i32> [[OP1]], i64 [[SHIFT]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vssra.mask.nxv8i32.i64.i64(<vscale x 8 x i32> [[MASKEDOFF]], <vscale x 8 x i32> [[OP1]], i64 [[SHIFT]], <vscale x 8 x i1> [[MASK]], i64 0, i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vssra_vx_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, size_t shift, size_t vl) {
- return __riscv_vssra_vx_i32m4_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssra_vx_i32m4_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i32> @test_vssra_vv_i32m8_tumu
// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vssra.mask.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[MASKEDOFF]], <vscale x 16 x i32> [[OP1]], <vscale x 16 x i32> [[SHIFT]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vssra.mask.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[MASKEDOFF]], <vscale x 16 x i32> [[OP1]], <vscale x 16 x i32> [[SHIFT]], <vscale x 16 x i1> [[MASK]], i64 0, i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vssra_vv_i32m8_tumu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vuint32m8_t shift, size_t vl) {
- return __riscv_vssra_vv_i32m8_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssra_vv_i32m8_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i32> @test_vssra_vx_i32m8_tumu
// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vssra.mask.nxv16i32.i64.i64(<vscale x 16 x i32> [[MASKEDOFF]], <vscale x 16 x i32> [[OP1]], i64 [[SHIFT]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vssra.mask.nxv16i32.i64.i64(<vscale x 16 x i32> [[MASKEDOFF]], <vscale x 16 x i32> [[OP1]], i64 [[SHIFT]], <vscale x 16 x i1> [[MASK]], i64 0, i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vssra_vx_i32m8_tumu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, size_t shift, size_t vl) {
- return __riscv_vssra_vx_i32m8_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssra_vx_i32m8_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i64> @test_vssra_vv_i64m1_tumu
// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vssra.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[MASKEDOFF]], <vscale x 1 x i64> [[OP1]], <vscale x 1 x i64> [[SHIFT]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vssra.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[MASKEDOFF]], <vscale x 1 x i64> [[OP1]], <vscale x 1 x i64> [[SHIFT]], <vscale x 1 x i1> [[MASK]], i64 0, i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vssra_vv_i64m1_tumu(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vuint64m1_t shift, size_t vl) {
- return __riscv_vssra_vv_i64m1_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssra_vv_i64m1_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i64> @test_vssra_vx_i64m1_tumu
// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vssra.mask.nxv1i64.i64.i64(<vscale x 1 x i64> [[MASKEDOFF]], <vscale x 1 x i64> [[OP1]], i64 [[SHIFT]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vssra.mask.nxv1i64.i64.i64(<vscale x 1 x i64> [[MASKEDOFF]], <vscale x 1 x i64> [[OP1]], i64 [[SHIFT]], <vscale x 1 x i1> [[MASK]], i64 0, i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vssra_vx_i64m1_tumu(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, size_t shift, size_t vl) {
- return __riscv_vssra_vx_i64m1_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssra_vx_i64m1_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i64> @test_vssra_vv_i64m2_tumu
// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vssra.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[MASKEDOFF]], <vscale x 2 x i64> [[OP1]], <vscale x 2 x i64> [[SHIFT]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vssra.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[MASKEDOFF]], <vscale x 2 x i64> [[OP1]], <vscale x 2 x i64> [[SHIFT]], <vscale x 2 x i1> [[MASK]], i64 0, i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vssra_vv_i64m2_tumu(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vuint64m2_t shift, size_t vl) {
- return __riscv_vssra_vv_i64m2_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssra_vv_i64m2_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i64> @test_vssra_vx_i64m2_tumu
// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vssra.mask.nxv2i64.i64.i64(<vscale x 2 x i64> [[MASKEDOFF]], <vscale x 2 x i64> [[OP1]], i64 [[SHIFT]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vssra.mask.nxv2i64.i64.i64(<vscale x 2 x i64> [[MASKEDOFF]], <vscale x 2 x i64> [[OP1]], i64 [[SHIFT]], <vscale x 2 x i1> [[MASK]], i64 0, i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vssra_vx_i64m2_tumu(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, size_t shift, size_t vl) {
- return __riscv_vssra_vx_i64m2_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssra_vx_i64m2_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i64> @test_vssra_vv_i64m4_tumu
// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vssra.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[MASKEDOFF]], <vscale x 4 x i64> [[OP1]], <vscale x 4 x i64> [[SHIFT]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vssra.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[MASKEDOFF]], <vscale x 4 x i64> [[OP1]], <vscale x 4 x i64> [[SHIFT]], <vscale x 4 x i1> [[MASK]], i64 0, i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vssra_vv_i64m4_tumu(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vuint64m4_t shift, size_t vl) {
- return __riscv_vssra_vv_i64m4_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssra_vv_i64m4_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i64> @test_vssra_vx_i64m4_tumu
// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vssra.mask.nxv4i64.i64.i64(<vscale x 4 x i64> [[MASKEDOFF]], <vscale x 4 x i64> [[OP1]], i64 [[SHIFT]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vssra.mask.nxv4i64.i64.i64(<vscale x 4 x i64> [[MASKEDOFF]], <vscale x 4 x i64> [[OP1]], i64 [[SHIFT]], <vscale x 4 x i1> [[MASK]], i64 0, i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vssra_vx_i64m4_tumu(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, size_t shift, size_t vl) {
- return __riscv_vssra_vx_i64m4_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssra_vx_i64m4_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i64> @test_vssra_vv_i64m8_tumu
// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vssra.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[MASKEDOFF]], <vscale x 8 x i64> [[OP1]], <vscale x 8 x i64> [[SHIFT]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vssra.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[MASKEDOFF]], <vscale x 8 x i64> [[OP1]], <vscale x 8 x i64> [[SHIFT]], <vscale x 8 x i1> [[MASK]], i64 0, i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vssra_vv_i64m8_tumu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vuint64m8_t shift, size_t vl) {
- return __riscv_vssra_vv_i64m8_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssra_vv_i64m8_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i64> @test_vssra_vx_i64m8_tumu
// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vssra.mask.nxv8i64.i64.i64(<vscale x 8 x i64> [[MASKEDOFF]], <vscale x 8 x i64> [[OP1]], i64 [[SHIFT]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vssra.mask.nxv8i64.i64.i64(<vscale x 8 x i64> [[MASKEDOFF]], <vscale x 8 x i64> [[OP1]], i64 [[SHIFT]], <vscale x 8 x i1> [[MASK]], i64 0, i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vssra_vx_i64m8_tumu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, size_t shift, size_t vl) {
- return __riscv_vssra_vx_i64m8_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssra_vx_i64m8_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vssra_vv_i8mf8_mu
// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vssra.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF]], <vscale x 1 x i8> [[OP1]], <vscale x 1 x i8> [[SHIFT]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vssra.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF]], <vscale x 1 x i8> [[OP1]], <vscale x 1 x i8> [[SHIFT]], <vscale x 1 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vssra_vv_i8mf8_mu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vuint8mf8_t shift, size_t vl) {
- return __riscv_vssra_vv_i8mf8_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssra_vv_i8mf8_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vssra_vx_i8mf8_mu
// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vssra.mask.nxv1i8.i64.i64(<vscale x 1 x i8> [[MASKEDOFF]], <vscale x 1 x i8> [[OP1]], i64 [[SHIFT]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vssra.mask.nxv1i8.i64.i64(<vscale x 1 x i8> [[MASKEDOFF]], <vscale x 1 x i8> [[OP1]], i64 [[SHIFT]], <vscale x 1 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vssra_vx_i8mf8_mu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, size_t shift, size_t vl) {
- return __riscv_vssra_vx_i8mf8_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssra_vx_i8mf8_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vssra_vv_i8mf4_mu
// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vssra.mask.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[MASKEDOFF]], <vscale x 2 x i8> [[OP1]], <vscale x 2 x i8> [[SHIFT]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vssra.mask.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[MASKEDOFF]], <vscale x 2 x i8> [[OP1]], <vscale x 2 x i8> [[SHIFT]], <vscale x 2 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vssra_vv_i8mf4_mu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vuint8mf4_t shift, size_t vl) {
- return __riscv_vssra_vv_i8mf4_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssra_vv_i8mf4_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vssra_vx_i8mf4_mu
// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vssra.mask.nxv2i8.i64.i64(<vscale x 2 x i8> [[MASKEDOFF]], <vscale x 2 x i8> [[OP1]], i64 [[SHIFT]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vssra.mask.nxv2i8.i64.i64(<vscale x 2 x i8> [[MASKEDOFF]], <vscale x 2 x i8> [[OP1]], i64 [[SHIFT]], <vscale x 2 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vssra_vx_i8mf4_mu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, size_t shift, size_t vl) {
- return __riscv_vssra_vx_i8mf4_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssra_vx_i8mf4_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vssra_vv_i8mf2_mu
// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vssra.mask.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[MASKEDOFF]], <vscale x 4 x i8> [[OP1]], <vscale x 4 x i8> [[SHIFT]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vssra.mask.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[MASKEDOFF]], <vscale x 4 x i8> [[OP1]], <vscale x 4 x i8> [[SHIFT]], <vscale x 4 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vssra_vv_i8mf2_mu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vuint8mf2_t shift, size_t vl) {
- return __riscv_vssra_vv_i8mf2_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssra_vv_i8mf2_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vssra_vx_i8mf2_mu
// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vssra.mask.nxv4i8.i64.i64(<vscale x 4 x i8> [[MASKEDOFF]], <vscale x 4 x i8> [[OP1]], i64 [[SHIFT]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vssra.mask.nxv4i8.i64.i64(<vscale x 4 x i8> [[MASKEDOFF]], <vscale x 4 x i8> [[OP1]], i64 [[SHIFT]], <vscale x 4 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vssra_vx_i8mf2_mu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, size_t shift, size_t vl) {
- return __riscv_vssra_vx_i8mf2_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssra_vx_i8mf2_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vssra_vv_i8m1_mu
// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vssra.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF]], <vscale x 8 x i8> [[OP1]], <vscale x 8 x i8> [[SHIFT]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vssra.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF]], <vscale x 8 x i8> [[OP1]], <vscale x 8 x i8> [[SHIFT]], <vscale x 8 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vssra_vv_i8m1_mu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vuint8m1_t shift, size_t vl) {
- return __riscv_vssra_vv_i8m1_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssra_vv_i8m1_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vssra_vx_i8m1_mu
// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vssra.mask.nxv8i8.i64.i64(<vscale x 8 x i8> [[MASKEDOFF]], <vscale x 8 x i8> [[OP1]], i64 [[SHIFT]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vssra.mask.nxv8i8.i64.i64(<vscale x 8 x i8> [[MASKEDOFF]], <vscale x 8 x i8> [[OP1]], i64 [[SHIFT]], <vscale x 8 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vssra_vx_i8m1_mu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, size_t shift, size_t vl) {
- return __riscv_vssra_vx_i8m1_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssra_vx_i8m1_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vssra_vv_i8m2_mu
// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vssra.mask.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[MASKEDOFF]], <vscale x 16 x i8> [[OP1]], <vscale x 16 x i8> [[SHIFT]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vssra.mask.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[MASKEDOFF]], <vscale x 16 x i8> [[OP1]], <vscale x 16 x i8> [[SHIFT]], <vscale x 16 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vssra_vv_i8m2_mu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vuint8m2_t shift, size_t vl) {
- return __riscv_vssra_vv_i8m2_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssra_vv_i8m2_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vssra_vx_i8m2_mu
// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vssra.mask.nxv16i8.i64.i64(<vscale x 16 x i8> [[MASKEDOFF]], <vscale x 16 x i8> [[OP1]], i64 [[SHIFT]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vssra.mask.nxv16i8.i64.i64(<vscale x 16 x i8> [[MASKEDOFF]], <vscale x 16 x i8> [[OP1]], i64 [[SHIFT]], <vscale x 16 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vssra_vx_i8m2_mu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, size_t shift, size_t vl) {
- return __riscv_vssra_vx_i8m2_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssra_vx_i8m2_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vssra_vv_i8m4_mu
// CHECK-RV64-SAME: (<vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vssra.mask.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[MASKEDOFF]], <vscale x 32 x i8> [[OP1]], <vscale x 32 x i8> [[SHIFT]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vssra.mask.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[MASKEDOFF]], <vscale x 32 x i8> [[OP1]], <vscale x 32 x i8> [[SHIFT]], <vscale x 32 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vssra_vv_i8m4_mu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vuint8m4_t shift, size_t vl) {
- return __riscv_vssra_vv_i8m4_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssra_vv_i8m4_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vssra_vx_i8m4_mu
// CHECK-RV64-SAME: (<vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vssra.mask.nxv32i8.i64.i64(<vscale x 32 x i8> [[MASKEDOFF]], <vscale x 32 x i8> [[OP1]], i64 [[SHIFT]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vssra.mask.nxv32i8.i64.i64(<vscale x 32 x i8> [[MASKEDOFF]], <vscale x 32 x i8> [[OP1]], i64 [[SHIFT]], <vscale x 32 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vssra_vx_i8m4_mu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, size_t shift, size_t vl) {
- return __riscv_vssra_vx_i8m4_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssra_vx_i8m4_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 64 x i8> @test_vssra_vv_i8m8_mu
// CHECK-RV64-SAME: (<vscale x 64 x i1> [[MASK:%.*]], <vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vssra.mask.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[MASKEDOFF]], <vscale x 64 x i8> [[OP1]], <vscale x 64 x i8> [[SHIFT]], <vscale x 64 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vssra.mask.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[MASKEDOFF]], <vscale x 64 x i8> [[OP1]], <vscale x 64 x i8> [[SHIFT]], <vscale x 64 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vssra_vv_i8m8_mu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vuint8m8_t shift, size_t vl) {
- return __riscv_vssra_vv_i8m8_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssra_vv_i8m8_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 64 x i8> @test_vssra_vx_i8m8_mu
// CHECK-RV64-SAME: (<vscale x 64 x i1> [[MASK:%.*]], <vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vssra.mask.nxv64i8.i64.i64(<vscale x 64 x i8> [[MASKEDOFF]], <vscale x 64 x i8> [[OP1]], i64 [[SHIFT]], <vscale x 64 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vssra.mask.nxv64i8.i64.i64(<vscale x 64 x i8> [[MASKEDOFF]], <vscale x 64 x i8> [[OP1]], i64 [[SHIFT]], <vscale x 64 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vssra_vx_i8m8_mu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, size_t shift, size_t vl) {
- return __riscv_vssra_vx_i8m8_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssra_vx_i8m8_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i16> @test_vssra_vv_i16mf4_mu
// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vssra.mask.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[MASKEDOFF]], <vscale x 1 x i16> [[OP1]], <vscale x 1 x i16> [[SHIFT]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vssra.mask.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[MASKEDOFF]], <vscale x 1 x i16> [[OP1]], <vscale x 1 x i16> [[SHIFT]], <vscale x 1 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vssra_vv_i16mf4_mu(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vuint16mf4_t shift, size_t vl) {
- return __riscv_vssra_vv_i16mf4_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssra_vv_i16mf4_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i16> @test_vssra_vx_i16mf4_mu
// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vssra.mask.nxv1i16.i64.i64(<vscale x 1 x i16> [[MASKEDOFF]], <vscale x 1 x i16> [[OP1]], i64 [[SHIFT]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vssra.mask.nxv1i16.i64.i64(<vscale x 1 x i16> [[MASKEDOFF]], <vscale x 1 x i16> [[OP1]], i64 [[SHIFT]], <vscale x 1 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vssra_vx_i16mf4_mu(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, size_t shift, size_t vl) {
- return __riscv_vssra_vx_i16mf4_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssra_vx_i16mf4_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i16> @test_vssra_vv_i16mf2_mu
// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vssra.mask.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF]], <vscale x 2 x i16> [[OP1]], <vscale x 2 x i16> [[SHIFT]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vssra.mask.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF]], <vscale x 2 x i16> [[OP1]], <vscale x 2 x i16> [[SHIFT]], <vscale x 2 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vssra_vv_i16mf2_mu(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vuint16mf2_t shift, size_t vl) {
- return __riscv_vssra_vv_i16mf2_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssra_vv_i16mf2_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i16> @test_vssra_vx_i16mf2_mu
// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vssra.mask.nxv2i16.i64.i64(<vscale x 2 x i16> [[MASKEDOFF]], <vscale x 2 x i16> [[OP1]], i64 [[SHIFT]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vssra.mask.nxv2i16.i64.i64(<vscale x 2 x i16> [[MASKEDOFF]], <vscale x 2 x i16> [[OP1]], i64 [[SHIFT]], <vscale x 2 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vssra_vx_i16mf2_mu(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, size_t shift, size_t vl) {
- return __riscv_vssra_vx_i16mf2_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssra_vx_i16mf2_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vssra_vv_i16m1_mu
// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vssra.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF]], <vscale x 4 x i16> [[OP1]], <vscale x 4 x i16> [[SHIFT]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vssra.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF]], <vscale x 4 x i16> [[OP1]], <vscale x 4 x i16> [[SHIFT]], <vscale x 4 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vssra_vv_i16m1_mu(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vuint16m1_t shift, size_t vl) {
- return __riscv_vssra_vv_i16m1_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssra_vv_i16m1_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vssra_vx_i16m1_mu
// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vssra.mask.nxv4i16.i64.i64(<vscale x 4 x i16> [[MASKEDOFF]], <vscale x 4 x i16> [[OP1]], i64 [[SHIFT]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vssra.mask.nxv4i16.i64.i64(<vscale x 4 x i16> [[MASKEDOFF]], <vscale x 4 x i16> [[OP1]], i64 [[SHIFT]], <vscale x 4 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vssra_vx_i16m1_mu(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, size_t shift, size_t vl) {
- return __riscv_vssra_vx_i16m1_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssra_vx_i16m1_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i16> @test_vssra_vv_i16m2_mu
// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vssra.mask.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[MASKEDOFF]], <vscale x 8 x i16> [[OP1]], <vscale x 8 x i16> [[SHIFT]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vssra.mask.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[MASKEDOFF]], <vscale x 8 x i16> [[OP1]], <vscale x 8 x i16> [[SHIFT]], <vscale x 8 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vssra_vv_i16m2_mu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vuint16m2_t shift, size_t vl) {
- return __riscv_vssra_vv_i16m2_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssra_vv_i16m2_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i16> @test_vssra_vx_i16m2_mu
// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vssra.mask.nxv8i16.i64.i64(<vscale x 8 x i16> [[MASKEDOFF]], <vscale x 8 x i16> [[OP1]], i64 [[SHIFT]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vssra.mask.nxv8i16.i64.i64(<vscale x 8 x i16> [[MASKEDOFF]], <vscale x 8 x i16> [[OP1]], i64 [[SHIFT]], <vscale x 8 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vssra_vx_i16m2_mu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, size_t shift, size_t vl) {
- return __riscv_vssra_vx_i16m2_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssra_vx_i16m2_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i16> @test_vssra_vv_i16m4_mu
// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vssra.mask.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[MASKEDOFF]], <vscale x 16 x i16> [[OP1]], <vscale x 16 x i16> [[SHIFT]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vssra.mask.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[MASKEDOFF]], <vscale x 16 x i16> [[OP1]], <vscale x 16 x i16> [[SHIFT]], <vscale x 16 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vssra_vv_i16m4_mu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vuint16m4_t shift, size_t vl) {
- return __riscv_vssra_vv_i16m4_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssra_vv_i16m4_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i16> @test_vssra_vx_i16m4_mu
// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vssra.mask.nxv16i16.i64.i64(<vscale x 16 x i16> [[MASKEDOFF]], <vscale x 16 x i16> [[OP1]], i64 [[SHIFT]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vssra.mask.nxv16i16.i64.i64(<vscale x 16 x i16> [[MASKEDOFF]], <vscale x 16 x i16> [[OP1]], i64 [[SHIFT]], <vscale x 16 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vssra_vx_i16m4_mu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, size_t shift, size_t vl) {
- return __riscv_vssra_vx_i16m4_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssra_vx_i16m4_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i16> @test_vssra_vv_i16m8_mu
// CHECK-RV64-SAME: (<vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vssra.mask.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[MASKEDOFF]], <vscale x 32 x i16> [[OP1]], <vscale x 32 x i16> [[SHIFT]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vssra.mask.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[MASKEDOFF]], <vscale x 32 x i16> [[OP1]], <vscale x 32 x i16> [[SHIFT]], <vscale x 32 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vssra_vv_i16m8_mu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vuint16m8_t shift, size_t vl) {
- return __riscv_vssra_vv_i16m8_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssra_vv_i16m8_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i16> @test_vssra_vx_i16m8_mu
// CHECK-RV64-SAME: (<vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vssra.mask.nxv32i16.i64.i64(<vscale x 32 x i16> [[MASKEDOFF]], <vscale x 32 x i16> [[OP1]], i64 [[SHIFT]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vssra.mask.nxv32i16.i64.i64(<vscale x 32 x i16> [[MASKEDOFF]], <vscale x 32 x i16> [[OP1]], i64 [[SHIFT]], <vscale x 32 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vssra_vx_i16m8_mu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, size_t shift, size_t vl) {
- return __riscv_vssra_vx_i16m8_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssra_vx_i16m8_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i32> @test_vssra_vv_i32mf2_mu
// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vssra.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[MASKEDOFF]], <vscale x 1 x i32> [[OP1]], <vscale x 1 x i32> [[SHIFT]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vssra.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[MASKEDOFF]], <vscale x 1 x i32> [[OP1]], <vscale x 1 x i32> [[SHIFT]], <vscale x 1 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vssra_vv_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vuint32mf2_t shift, size_t vl) {
- return __riscv_vssra_vv_i32mf2_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssra_vv_i32mf2_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i32> @test_vssra_vx_i32mf2_mu
// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vssra.mask.nxv1i32.i64.i64(<vscale x 1 x i32> [[MASKEDOFF]], <vscale x 1 x i32> [[OP1]], i64 [[SHIFT]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vssra.mask.nxv1i32.i64.i64(<vscale x 1 x i32> [[MASKEDOFF]], <vscale x 1 x i32> [[OP1]], i64 [[SHIFT]], <vscale x 1 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vssra_vx_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, size_t shift, size_t vl) {
- return __riscv_vssra_vx_i32mf2_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssra_vx_i32mf2_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i32> @test_vssra_vv_i32m1_mu
// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vssra.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[MASKEDOFF]], <vscale x 2 x i32> [[OP1]], <vscale x 2 x i32> [[SHIFT]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vssra.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[MASKEDOFF]], <vscale x 2 x i32> [[OP1]], <vscale x 2 x i32> [[SHIFT]], <vscale x 2 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vssra_vv_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vuint32m1_t shift, size_t vl) {
- return __riscv_vssra_vv_i32m1_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssra_vv_i32m1_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i32> @test_vssra_vx_i32m1_mu
// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vssra.mask.nxv2i32.i64.i64(<vscale x 2 x i32> [[MASKEDOFF]], <vscale x 2 x i32> [[OP1]], i64 [[SHIFT]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vssra.mask.nxv2i32.i64.i64(<vscale x 2 x i32> [[MASKEDOFF]], <vscale x 2 x i32> [[OP1]], i64 [[SHIFT]], <vscale x 2 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vssra_vx_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, size_t shift, size_t vl) {
- return __riscv_vssra_vx_i32m1_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssra_vx_i32m1_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i32> @test_vssra_vv_i32m2_mu
// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vssra.mask.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[MASKEDOFF]], <vscale x 4 x i32> [[OP1]], <vscale x 4 x i32> [[SHIFT]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vssra.mask.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[MASKEDOFF]], <vscale x 4 x i32> [[OP1]], <vscale x 4 x i32> [[SHIFT]], <vscale x 4 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vssra_vv_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vuint32m2_t shift, size_t vl) {
- return __riscv_vssra_vv_i32m2_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssra_vv_i32m2_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i32> @test_vssra_vx_i32m2_mu
// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vssra.mask.nxv4i32.i64.i64(<vscale x 4 x i32> [[MASKEDOFF]], <vscale x 4 x i32> [[OP1]], i64 [[SHIFT]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vssra.mask.nxv4i32.i64.i64(<vscale x 4 x i32> [[MASKEDOFF]], <vscale x 4 x i32> [[OP1]], i64 [[SHIFT]], <vscale x 4 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vssra_vx_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, size_t shift, size_t vl) {
- return __riscv_vssra_vx_i32m2_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssra_vx_i32m2_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i32> @test_vssra_vv_i32m4_mu
// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vssra.mask.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[MASKEDOFF]], <vscale x 8 x i32> [[OP1]], <vscale x 8 x i32> [[SHIFT]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vssra.mask.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[MASKEDOFF]], <vscale x 8 x i32> [[OP1]], <vscale x 8 x i32> [[SHIFT]], <vscale x 8 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vssra_vv_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vuint32m4_t shift, size_t vl) {
- return __riscv_vssra_vv_i32m4_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssra_vv_i32m4_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i32> @test_vssra_vx_i32m4_mu
// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vssra.mask.nxv8i32.i64.i64(<vscale x 8 x i32> [[MASKEDOFF]], <vscale x 8 x i32> [[OP1]], i64 [[SHIFT]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vssra.mask.nxv8i32.i64.i64(<vscale x 8 x i32> [[MASKEDOFF]], <vscale x 8 x i32> [[OP1]], i64 [[SHIFT]], <vscale x 8 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vssra_vx_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, size_t shift, size_t vl) {
- return __riscv_vssra_vx_i32m4_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssra_vx_i32m4_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i32> @test_vssra_vv_i32m8_mu
// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vssra.mask.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[MASKEDOFF]], <vscale x 16 x i32> [[OP1]], <vscale x 16 x i32> [[SHIFT]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vssra.mask.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[MASKEDOFF]], <vscale x 16 x i32> [[OP1]], <vscale x 16 x i32> [[SHIFT]], <vscale x 16 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vssra_vv_i32m8_mu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vuint32m8_t shift, size_t vl) {
- return __riscv_vssra_vv_i32m8_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssra_vv_i32m8_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i32> @test_vssra_vx_i32m8_mu
// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vssra.mask.nxv16i32.i64.i64(<vscale x 16 x i32> [[MASKEDOFF]], <vscale x 16 x i32> [[OP1]], i64 [[SHIFT]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vssra.mask.nxv16i32.i64.i64(<vscale x 16 x i32> [[MASKEDOFF]], <vscale x 16 x i32> [[OP1]], i64 [[SHIFT]], <vscale x 16 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vssra_vx_i32m8_mu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, size_t shift, size_t vl) {
- return __riscv_vssra_vx_i32m8_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssra_vx_i32m8_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i64> @test_vssra_vv_i64m1_mu
// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vssra.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[MASKEDOFF]], <vscale x 1 x i64> [[OP1]], <vscale x 1 x i64> [[SHIFT]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vssra.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[MASKEDOFF]], <vscale x 1 x i64> [[OP1]], <vscale x 1 x i64> [[SHIFT]], <vscale x 1 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vssra_vv_i64m1_mu(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vuint64m1_t shift, size_t vl) {
- return __riscv_vssra_vv_i64m1_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssra_vv_i64m1_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i64> @test_vssra_vx_i64m1_mu
// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vssra.mask.nxv1i64.i64.i64(<vscale x 1 x i64> [[MASKEDOFF]], <vscale x 1 x i64> [[OP1]], i64 [[SHIFT]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vssra.mask.nxv1i64.i64.i64(<vscale x 1 x i64> [[MASKEDOFF]], <vscale x 1 x i64> [[OP1]], i64 [[SHIFT]], <vscale x 1 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vssra_vx_i64m1_mu(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, size_t shift, size_t vl) {
- return __riscv_vssra_vx_i64m1_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssra_vx_i64m1_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i64> @test_vssra_vv_i64m2_mu
// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vssra.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[MASKEDOFF]], <vscale x 2 x i64> [[OP1]], <vscale x 2 x i64> [[SHIFT]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vssra.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[MASKEDOFF]], <vscale x 2 x i64> [[OP1]], <vscale x 2 x i64> [[SHIFT]], <vscale x 2 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vssra_vv_i64m2_mu(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vuint64m2_t shift, size_t vl) {
- return __riscv_vssra_vv_i64m2_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssra_vv_i64m2_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i64> @test_vssra_vx_i64m2_mu
// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vssra.mask.nxv2i64.i64.i64(<vscale x 2 x i64> [[MASKEDOFF]], <vscale x 2 x i64> [[OP1]], i64 [[SHIFT]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vssra.mask.nxv2i64.i64.i64(<vscale x 2 x i64> [[MASKEDOFF]], <vscale x 2 x i64> [[OP1]], i64 [[SHIFT]], <vscale x 2 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vssra_vx_i64m2_mu(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, size_t shift, size_t vl) {
- return __riscv_vssra_vx_i64m2_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssra_vx_i64m2_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i64> @test_vssra_vv_i64m4_mu
// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vssra.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[MASKEDOFF]], <vscale x 4 x i64> [[OP1]], <vscale x 4 x i64> [[SHIFT]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vssra.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[MASKEDOFF]], <vscale x 4 x i64> [[OP1]], <vscale x 4 x i64> [[SHIFT]], <vscale x 4 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vssra_vv_i64m4_mu(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vuint64m4_t shift, size_t vl) {
- return __riscv_vssra_vv_i64m4_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssra_vv_i64m4_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i64> @test_vssra_vx_i64m4_mu
// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vssra.mask.nxv4i64.i64.i64(<vscale x 4 x i64> [[MASKEDOFF]], <vscale x 4 x i64> [[OP1]], i64 [[SHIFT]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vssra.mask.nxv4i64.i64.i64(<vscale x 4 x i64> [[MASKEDOFF]], <vscale x 4 x i64> [[OP1]], i64 [[SHIFT]], <vscale x 4 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vssra_vx_i64m4_mu(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, size_t shift, size_t vl) {
- return __riscv_vssra_vx_i64m4_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssra_vx_i64m4_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i64> @test_vssra_vv_i64m8_mu
// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vssra.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[MASKEDOFF]], <vscale x 8 x i64> [[OP1]], <vscale x 8 x i64> [[SHIFT]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vssra.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[MASKEDOFF]], <vscale x 8 x i64> [[OP1]], <vscale x 8 x i64> [[SHIFT]], <vscale x 8 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vssra_vv_i64m8_mu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vuint64m8_t shift, size_t vl) {
- return __riscv_vssra_vv_i64m8_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssra_vv_i64m8_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i64> @test_vssra_vx_i64m8_mu
// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vssra.mask.nxv8i64.i64.i64(<vscale x 8 x i64> [[MASKEDOFF]], <vscale x 8 x i64> [[OP1]], i64 [[SHIFT]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vssra.mask.nxv8i64.i64.i64(<vscale x 8 x i64> [[MASKEDOFF]], <vscale x 8 x i64> [[OP1]], i64 [[SHIFT]], <vscale x 8 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vssra_vx_i64m8_mu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, size_t shift, size_t vl) {
- return __riscv_vssra_vx_i64m8_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssra_vx_i64m8_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
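+// NOTE: Editorial summary comment (illustrative, not emitted by
+// update_cc_test_checks.py). After this patch the fixed-point scaling-shift
+// builtins take an explicit vxrm argument (here __RISCV_VXRM_RNU, value 0)
+// immediately before vl, and the lowered llvm.riscv.vssra/vssrl intrinsics
+// carry the matching i64 rounding-mode operand ahead of the vl operand, e.g.:
+//   vint8m1_t r = __riscv_vssra_vx_i8m1_mu(mask, maskedoff, op1, shift,
+//                                          __RISCV_VXRM_RNU, vl);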
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vssrl_vv_u8mf8_tu
// CHECK-RV64-SAME: (<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vssrl.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF]], <vscale x 1 x i8> [[OP1]], <vscale x 1 x i8> [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vssrl.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF]], <vscale x 1 x i8> [[OP1]], <vscale x 1 x i8> [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vssrl_vv_u8mf8_tu(vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t shift, size_t vl) {
- return __riscv_vssrl_vv_u8mf8_tu(maskedoff, op1, shift, vl);
+ return __riscv_vssrl_vv_u8mf8_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vssrl_vx_u8mf8_tu
// CHECK-RV64-SAME: (<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vssrl.nxv1i8.i64.i64(<vscale x 1 x i8> [[MASKEDOFF]], <vscale x 1 x i8> [[OP1]], i64 [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vssrl.nxv1i8.i64.i64(<vscale x 1 x i8> [[MASKEDOFF]], <vscale x 1 x i8> [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vssrl_vx_u8mf8_tu(vuint8mf8_t maskedoff, vuint8mf8_t op1, size_t shift, size_t vl) {
- return __riscv_vssrl_vx_u8mf8_tu(maskedoff, op1, shift, vl);
+ return __riscv_vssrl_vx_u8mf8_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vssrl_vv_u8mf4_tu
// CHECK-RV64-SAME: (<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vssrl.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[MASKEDOFF]], <vscale x 2 x i8> [[OP1]], <vscale x 2 x i8> [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vssrl.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[MASKEDOFF]], <vscale x 2 x i8> [[OP1]], <vscale x 2 x i8> [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vssrl_vv_u8mf4_tu(vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t shift, size_t vl) {
- return __riscv_vssrl_vv_u8mf4_tu(maskedoff, op1, shift, vl);
+ return __riscv_vssrl_vv_u8mf4_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vssrl_vx_u8mf4_tu
// CHECK-RV64-SAME: (<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vssrl.nxv2i8.i64.i64(<vscale x 2 x i8> [[MASKEDOFF]], <vscale x 2 x i8> [[OP1]], i64 [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vssrl.nxv2i8.i64.i64(<vscale x 2 x i8> [[MASKEDOFF]], <vscale x 2 x i8> [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vssrl_vx_u8mf4_tu(vuint8mf4_t maskedoff, vuint8mf4_t op1, size_t shift, size_t vl) {
- return __riscv_vssrl_vx_u8mf4_tu(maskedoff, op1, shift, vl);
+ return __riscv_vssrl_vx_u8mf4_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vssrl_vv_u8mf2_tu
// CHECK-RV64-SAME: (<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vssrl.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[MASKEDOFF]], <vscale x 4 x i8> [[OP1]], <vscale x 4 x i8> [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vssrl.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[MASKEDOFF]], <vscale x 4 x i8> [[OP1]], <vscale x 4 x i8> [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vssrl_vv_u8mf2_tu(vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t shift, size_t vl) {
- return __riscv_vssrl_vv_u8mf2_tu(maskedoff, op1, shift, vl);
+ return __riscv_vssrl_vv_u8mf2_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vssrl_vx_u8mf2_tu
// CHECK-RV64-SAME: (<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vssrl.nxv4i8.i64.i64(<vscale x 4 x i8> [[MASKEDOFF]], <vscale x 4 x i8> [[OP1]], i64 [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vssrl.nxv4i8.i64.i64(<vscale x 4 x i8> [[MASKEDOFF]], <vscale x 4 x i8> [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vssrl_vx_u8mf2_tu(vuint8mf2_t maskedoff, vuint8mf2_t op1, size_t shift, size_t vl) {
- return __riscv_vssrl_vx_u8mf2_tu(maskedoff, op1, shift, vl);
+ return __riscv_vssrl_vx_u8mf2_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vssrl_vv_u8m1_tu
// CHECK-RV64-SAME: (<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vssrl.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF]], <vscale x 8 x i8> [[OP1]], <vscale x 8 x i8> [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vssrl.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF]], <vscale x 8 x i8> [[OP1]], <vscale x 8 x i8> [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vssrl_vv_u8m1_tu(vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t shift, size_t vl) {
- return __riscv_vssrl_vv_u8m1_tu(maskedoff, op1, shift, vl);
+ return __riscv_vssrl_vv_u8m1_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vssrl_vx_u8m1_tu
// CHECK-RV64-SAME: (<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vssrl.nxv8i8.i64.i64(<vscale x 8 x i8> [[MASKEDOFF]], <vscale x 8 x i8> [[OP1]], i64 [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vssrl.nxv8i8.i64.i64(<vscale x 8 x i8> [[MASKEDOFF]], <vscale x 8 x i8> [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vssrl_vx_u8m1_tu(vuint8m1_t maskedoff, vuint8m1_t op1, size_t shift, size_t vl) {
- return __riscv_vssrl_vx_u8m1_tu(maskedoff, op1, shift, vl);
+ return __riscv_vssrl_vx_u8m1_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vssrl_vv_u8m2_tu
// CHECK-RV64-SAME: (<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vssrl.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[MASKEDOFF]], <vscale x 16 x i8> [[OP1]], <vscale x 16 x i8> [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vssrl.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[MASKEDOFF]], <vscale x 16 x i8> [[OP1]], <vscale x 16 x i8> [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vssrl_vv_u8m2_tu(vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t shift, size_t vl) {
- return __riscv_vssrl_vv_u8m2_tu(maskedoff, op1, shift, vl);
+ return __riscv_vssrl_vv_u8m2_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vssrl_vx_u8m2_tu
// CHECK-RV64-SAME: (<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vssrl.nxv16i8.i64.i64(<vscale x 16 x i8> [[MASKEDOFF]], <vscale x 16 x i8> [[OP1]], i64 [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vssrl.nxv16i8.i64.i64(<vscale x 16 x i8> [[MASKEDOFF]], <vscale x 16 x i8> [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vssrl_vx_u8m2_tu(vuint8m2_t maskedoff, vuint8m2_t op1, size_t shift, size_t vl) {
- return __riscv_vssrl_vx_u8m2_tu(maskedoff, op1, shift, vl);
+ return __riscv_vssrl_vx_u8m2_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vssrl_vv_u8m4_tu
// CHECK-RV64-SAME: (<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vssrl.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[MASKEDOFF]], <vscale x 32 x i8> [[OP1]], <vscale x 32 x i8> [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vssrl.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[MASKEDOFF]], <vscale x 32 x i8> [[OP1]], <vscale x 32 x i8> [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vssrl_vv_u8m4_tu(vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t shift, size_t vl) {
- return __riscv_vssrl_vv_u8m4_tu(maskedoff, op1, shift, vl);
+ return __riscv_vssrl_vv_u8m4_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vssrl_vx_u8m4_tu
// CHECK-RV64-SAME: (<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vssrl.nxv32i8.i64.i64(<vscale x 32 x i8> [[MASKEDOFF]], <vscale x 32 x i8> [[OP1]], i64 [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vssrl.nxv32i8.i64.i64(<vscale x 32 x i8> [[MASKEDOFF]], <vscale x 32 x i8> [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vssrl_vx_u8m4_tu(vuint8m4_t maskedoff, vuint8m4_t op1, size_t shift, size_t vl) {
- return __riscv_vssrl_vx_u8m4_tu(maskedoff, op1, shift, vl);
+ return __riscv_vssrl_vx_u8m4_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 64 x i8> @test_vssrl_vv_u8m8_tu
// CHECK-RV64-SAME: (<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vssrl.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[MASKEDOFF]], <vscale x 64 x i8> [[OP1]], <vscale x 64 x i8> [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vssrl.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[MASKEDOFF]], <vscale x 64 x i8> [[OP1]], <vscale x 64 x i8> [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vssrl_vv_u8m8_tu(vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t shift, size_t vl) {
- return __riscv_vssrl_vv_u8m8_tu(maskedoff, op1, shift, vl);
+ return __riscv_vssrl_vv_u8m8_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 64 x i8> @test_vssrl_vx_u8m8_tu
// CHECK-RV64-SAME: (<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vssrl.nxv64i8.i64.i64(<vscale x 64 x i8> [[MASKEDOFF]], <vscale x 64 x i8> [[OP1]], i64 [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vssrl.nxv64i8.i64.i64(<vscale x 64 x i8> [[MASKEDOFF]], <vscale x 64 x i8> [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vssrl_vx_u8m8_tu(vuint8m8_t maskedoff, vuint8m8_t op1, size_t shift, size_t vl) {
- return __riscv_vssrl_vx_u8m8_tu(maskedoff, op1, shift, vl);
+ return __riscv_vssrl_vx_u8m8_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i16> @test_vssrl_vv_u16mf4_tu
// CHECK-RV64-SAME: (<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vssrl.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[MASKEDOFF]], <vscale x 1 x i16> [[OP1]], <vscale x 1 x i16> [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vssrl.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[MASKEDOFF]], <vscale x 1 x i16> [[OP1]], <vscale x 1 x i16> [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vssrl_vv_u16mf4_tu(vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t shift, size_t vl) {
- return __riscv_vssrl_vv_u16mf4_tu(maskedoff, op1, shift, vl);
+ return __riscv_vssrl_vv_u16mf4_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i16> @test_vssrl_vx_u16mf4_tu
// CHECK-RV64-SAME: (<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vssrl.nxv1i16.i64.i64(<vscale x 1 x i16> [[MASKEDOFF]], <vscale x 1 x i16> [[OP1]], i64 [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vssrl.nxv1i16.i64.i64(<vscale x 1 x i16> [[MASKEDOFF]], <vscale x 1 x i16> [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vssrl_vx_u16mf4_tu(vuint16mf4_t maskedoff, vuint16mf4_t op1, size_t shift, size_t vl) {
- return __riscv_vssrl_vx_u16mf4_tu(maskedoff, op1, shift, vl);
+ return __riscv_vssrl_vx_u16mf4_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i16> @test_vssrl_vv_u16mf2_tu
// CHECK-RV64-SAME: (<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vssrl.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF]], <vscale x 2 x i16> [[OP1]], <vscale x 2 x i16> [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vssrl.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF]], <vscale x 2 x i16> [[OP1]], <vscale x 2 x i16> [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vssrl_vv_u16mf2_tu(vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t shift, size_t vl) {
- return __riscv_vssrl_vv_u16mf2_tu(maskedoff, op1, shift, vl);
+ return __riscv_vssrl_vv_u16mf2_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i16> @test_vssrl_vx_u16mf2_tu
// CHECK-RV64-SAME: (<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vssrl.nxv2i16.i64.i64(<vscale x 2 x i16> [[MASKEDOFF]], <vscale x 2 x i16> [[OP1]], i64 [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vssrl.nxv2i16.i64.i64(<vscale x 2 x i16> [[MASKEDOFF]], <vscale x 2 x i16> [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vssrl_vx_u16mf2_tu(vuint16mf2_t maskedoff, vuint16mf2_t op1, size_t shift, size_t vl) {
- return __riscv_vssrl_vx_u16mf2_tu(maskedoff, op1, shift, vl);
+ return __riscv_vssrl_vx_u16mf2_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vssrl_vv_u16m1_tu
// CHECK-RV64-SAME: (<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vssrl.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF]], <vscale x 4 x i16> [[OP1]], <vscale x 4 x i16> [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vssrl.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF]], <vscale x 4 x i16> [[OP1]], <vscale x 4 x i16> [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vssrl_vv_u16m1_tu(vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t shift, size_t vl) {
- return __riscv_vssrl_vv_u16m1_tu(maskedoff, op1, shift, vl);
+ return __riscv_vssrl_vv_u16m1_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vssrl_vx_u16m1_tu
// CHECK-RV64-SAME: (<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vssrl.nxv4i16.i64.i64(<vscale x 4 x i16> [[MASKEDOFF]], <vscale x 4 x i16> [[OP1]], i64 [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vssrl.nxv4i16.i64.i64(<vscale x 4 x i16> [[MASKEDOFF]], <vscale x 4 x i16> [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vssrl_vx_u16m1_tu(vuint16m1_t maskedoff, vuint16m1_t op1, size_t shift, size_t vl) {
- return __riscv_vssrl_vx_u16m1_tu(maskedoff, op1, shift, vl);
+ return __riscv_vssrl_vx_u16m1_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i16> @test_vssrl_vv_u16m2_tu
// CHECK-RV64-SAME: (<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vssrl.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[MASKEDOFF]], <vscale x 8 x i16> [[OP1]], <vscale x 8 x i16> [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vssrl.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[MASKEDOFF]], <vscale x 8 x i16> [[OP1]], <vscale x 8 x i16> [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vssrl_vv_u16m2_tu(vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t shift, size_t vl) {
- return __riscv_vssrl_vv_u16m2_tu(maskedoff, op1, shift, vl);
+ return __riscv_vssrl_vv_u16m2_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i16> @test_vssrl_vx_u16m2_tu
// CHECK-RV64-SAME: (<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vssrl.nxv8i16.i64.i64(<vscale x 8 x i16> [[MASKEDOFF]], <vscale x 8 x i16> [[OP1]], i64 [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vssrl.nxv8i16.i64.i64(<vscale x 8 x i16> [[MASKEDOFF]], <vscale x 8 x i16> [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vssrl_vx_u16m2_tu(vuint16m2_t maskedoff, vuint16m2_t op1, size_t shift, size_t vl) {
- return __riscv_vssrl_vx_u16m2_tu(maskedoff, op1, shift, vl);
+ return __riscv_vssrl_vx_u16m2_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i16> @test_vssrl_vv_u16m4_tu
// CHECK-RV64-SAME: (<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vssrl.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[MASKEDOFF]], <vscale x 16 x i16> [[OP1]], <vscale x 16 x i16> [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vssrl.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[MASKEDOFF]], <vscale x 16 x i16> [[OP1]], <vscale x 16 x i16> [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vssrl_vv_u16m4_tu(vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t shift, size_t vl) {
- return __riscv_vssrl_vv_u16m4_tu(maskedoff, op1, shift, vl);
+ return __riscv_vssrl_vv_u16m4_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i16> @test_vssrl_vx_u16m4_tu
// CHECK-RV64-SAME: (<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vssrl.nxv16i16.i64.i64(<vscale x 16 x i16> [[MASKEDOFF]], <vscale x 16 x i16> [[OP1]], i64 [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vssrl.nxv16i16.i64.i64(<vscale x 16 x i16> [[MASKEDOFF]], <vscale x 16 x i16> [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vssrl_vx_u16m4_tu(vuint16m4_t maskedoff, vuint16m4_t op1, size_t shift, size_t vl) {
- return __riscv_vssrl_vx_u16m4_tu(maskedoff, op1, shift, vl);
+ return __riscv_vssrl_vx_u16m4_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i16> @test_vssrl_vv_u16m8_tu
// CHECK-RV64-SAME: (<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vssrl.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[MASKEDOFF]], <vscale x 32 x i16> [[OP1]], <vscale x 32 x i16> [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vssrl.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[MASKEDOFF]], <vscale x 32 x i16> [[OP1]], <vscale x 32 x i16> [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vssrl_vv_u16m8_tu(vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t shift, size_t vl) {
- return __riscv_vssrl_vv_u16m8_tu(maskedoff, op1, shift, vl);
+ return __riscv_vssrl_vv_u16m8_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i16> @test_vssrl_vx_u16m8_tu
// CHECK-RV64-SAME: (<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vssrl.nxv32i16.i64.i64(<vscale x 32 x i16> [[MASKEDOFF]], <vscale x 32 x i16> [[OP1]], i64 [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vssrl.nxv32i16.i64.i64(<vscale x 32 x i16> [[MASKEDOFF]], <vscale x 32 x i16> [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vssrl_vx_u16m8_tu(vuint16m8_t maskedoff, vuint16m8_t op1, size_t shift, size_t vl) {
- return __riscv_vssrl_vx_u16m8_tu(maskedoff, op1, shift, vl);
+ return __riscv_vssrl_vx_u16m8_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i32> @test_vssrl_vv_u32mf2_tu
// CHECK-RV64-SAME: (<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vssrl.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[MASKEDOFF]], <vscale x 1 x i32> [[OP1]], <vscale x 1 x i32> [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vssrl.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[MASKEDOFF]], <vscale x 1 x i32> [[OP1]], <vscale x 1 x i32> [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vssrl_vv_u32mf2_tu(vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t shift, size_t vl) {
- return __riscv_vssrl_vv_u32mf2_tu(maskedoff, op1, shift, vl);
+ return __riscv_vssrl_vv_u32mf2_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i32> @test_vssrl_vx_u32mf2_tu
// CHECK-RV64-SAME: (<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vssrl.nxv1i32.i64.i64(<vscale x 1 x i32> [[MASKEDOFF]], <vscale x 1 x i32> [[OP1]], i64 [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vssrl.nxv1i32.i64.i64(<vscale x 1 x i32> [[MASKEDOFF]], <vscale x 1 x i32> [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vssrl_vx_u32mf2_tu(vuint32mf2_t maskedoff, vuint32mf2_t op1, size_t shift, size_t vl) {
- return __riscv_vssrl_vx_u32mf2_tu(maskedoff, op1, shift, vl);
+ return __riscv_vssrl_vx_u32mf2_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i32> @test_vssrl_vv_u32m1_tu
// CHECK-RV64-SAME: (<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vssrl.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[MASKEDOFF]], <vscale x 2 x i32> [[OP1]], <vscale x 2 x i32> [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vssrl.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[MASKEDOFF]], <vscale x 2 x i32> [[OP1]], <vscale x 2 x i32> [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vssrl_vv_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t shift, size_t vl) {
- return __riscv_vssrl_vv_u32m1_tu(maskedoff, op1, shift, vl);
+ return __riscv_vssrl_vv_u32m1_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i32> @test_vssrl_vx_u32m1_tu
// CHECK-RV64-SAME: (<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vssrl.nxv2i32.i64.i64(<vscale x 2 x i32> [[MASKEDOFF]], <vscale x 2 x i32> [[OP1]], i64 [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vssrl.nxv2i32.i64.i64(<vscale x 2 x i32> [[MASKEDOFF]], <vscale x 2 x i32> [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vssrl_vx_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t op1, size_t shift, size_t vl) {
- return __riscv_vssrl_vx_u32m1_tu(maskedoff, op1, shift, vl);
+ return __riscv_vssrl_vx_u32m1_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i32> @test_vssrl_vv_u32m2_tu
// CHECK-RV64-SAME: (<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vssrl.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[MASKEDOFF]], <vscale x 4 x i32> [[OP1]], <vscale x 4 x i32> [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vssrl.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[MASKEDOFF]], <vscale x 4 x i32> [[OP1]], <vscale x 4 x i32> [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vssrl_vv_u32m2_tu(vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t shift, size_t vl) {
- return __riscv_vssrl_vv_u32m2_tu(maskedoff, op1, shift, vl);
+ return __riscv_vssrl_vv_u32m2_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i32> @test_vssrl_vx_u32m2_tu
// CHECK-RV64-SAME: (<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vssrl.nxv4i32.i64.i64(<vscale x 4 x i32> [[MASKEDOFF]], <vscale x 4 x i32> [[OP1]], i64 [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vssrl.nxv4i32.i64.i64(<vscale x 4 x i32> [[MASKEDOFF]], <vscale x 4 x i32> [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vssrl_vx_u32m2_tu(vuint32m2_t maskedoff, vuint32m2_t op1, size_t shift, size_t vl) {
- return __riscv_vssrl_vx_u32m2_tu(maskedoff, op1, shift, vl);
+ return __riscv_vssrl_vx_u32m2_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i32> @test_vssrl_vv_u32m4_tu
// CHECK-RV64-SAME: (<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vssrl.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[MASKEDOFF]], <vscale x 8 x i32> [[OP1]], <vscale x 8 x i32> [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vssrl.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[MASKEDOFF]], <vscale x 8 x i32> [[OP1]], <vscale x 8 x i32> [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vssrl_vv_u32m4_tu(vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t shift, size_t vl) {
- return __riscv_vssrl_vv_u32m4_tu(maskedoff, op1, shift, vl);
+ return __riscv_vssrl_vv_u32m4_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i32> @test_vssrl_vx_u32m4_tu
// CHECK-RV64-SAME: (<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vssrl.nxv8i32.i64.i64(<vscale x 8 x i32> [[MASKEDOFF]], <vscale x 8 x i32> [[OP1]], i64 [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vssrl.nxv8i32.i64.i64(<vscale x 8 x i32> [[MASKEDOFF]], <vscale x 8 x i32> [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vssrl_vx_u32m4_tu(vuint32m4_t maskedoff, vuint32m4_t op1, size_t shift, size_t vl) {
- return __riscv_vssrl_vx_u32m4_tu(maskedoff, op1, shift, vl);
+ return __riscv_vssrl_vx_u32m4_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i32> @test_vssrl_vv_u32m8_tu
// CHECK-RV64-SAME: (<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vssrl.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[MASKEDOFF]], <vscale x 16 x i32> [[OP1]], <vscale x 16 x i32> [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vssrl.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[MASKEDOFF]], <vscale x 16 x i32> [[OP1]], <vscale x 16 x i32> [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vssrl_vv_u32m8_tu(vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t shift, size_t vl) {
- return __riscv_vssrl_vv_u32m8_tu(maskedoff, op1, shift, vl);
+ return __riscv_vssrl_vv_u32m8_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i32> @test_vssrl_vx_u32m8_tu
// CHECK-RV64-SAME: (<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vssrl.nxv16i32.i64.i64(<vscale x 16 x i32> [[MASKEDOFF]], <vscale x 16 x i32> [[OP1]], i64 [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vssrl.nxv16i32.i64.i64(<vscale x 16 x i32> [[MASKEDOFF]], <vscale x 16 x i32> [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vssrl_vx_u32m8_tu(vuint32m8_t maskedoff, vuint32m8_t op1, size_t shift, size_t vl) {
- return __riscv_vssrl_vx_u32m8_tu(maskedoff, op1, shift, vl);
+ return __riscv_vssrl_vx_u32m8_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i64> @test_vssrl_vv_u64m1_tu
// CHECK-RV64-SAME: (<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vssrl.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[MASKEDOFF]], <vscale x 1 x i64> [[OP1]], <vscale x 1 x i64> [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vssrl.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[MASKEDOFF]], <vscale x 1 x i64> [[OP1]], <vscale x 1 x i64> [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vssrl_vv_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t shift, size_t vl) {
- return __riscv_vssrl_vv_u64m1_tu(maskedoff, op1, shift, vl);
+ return __riscv_vssrl_vv_u64m1_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i64> @test_vssrl_vx_u64m1_tu
// CHECK-RV64-SAME: (<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vssrl.nxv1i64.i64.i64(<vscale x 1 x i64> [[MASKEDOFF]], <vscale x 1 x i64> [[OP1]], i64 [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vssrl.nxv1i64.i64.i64(<vscale x 1 x i64> [[MASKEDOFF]], <vscale x 1 x i64> [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vssrl_vx_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t op1, size_t shift, size_t vl) {
- return __riscv_vssrl_vx_u64m1_tu(maskedoff, op1, shift, vl);
+ return __riscv_vssrl_vx_u64m1_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i64> @test_vssrl_vv_u64m2_tu
// CHECK-RV64-SAME: (<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vssrl.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[MASKEDOFF]], <vscale x 2 x i64> [[OP1]], <vscale x 2 x i64> [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vssrl.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[MASKEDOFF]], <vscale x 2 x i64> [[OP1]], <vscale x 2 x i64> [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vssrl_vv_u64m2_tu(vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t shift, size_t vl) {
- return __riscv_vssrl_vv_u64m2_tu(maskedoff, op1, shift, vl);
+ return __riscv_vssrl_vv_u64m2_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i64> @test_vssrl_vx_u64m2_tu
// CHECK-RV64-SAME: (<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vssrl.nxv2i64.i64.i64(<vscale x 2 x i64> [[MASKEDOFF]], <vscale x 2 x i64> [[OP1]], i64 [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vssrl.nxv2i64.i64.i64(<vscale x 2 x i64> [[MASKEDOFF]], <vscale x 2 x i64> [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vssrl_vx_u64m2_tu(vuint64m2_t maskedoff, vuint64m2_t op1, size_t shift, size_t vl) {
- return __riscv_vssrl_vx_u64m2_tu(maskedoff, op1, shift, vl);
+ return __riscv_vssrl_vx_u64m2_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i64> @test_vssrl_vv_u64m4_tu
// CHECK-RV64-SAME: (<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vssrl.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[MASKEDOFF]], <vscale x 4 x i64> [[OP1]], <vscale x 4 x i64> [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vssrl.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[MASKEDOFF]], <vscale x 4 x i64> [[OP1]], <vscale x 4 x i64> [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vssrl_vv_u64m4_tu(vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t shift, size_t vl) {
- return __riscv_vssrl_vv_u64m4_tu(maskedoff, op1, shift, vl);
+ return __riscv_vssrl_vv_u64m4_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i64> @test_vssrl_vx_u64m4_tu
// CHECK-RV64-SAME: (<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vssrl.nxv4i64.i64.i64(<vscale x 4 x i64> [[MASKEDOFF]], <vscale x 4 x i64> [[OP1]], i64 [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vssrl.nxv4i64.i64.i64(<vscale x 4 x i64> [[MASKEDOFF]], <vscale x 4 x i64> [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vssrl_vx_u64m4_tu(vuint64m4_t maskedoff, vuint64m4_t op1, size_t shift, size_t vl) {
- return __riscv_vssrl_vx_u64m4_tu(maskedoff, op1, shift, vl);
+ return __riscv_vssrl_vx_u64m4_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i64> @test_vssrl_vv_u64m8_tu
// CHECK-RV64-SAME: (<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vssrl.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[MASKEDOFF]], <vscale x 8 x i64> [[OP1]], <vscale x 8 x i64> [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vssrl.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[MASKEDOFF]], <vscale x 8 x i64> [[OP1]], <vscale x 8 x i64> [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vssrl_vv_u64m8_tu(vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t shift, size_t vl) {
- return __riscv_vssrl_vv_u64m8_tu(maskedoff, op1, shift, vl);
+ return __riscv_vssrl_vv_u64m8_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i64> @test_vssrl_vx_u64m8_tu
// CHECK-RV64-SAME: (<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vssrl.nxv8i64.i64.i64(<vscale x 8 x i64> [[MASKEDOFF]], <vscale x 8 x i64> [[OP1]], i64 [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vssrl.nxv8i64.i64.i64(<vscale x 8 x i64> [[MASKEDOFF]], <vscale x 8 x i64> [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vssrl_vx_u64m8_tu(vuint64m8_t maskedoff, vuint64m8_t op1, size_t shift, size_t vl) {
- return __riscv_vssrl_vx_u64m8_tu(maskedoff, op1, shift, vl);
+ return __riscv_vssrl_vx_u64m8_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vssrl_vv_u8mf8_tum
// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vssrl.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF]], <vscale x 1 x i8> [[OP1]], <vscale x 1 x i8> [[SHIFT]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vssrl.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF]], <vscale x 1 x i8> [[OP1]], <vscale x 1 x i8> [[SHIFT]], <vscale x 1 x i1> [[MASK]], i64 0, i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vssrl_vv_u8mf8_tum(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t shift, size_t vl) {
- return __riscv_vssrl_vv_u8mf8_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssrl_vv_u8mf8_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vssrl_vx_u8mf8_tum
// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vssrl.mask.nxv1i8.i64.i64(<vscale x 1 x i8> [[MASKEDOFF]], <vscale x 1 x i8> [[OP1]], i64 [[SHIFT]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vssrl.mask.nxv1i8.i64.i64(<vscale x 1 x i8> [[MASKEDOFF]], <vscale x 1 x i8> [[OP1]], i64 [[SHIFT]], <vscale x 1 x i1> [[MASK]], i64 0, i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vssrl_vx_u8mf8_tum(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, size_t shift, size_t vl) {
- return __riscv_vssrl_vx_u8mf8_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssrl_vx_u8mf8_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vssrl_vv_u8mf4_tum
// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vssrl.mask.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[MASKEDOFF]], <vscale x 2 x i8> [[OP1]], <vscale x 2 x i8> [[SHIFT]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vssrl.mask.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[MASKEDOFF]], <vscale x 2 x i8> [[OP1]], <vscale x 2 x i8> [[SHIFT]], <vscale x 2 x i1> [[MASK]], i64 0, i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vssrl_vv_u8mf4_tum(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t shift, size_t vl) {
- return __riscv_vssrl_vv_u8mf4_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssrl_vv_u8mf4_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vssrl_vx_u8mf4_tum
// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vssrl.mask.nxv2i8.i64.i64(<vscale x 2 x i8> [[MASKEDOFF]], <vscale x 2 x i8> [[OP1]], i64 [[SHIFT]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vssrl.mask.nxv2i8.i64.i64(<vscale x 2 x i8> [[MASKEDOFF]], <vscale x 2 x i8> [[OP1]], i64 [[SHIFT]], <vscale x 2 x i1> [[MASK]], i64 0, i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vssrl_vx_u8mf4_tum(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, size_t shift, size_t vl) {
- return __riscv_vssrl_vx_u8mf4_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssrl_vx_u8mf4_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vssrl_vv_u8mf2_tum
// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vssrl.mask.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[MASKEDOFF]], <vscale x 4 x i8> [[OP1]], <vscale x 4 x i8> [[SHIFT]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vssrl.mask.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[MASKEDOFF]], <vscale x 4 x i8> [[OP1]], <vscale x 4 x i8> [[SHIFT]], <vscale x 4 x i1> [[MASK]], i64 0, i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vssrl_vv_u8mf2_tum(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t shift, size_t vl) {
- return __riscv_vssrl_vv_u8mf2_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssrl_vv_u8mf2_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vssrl_vx_u8mf2_tum
// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vssrl.mask.nxv4i8.i64.i64(<vscale x 4 x i8> [[MASKEDOFF]], <vscale x 4 x i8> [[OP1]], i64 [[SHIFT]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vssrl.mask.nxv4i8.i64.i64(<vscale x 4 x i8> [[MASKEDOFF]], <vscale x 4 x i8> [[OP1]], i64 [[SHIFT]], <vscale x 4 x i1> [[MASK]], i64 0, i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vssrl_vx_u8mf2_tum(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, size_t shift, size_t vl) {
- return __riscv_vssrl_vx_u8mf2_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssrl_vx_u8mf2_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vssrl_vv_u8m1_tum
// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vssrl.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF]], <vscale x 8 x i8> [[OP1]], <vscale x 8 x i8> [[SHIFT]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vssrl.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF]], <vscale x 8 x i8> [[OP1]], <vscale x 8 x i8> [[SHIFT]], <vscale x 8 x i1> [[MASK]], i64 0, i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vssrl_vv_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t shift, size_t vl) {
- return __riscv_vssrl_vv_u8m1_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssrl_vv_u8m1_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vssrl_vx_u8m1_tum
// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vssrl.mask.nxv8i8.i64.i64(<vscale x 8 x i8> [[MASKEDOFF]], <vscale x 8 x i8> [[OP1]], i64 [[SHIFT]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vssrl.mask.nxv8i8.i64.i64(<vscale x 8 x i8> [[MASKEDOFF]], <vscale x 8 x i8> [[OP1]], i64 [[SHIFT]], <vscale x 8 x i1> [[MASK]], i64 0, i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vssrl_vx_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, size_t shift, size_t vl) {
- return __riscv_vssrl_vx_u8m1_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssrl_vx_u8m1_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vssrl_vv_u8m2_tum
// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vssrl.mask.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[MASKEDOFF]], <vscale x 16 x i8> [[OP1]], <vscale x 16 x i8> [[SHIFT]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vssrl.mask.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[MASKEDOFF]], <vscale x 16 x i8> [[OP1]], <vscale x 16 x i8> [[SHIFT]], <vscale x 16 x i1> [[MASK]], i64 0, i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vssrl_vv_u8m2_tum(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t shift, size_t vl) {
- return __riscv_vssrl_vv_u8m2_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssrl_vv_u8m2_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vssrl_vx_u8m2_tum
// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vssrl.mask.nxv16i8.i64.i64(<vscale x 16 x i8> [[MASKEDOFF]], <vscale x 16 x i8> [[OP1]], i64 [[SHIFT]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vssrl.mask.nxv16i8.i64.i64(<vscale x 16 x i8> [[MASKEDOFF]], <vscale x 16 x i8> [[OP1]], i64 [[SHIFT]], <vscale x 16 x i1> [[MASK]], i64 0, i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vssrl_vx_u8m2_tum(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, size_t shift, size_t vl) {
- return __riscv_vssrl_vx_u8m2_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssrl_vx_u8m2_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vssrl_vv_u8m4_tum
// CHECK-RV64-SAME: (<vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vssrl.mask.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[MASKEDOFF]], <vscale x 32 x i8> [[OP1]], <vscale x 32 x i8> [[SHIFT]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vssrl.mask.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[MASKEDOFF]], <vscale x 32 x i8> [[OP1]], <vscale x 32 x i8> [[SHIFT]], <vscale x 32 x i1> [[MASK]], i64 0, i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vssrl_vv_u8m4_tum(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t shift, size_t vl) {
- return __riscv_vssrl_vv_u8m4_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssrl_vv_u8m4_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vssrl_vx_u8m4_tum
// CHECK-RV64-SAME: (<vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vssrl.mask.nxv32i8.i64.i64(<vscale x 32 x i8> [[MASKEDOFF]], <vscale x 32 x i8> [[OP1]], i64 [[SHIFT]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vssrl.mask.nxv32i8.i64.i64(<vscale x 32 x i8> [[MASKEDOFF]], <vscale x 32 x i8> [[OP1]], i64 [[SHIFT]], <vscale x 32 x i1> [[MASK]], i64 0, i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vssrl_vx_u8m4_tum(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, size_t shift, size_t vl) {
- return __riscv_vssrl_vx_u8m4_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssrl_vx_u8m4_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 64 x i8> @test_vssrl_vv_u8m8_tum
// CHECK-RV64-SAME: (<vscale x 64 x i1> [[MASK:%.*]], <vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vssrl.mask.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[MASKEDOFF]], <vscale x 64 x i8> [[OP1]], <vscale x 64 x i8> [[SHIFT]], <vscale x 64 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vssrl.mask.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[MASKEDOFF]], <vscale x 64 x i8> [[OP1]], <vscale x 64 x i8> [[SHIFT]], <vscale x 64 x i1> [[MASK]], i64 0, i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vssrl_vv_u8m8_tum(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t shift, size_t vl) {
- return __riscv_vssrl_vv_u8m8_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssrl_vv_u8m8_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 64 x i8> @test_vssrl_vx_u8m8_tum
// CHECK-RV64-SAME: (<vscale x 64 x i1> [[MASK:%.*]], <vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vssrl.mask.nxv64i8.i64.i64(<vscale x 64 x i8> [[MASKEDOFF]], <vscale x 64 x i8> [[OP1]], i64 [[SHIFT]], <vscale x 64 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vssrl.mask.nxv64i8.i64.i64(<vscale x 64 x i8> [[MASKEDOFF]], <vscale x 64 x i8> [[OP1]], i64 [[SHIFT]], <vscale x 64 x i1> [[MASK]], i64 0, i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vssrl_vx_u8m8_tum(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, size_t shift, size_t vl) {
- return __riscv_vssrl_vx_u8m8_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssrl_vx_u8m8_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i16> @test_vssrl_vv_u16mf4_tum
// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vssrl.mask.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[MASKEDOFF]], <vscale x 1 x i16> [[OP1]], <vscale x 1 x i16> [[SHIFT]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vssrl.mask.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[MASKEDOFF]], <vscale x 1 x i16> [[OP1]], <vscale x 1 x i16> [[SHIFT]], <vscale x 1 x i1> [[MASK]], i64 0, i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vssrl_vv_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t shift, size_t vl) {
- return __riscv_vssrl_vv_u16mf4_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssrl_vv_u16mf4_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i16> @test_vssrl_vx_u16mf4_tum
// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vssrl.mask.nxv1i16.i64.i64(<vscale x 1 x i16> [[MASKEDOFF]], <vscale x 1 x i16> [[OP1]], i64 [[SHIFT]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vssrl.mask.nxv1i16.i64.i64(<vscale x 1 x i16> [[MASKEDOFF]], <vscale x 1 x i16> [[OP1]], i64 [[SHIFT]], <vscale x 1 x i1> [[MASK]], i64 0, i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vssrl_vx_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, size_t shift, size_t vl) {
- return __riscv_vssrl_vx_u16mf4_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssrl_vx_u16mf4_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i16> @test_vssrl_vv_u16mf2_tum
// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vssrl.mask.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF]], <vscale x 2 x i16> [[OP1]], <vscale x 2 x i16> [[SHIFT]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vssrl.mask.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF]], <vscale x 2 x i16> [[OP1]], <vscale x 2 x i16> [[SHIFT]], <vscale x 2 x i1> [[MASK]], i64 0, i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vssrl_vv_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t shift, size_t vl) {
- return __riscv_vssrl_vv_u16mf2_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssrl_vv_u16mf2_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i16> @test_vssrl_vx_u16mf2_tum
// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vssrl.mask.nxv2i16.i64.i64(<vscale x 2 x i16> [[MASKEDOFF]], <vscale x 2 x i16> [[OP1]], i64 [[SHIFT]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vssrl.mask.nxv2i16.i64.i64(<vscale x 2 x i16> [[MASKEDOFF]], <vscale x 2 x i16> [[OP1]], i64 [[SHIFT]], <vscale x 2 x i1> [[MASK]], i64 0, i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vssrl_vx_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, size_t shift, size_t vl) {
- return __riscv_vssrl_vx_u16mf2_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssrl_vx_u16mf2_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vssrl_vv_u16m1_tum
// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vssrl.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF]], <vscale x 4 x i16> [[OP1]], <vscale x 4 x i16> [[SHIFT]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vssrl.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF]], <vscale x 4 x i16> [[OP1]], <vscale x 4 x i16> [[SHIFT]], <vscale x 4 x i1> [[MASK]], i64 0, i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vssrl_vv_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t shift, size_t vl) {
- return __riscv_vssrl_vv_u16m1_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssrl_vv_u16m1_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vssrl_vx_u16m1_tum
// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vssrl.mask.nxv4i16.i64.i64(<vscale x 4 x i16> [[MASKEDOFF]], <vscale x 4 x i16> [[OP1]], i64 [[SHIFT]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vssrl.mask.nxv4i16.i64.i64(<vscale x 4 x i16> [[MASKEDOFF]], <vscale x 4 x i16> [[OP1]], i64 [[SHIFT]], <vscale x 4 x i1> [[MASK]], i64 0, i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vssrl_vx_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, size_t shift, size_t vl) {
- return __riscv_vssrl_vx_u16m1_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssrl_vx_u16m1_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i16> @test_vssrl_vv_u16m2_tum
// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vssrl.mask.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[MASKEDOFF]], <vscale x 8 x i16> [[OP1]], <vscale x 8 x i16> [[SHIFT]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vssrl.mask.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[MASKEDOFF]], <vscale x 8 x i16> [[OP1]], <vscale x 8 x i16> [[SHIFT]], <vscale x 8 x i1> [[MASK]], i64 0, i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vssrl_vv_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t shift, size_t vl) {
- return __riscv_vssrl_vv_u16m2_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssrl_vv_u16m2_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i16> @test_vssrl_vx_u16m2_tum
// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vssrl.mask.nxv8i16.i64.i64(<vscale x 8 x i16> [[MASKEDOFF]], <vscale x 8 x i16> [[OP1]], i64 [[SHIFT]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vssrl.mask.nxv8i16.i64.i64(<vscale x 8 x i16> [[MASKEDOFF]], <vscale x 8 x i16> [[OP1]], i64 [[SHIFT]], <vscale x 8 x i1> [[MASK]], i64 0, i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vssrl_vx_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, size_t shift, size_t vl) {
- return __riscv_vssrl_vx_u16m2_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssrl_vx_u16m2_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i16> @test_vssrl_vv_u16m4_tum
// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vssrl.mask.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[MASKEDOFF]], <vscale x 16 x i16> [[OP1]], <vscale x 16 x i16> [[SHIFT]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vssrl.mask.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[MASKEDOFF]], <vscale x 16 x i16> [[OP1]], <vscale x 16 x i16> [[SHIFT]], <vscale x 16 x i1> [[MASK]], i64 0, i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vssrl_vv_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t shift, size_t vl) {
- return __riscv_vssrl_vv_u16m4_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssrl_vv_u16m4_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i16> @test_vssrl_vx_u16m4_tum
// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vssrl.mask.nxv16i16.i64.i64(<vscale x 16 x i16> [[MASKEDOFF]], <vscale x 16 x i16> [[OP1]], i64 [[SHIFT]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vssrl.mask.nxv16i16.i64.i64(<vscale x 16 x i16> [[MASKEDOFF]], <vscale x 16 x i16> [[OP1]], i64 [[SHIFT]], <vscale x 16 x i1> [[MASK]], i64 0, i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vssrl_vx_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, size_t shift, size_t vl) {
- return __riscv_vssrl_vx_u16m4_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssrl_vx_u16m4_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i16> @test_vssrl_vv_u16m8_tum
// CHECK-RV64-SAME: (<vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vssrl.mask.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[MASKEDOFF]], <vscale x 32 x i16> [[OP1]], <vscale x 32 x i16> [[SHIFT]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vssrl.mask.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[MASKEDOFF]], <vscale x 32 x i16> [[OP1]], <vscale x 32 x i16> [[SHIFT]], <vscale x 32 x i1> [[MASK]], i64 0, i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vssrl_vv_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t shift, size_t vl) {
- return __riscv_vssrl_vv_u16m8_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssrl_vv_u16m8_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i16> @test_vssrl_vx_u16m8_tum
// CHECK-RV64-SAME: (<vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vssrl.mask.nxv32i16.i64.i64(<vscale x 32 x i16> [[MASKEDOFF]], <vscale x 32 x i16> [[OP1]], i64 [[SHIFT]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vssrl.mask.nxv32i16.i64.i64(<vscale x 32 x i16> [[MASKEDOFF]], <vscale x 32 x i16> [[OP1]], i64 [[SHIFT]], <vscale x 32 x i1> [[MASK]], i64 0, i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vssrl_vx_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, size_t shift, size_t vl) {
- return __riscv_vssrl_vx_u16m8_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssrl_vx_u16m8_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i32> @test_vssrl_vv_u32mf2_tum
// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vssrl.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[MASKEDOFF]], <vscale x 1 x i32> [[OP1]], <vscale x 1 x i32> [[SHIFT]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vssrl.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[MASKEDOFF]], <vscale x 1 x i32> [[OP1]], <vscale x 1 x i32> [[SHIFT]], <vscale x 1 x i1> [[MASK]], i64 0, i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vssrl_vv_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t shift, size_t vl) {
- return __riscv_vssrl_vv_u32mf2_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssrl_vv_u32mf2_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i32> @test_vssrl_vx_u32mf2_tum
// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vssrl.mask.nxv1i32.i64.i64(<vscale x 1 x i32> [[MASKEDOFF]], <vscale x 1 x i32> [[OP1]], i64 [[SHIFT]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vssrl.mask.nxv1i32.i64.i64(<vscale x 1 x i32> [[MASKEDOFF]], <vscale x 1 x i32> [[OP1]], i64 [[SHIFT]], <vscale x 1 x i1> [[MASK]], i64 0, i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vssrl_vx_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, size_t shift, size_t vl) {
- return __riscv_vssrl_vx_u32mf2_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssrl_vx_u32mf2_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i32> @test_vssrl_vv_u32m1_tum
// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vssrl.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[MASKEDOFF]], <vscale x 2 x i32> [[OP1]], <vscale x 2 x i32> [[SHIFT]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vssrl.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[MASKEDOFF]], <vscale x 2 x i32> [[OP1]], <vscale x 2 x i32> [[SHIFT]], <vscale x 2 x i1> [[MASK]], i64 0, i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vssrl_vv_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t shift, size_t vl) {
- return __riscv_vssrl_vv_u32m1_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssrl_vv_u32m1_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i32> @test_vssrl_vx_u32m1_tum
// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vssrl.mask.nxv2i32.i64.i64(<vscale x 2 x i32> [[MASKEDOFF]], <vscale x 2 x i32> [[OP1]], i64 [[SHIFT]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vssrl.mask.nxv2i32.i64.i64(<vscale x 2 x i32> [[MASKEDOFF]], <vscale x 2 x i32> [[OP1]], i64 [[SHIFT]], <vscale x 2 x i1> [[MASK]], i64 0, i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vssrl_vx_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, size_t shift, size_t vl) {
- return __riscv_vssrl_vx_u32m1_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssrl_vx_u32m1_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i32> @test_vssrl_vv_u32m2_tum
// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vssrl.mask.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[MASKEDOFF]], <vscale x 4 x i32> [[OP1]], <vscale x 4 x i32> [[SHIFT]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vssrl.mask.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[MASKEDOFF]], <vscale x 4 x i32> [[OP1]], <vscale x 4 x i32> [[SHIFT]], <vscale x 4 x i1> [[MASK]], i64 0, i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vssrl_vv_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t shift, size_t vl) {
- return __riscv_vssrl_vv_u32m2_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssrl_vv_u32m2_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i32> @test_vssrl_vx_u32m2_tum
// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vssrl.mask.nxv4i32.i64.i64(<vscale x 4 x i32> [[MASKEDOFF]], <vscale x 4 x i32> [[OP1]], i64 [[SHIFT]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vssrl.mask.nxv4i32.i64.i64(<vscale x 4 x i32> [[MASKEDOFF]], <vscale x 4 x i32> [[OP1]], i64 [[SHIFT]], <vscale x 4 x i1> [[MASK]], i64 0, i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vssrl_vx_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, size_t shift, size_t vl) {
- return __riscv_vssrl_vx_u32m2_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssrl_vx_u32m2_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i32> @test_vssrl_vv_u32m4_tum
// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vssrl.mask.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[MASKEDOFF]], <vscale x 8 x i32> [[OP1]], <vscale x 8 x i32> [[SHIFT]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vssrl.mask.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[MASKEDOFF]], <vscale x 8 x i32> [[OP1]], <vscale x 8 x i32> [[SHIFT]], <vscale x 8 x i1> [[MASK]], i64 0, i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vssrl_vv_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t shift, size_t vl) {
- return __riscv_vssrl_vv_u32m4_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssrl_vv_u32m4_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i32> @test_vssrl_vx_u32m4_tum
// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vssrl.mask.nxv8i32.i64.i64(<vscale x 8 x i32> [[MASKEDOFF]], <vscale x 8 x i32> [[OP1]], i64 [[SHIFT]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vssrl.mask.nxv8i32.i64.i64(<vscale x 8 x i32> [[MASKEDOFF]], <vscale x 8 x i32> [[OP1]], i64 [[SHIFT]], <vscale x 8 x i1> [[MASK]], i64 0, i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vssrl_vx_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, size_t shift, size_t vl) {
- return __riscv_vssrl_vx_u32m4_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssrl_vx_u32m4_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i32> @test_vssrl_vv_u32m8_tum
// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vssrl.mask.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[MASKEDOFF]], <vscale x 16 x i32> [[OP1]], <vscale x 16 x i32> [[SHIFT]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vssrl.mask.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[MASKEDOFF]], <vscale x 16 x i32> [[OP1]], <vscale x 16 x i32> [[SHIFT]], <vscale x 16 x i1> [[MASK]], i64 0, i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vssrl_vv_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t shift, size_t vl) {
- return __riscv_vssrl_vv_u32m8_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssrl_vv_u32m8_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i32> @test_vssrl_vx_u32m8_tum
// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vssrl.mask.nxv16i32.i64.i64(<vscale x 16 x i32> [[MASKEDOFF]], <vscale x 16 x i32> [[OP1]], i64 [[SHIFT]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vssrl.mask.nxv16i32.i64.i64(<vscale x 16 x i32> [[MASKEDOFF]], <vscale x 16 x i32> [[OP1]], i64 [[SHIFT]], <vscale x 16 x i1> [[MASK]], i64 0, i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vssrl_vx_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, size_t shift, size_t vl) {
- return __riscv_vssrl_vx_u32m8_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssrl_vx_u32m8_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i64> @test_vssrl_vv_u64m1_tum
// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vssrl.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[MASKEDOFF]], <vscale x 1 x i64> [[OP1]], <vscale x 1 x i64> [[SHIFT]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vssrl.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[MASKEDOFF]], <vscale x 1 x i64> [[OP1]], <vscale x 1 x i64> [[SHIFT]], <vscale x 1 x i1> [[MASK]], i64 0, i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vssrl_vv_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t shift, size_t vl) {
- return __riscv_vssrl_vv_u64m1_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssrl_vv_u64m1_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i64> @test_vssrl_vx_u64m1_tum
// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vssrl.mask.nxv1i64.i64.i64(<vscale x 1 x i64> [[MASKEDOFF]], <vscale x 1 x i64> [[OP1]], i64 [[SHIFT]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vssrl.mask.nxv1i64.i64.i64(<vscale x 1 x i64> [[MASKEDOFF]], <vscale x 1 x i64> [[OP1]], i64 [[SHIFT]], <vscale x 1 x i1> [[MASK]], i64 0, i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vssrl_vx_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, size_t shift, size_t vl) {
- return __riscv_vssrl_vx_u64m1_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssrl_vx_u64m1_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i64> @test_vssrl_vv_u64m2_tum
// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vssrl.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[MASKEDOFF]], <vscale x 2 x i64> [[OP1]], <vscale x 2 x i64> [[SHIFT]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vssrl.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[MASKEDOFF]], <vscale x 2 x i64> [[OP1]], <vscale x 2 x i64> [[SHIFT]], <vscale x 2 x i1> [[MASK]], i64 0, i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vssrl_vv_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t shift, size_t vl) {
- return __riscv_vssrl_vv_u64m2_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssrl_vv_u64m2_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i64> @test_vssrl_vx_u64m2_tum
// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vssrl.mask.nxv2i64.i64.i64(<vscale x 2 x i64> [[MASKEDOFF]], <vscale x 2 x i64> [[OP1]], i64 [[SHIFT]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vssrl.mask.nxv2i64.i64.i64(<vscale x 2 x i64> [[MASKEDOFF]], <vscale x 2 x i64> [[OP1]], i64 [[SHIFT]], <vscale x 2 x i1> [[MASK]], i64 0, i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vssrl_vx_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, size_t shift, size_t vl) {
- return __riscv_vssrl_vx_u64m2_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssrl_vx_u64m2_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i64> @test_vssrl_vv_u64m4_tum
// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vssrl.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[MASKEDOFF]], <vscale x 4 x i64> [[OP1]], <vscale x 4 x i64> [[SHIFT]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vssrl.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[MASKEDOFF]], <vscale x 4 x i64> [[OP1]], <vscale x 4 x i64> [[SHIFT]], <vscale x 4 x i1> [[MASK]], i64 0, i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vssrl_vv_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t shift, size_t vl) {
- return __riscv_vssrl_vv_u64m4_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssrl_vv_u64m4_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i64> @test_vssrl_vx_u64m4_tum
// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vssrl.mask.nxv4i64.i64.i64(<vscale x 4 x i64> [[MASKEDOFF]], <vscale x 4 x i64> [[OP1]], i64 [[SHIFT]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vssrl.mask.nxv4i64.i64.i64(<vscale x 4 x i64> [[MASKEDOFF]], <vscale x 4 x i64> [[OP1]], i64 [[SHIFT]], <vscale x 4 x i1> [[MASK]], i64 0, i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vssrl_vx_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, size_t shift, size_t vl) {
- return __riscv_vssrl_vx_u64m4_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssrl_vx_u64m4_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i64> @test_vssrl_vv_u64m8_tum
// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vssrl.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[MASKEDOFF]], <vscale x 8 x i64> [[OP1]], <vscale x 8 x i64> [[SHIFT]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vssrl.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[MASKEDOFF]], <vscale x 8 x i64> [[OP1]], <vscale x 8 x i64> [[SHIFT]], <vscale x 8 x i1> [[MASK]], i64 0, i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vssrl_vv_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t shift, size_t vl) {
- return __riscv_vssrl_vv_u64m8_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssrl_vv_u64m8_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i64> @test_vssrl_vx_u64m8_tum
// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vssrl.mask.nxv8i64.i64.i64(<vscale x 8 x i64> [[MASKEDOFF]], <vscale x 8 x i64> [[OP1]], i64 [[SHIFT]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vssrl.mask.nxv8i64.i64.i64(<vscale x 8 x i64> [[MASKEDOFF]], <vscale x 8 x i64> [[OP1]], i64 [[SHIFT]], <vscale x 8 x i1> [[MASK]], i64 0, i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vssrl_vx_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, size_t shift, size_t vl) {
- return __riscv_vssrl_vx_u64m8_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssrl_vx_u64m8_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vssrl_vv_u8mf8_tumu
// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vssrl.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF]], <vscale x 1 x i8> [[OP1]], <vscale x 1 x i8> [[SHIFT]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vssrl.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF]], <vscale x 1 x i8> [[OP1]], <vscale x 1 x i8> [[SHIFT]], <vscale x 1 x i1> [[MASK]], i64 0, i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vssrl_vv_u8mf8_tumu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t shift, size_t vl) {
- return __riscv_vssrl_vv_u8mf8_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssrl_vv_u8mf8_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vssrl_vx_u8mf8_tumu
// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vssrl.mask.nxv1i8.i64.i64(<vscale x 1 x i8> [[MASKEDOFF]], <vscale x 1 x i8> [[OP1]], i64 [[SHIFT]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vssrl.mask.nxv1i8.i64.i64(<vscale x 1 x i8> [[MASKEDOFF]], <vscale x 1 x i8> [[OP1]], i64 [[SHIFT]], <vscale x 1 x i1> [[MASK]], i64 0, i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vssrl_vx_u8mf8_tumu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, size_t shift, size_t vl) {
- return __riscv_vssrl_vx_u8mf8_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssrl_vx_u8mf8_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vssrl_vv_u8mf4_tumu
// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vssrl.mask.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[MASKEDOFF]], <vscale x 2 x i8> [[OP1]], <vscale x 2 x i8> [[SHIFT]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vssrl.mask.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[MASKEDOFF]], <vscale x 2 x i8> [[OP1]], <vscale x 2 x i8> [[SHIFT]], <vscale x 2 x i1> [[MASK]], i64 0, i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vssrl_vv_u8mf4_tumu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t shift, size_t vl) {
- return __riscv_vssrl_vv_u8mf4_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssrl_vv_u8mf4_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vssrl_vx_u8mf4_tumu
// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vssrl.mask.nxv2i8.i64.i64(<vscale x 2 x i8> [[MASKEDOFF]], <vscale x 2 x i8> [[OP1]], i64 [[SHIFT]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vssrl.mask.nxv2i8.i64.i64(<vscale x 2 x i8> [[MASKEDOFF]], <vscale x 2 x i8> [[OP1]], i64 [[SHIFT]], <vscale x 2 x i1> [[MASK]], i64 0, i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vssrl_vx_u8mf4_tumu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, size_t shift, size_t vl) {
- return __riscv_vssrl_vx_u8mf4_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssrl_vx_u8mf4_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vssrl_vv_u8mf2_tumu
// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vssrl.mask.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[MASKEDOFF]], <vscale x 4 x i8> [[OP1]], <vscale x 4 x i8> [[SHIFT]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vssrl.mask.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[MASKEDOFF]], <vscale x 4 x i8> [[OP1]], <vscale x 4 x i8> [[SHIFT]], <vscale x 4 x i1> [[MASK]], i64 0, i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vssrl_vv_u8mf2_tumu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t shift, size_t vl) {
- return __riscv_vssrl_vv_u8mf2_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssrl_vv_u8mf2_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vssrl_vx_u8mf2_tumu
// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vssrl.mask.nxv4i8.i64.i64(<vscale x 4 x i8> [[MASKEDOFF]], <vscale x 4 x i8> [[OP1]], i64 [[SHIFT]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vssrl.mask.nxv4i8.i64.i64(<vscale x 4 x i8> [[MASKEDOFF]], <vscale x 4 x i8> [[OP1]], i64 [[SHIFT]], <vscale x 4 x i1> [[MASK]], i64 0, i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vssrl_vx_u8mf2_tumu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, size_t shift, size_t vl) {
- return __riscv_vssrl_vx_u8mf2_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssrl_vx_u8mf2_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vssrl_vv_u8m1_tumu
// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vssrl.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF]], <vscale x 8 x i8> [[OP1]], <vscale x 8 x i8> [[SHIFT]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vssrl.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF]], <vscale x 8 x i8> [[OP1]], <vscale x 8 x i8> [[SHIFT]], <vscale x 8 x i1> [[MASK]], i64 0, i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vssrl_vv_u8m1_tumu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t shift, size_t vl) {
- return __riscv_vssrl_vv_u8m1_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssrl_vv_u8m1_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vssrl_vx_u8m1_tumu
// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vssrl.mask.nxv8i8.i64.i64(<vscale x 8 x i8> [[MASKEDOFF]], <vscale x 8 x i8> [[OP1]], i64 [[SHIFT]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vssrl.mask.nxv8i8.i64.i64(<vscale x 8 x i8> [[MASKEDOFF]], <vscale x 8 x i8> [[OP1]], i64 [[SHIFT]], <vscale x 8 x i1> [[MASK]], i64 0, i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vssrl_vx_u8m1_tumu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, size_t shift, size_t vl) {
- return __riscv_vssrl_vx_u8m1_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssrl_vx_u8m1_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vssrl_vv_u8m2_tumu
// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vssrl.mask.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[MASKEDOFF]], <vscale x 16 x i8> [[OP1]], <vscale x 16 x i8> [[SHIFT]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vssrl.mask.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[MASKEDOFF]], <vscale x 16 x i8> [[OP1]], <vscale x 16 x i8> [[SHIFT]], <vscale x 16 x i1> [[MASK]], i64 0, i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vssrl_vv_u8m2_tumu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t shift, size_t vl) {
- return __riscv_vssrl_vv_u8m2_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssrl_vv_u8m2_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vssrl_vx_u8m2_tumu
// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vssrl.mask.nxv16i8.i64.i64(<vscale x 16 x i8> [[MASKEDOFF]], <vscale x 16 x i8> [[OP1]], i64 [[SHIFT]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vssrl.mask.nxv16i8.i64.i64(<vscale x 16 x i8> [[MASKEDOFF]], <vscale x 16 x i8> [[OP1]], i64 [[SHIFT]], <vscale x 16 x i1> [[MASK]], i64 0, i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vssrl_vx_u8m2_tumu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, size_t shift, size_t vl) {
- return __riscv_vssrl_vx_u8m2_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssrl_vx_u8m2_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vssrl_vv_u8m4_tumu
// CHECK-RV64-SAME: (<vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vssrl.mask.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[MASKEDOFF]], <vscale x 32 x i8> [[OP1]], <vscale x 32 x i8> [[SHIFT]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vssrl.mask.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[MASKEDOFF]], <vscale x 32 x i8> [[OP1]], <vscale x 32 x i8> [[SHIFT]], <vscale x 32 x i1> [[MASK]], i64 0, i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vssrl_vv_u8m4_tumu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t shift, size_t vl) {
- return __riscv_vssrl_vv_u8m4_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssrl_vv_u8m4_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vssrl_vx_u8m4_tumu
// CHECK-RV64-SAME: (<vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vssrl.mask.nxv32i8.i64.i64(<vscale x 32 x i8> [[MASKEDOFF]], <vscale x 32 x i8> [[OP1]], i64 [[SHIFT]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vssrl.mask.nxv32i8.i64.i64(<vscale x 32 x i8> [[MASKEDOFF]], <vscale x 32 x i8> [[OP1]], i64 [[SHIFT]], <vscale x 32 x i1> [[MASK]], i64 0, i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vssrl_vx_u8m4_tumu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, size_t shift, size_t vl) {
- return __riscv_vssrl_vx_u8m4_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssrl_vx_u8m4_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 64 x i8> @test_vssrl_vv_u8m8_tumu
// CHECK-RV64-SAME: (<vscale x 64 x i1> [[MASK:%.*]], <vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vssrl.mask.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[MASKEDOFF]], <vscale x 64 x i8> [[OP1]], <vscale x 64 x i8> [[SHIFT]], <vscale x 64 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vssrl.mask.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[MASKEDOFF]], <vscale x 64 x i8> [[OP1]], <vscale x 64 x i8> [[SHIFT]], <vscale x 64 x i1> [[MASK]], i64 0, i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vssrl_vv_u8m8_tumu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t shift, size_t vl) {
- return __riscv_vssrl_vv_u8m8_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssrl_vv_u8m8_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 64 x i8> @test_vssrl_vx_u8m8_tumu
// CHECK-RV64-SAME: (<vscale x 64 x i1> [[MASK:%.*]], <vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vssrl.mask.nxv64i8.i64.i64(<vscale x 64 x i8> [[MASKEDOFF]], <vscale x 64 x i8> [[OP1]], i64 [[SHIFT]], <vscale x 64 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vssrl.mask.nxv64i8.i64.i64(<vscale x 64 x i8> [[MASKEDOFF]], <vscale x 64 x i8> [[OP1]], i64 [[SHIFT]], <vscale x 64 x i1> [[MASK]], i64 0, i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vssrl_vx_u8m8_tumu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, size_t shift, size_t vl) {
- return __riscv_vssrl_vx_u8m8_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssrl_vx_u8m8_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i16> @test_vssrl_vv_u16mf4_tumu
// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vssrl.mask.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[MASKEDOFF]], <vscale x 1 x i16> [[OP1]], <vscale x 1 x i16> [[SHIFT]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vssrl.mask.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[MASKEDOFF]], <vscale x 1 x i16> [[OP1]], <vscale x 1 x i16> [[SHIFT]], <vscale x 1 x i1> [[MASK]], i64 0, i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vssrl_vv_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t shift, size_t vl) {
- return __riscv_vssrl_vv_u16mf4_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssrl_vv_u16mf4_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i16> @test_vssrl_vx_u16mf4_tumu
// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vssrl.mask.nxv1i16.i64.i64(<vscale x 1 x i16> [[MASKEDOFF]], <vscale x 1 x i16> [[OP1]], i64 [[SHIFT]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vssrl.mask.nxv1i16.i64.i64(<vscale x 1 x i16> [[MASKEDOFF]], <vscale x 1 x i16> [[OP1]], i64 [[SHIFT]], <vscale x 1 x i1> [[MASK]], i64 0, i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vssrl_vx_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, size_t shift, size_t vl) {
- return __riscv_vssrl_vx_u16mf4_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssrl_vx_u16mf4_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i16> @test_vssrl_vv_u16mf2_tumu
// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vssrl.mask.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF]], <vscale x 2 x i16> [[OP1]], <vscale x 2 x i16> [[SHIFT]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vssrl.mask.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF]], <vscale x 2 x i16> [[OP1]], <vscale x 2 x i16> [[SHIFT]], <vscale x 2 x i1> [[MASK]], i64 0, i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vssrl_vv_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t shift, size_t vl) {
- return __riscv_vssrl_vv_u16mf2_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssrl_vv_u16mf2_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i16> @test_vssrl_vx_u16mf2_tumu
// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vssrl.mask.nxv2i16.i64.i64(<vscale x 2 x i16> [[MASKEDOFF]], <vscale x 2 x i16> [[OP1]], i64 [[SHIFT]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vssrl.mask.nxv2i16.i64.i64(<vscale x 2 x i16> [[MASKEDOFF]], <vscale x 2 x i16> [[OP1]], i64 [[SHIFT]], <vscale x 2 x i1> [[MASK]], i64 0, i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vssrl_vx_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, size_t shift, size_t vl) {
- return __riscv_vssrl_vx_u16mf2_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssrl_vx_u16mf2_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vssrl_vv_u16m1_tumu
// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vssrl.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF]], <vscale x 4 x i16> [[OP1]], <vscale x 4 x i16> [[SHIFT]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vssrl.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF]], <vscale x 4 x i16> [[OP1]], <vscale x 4 x i16> [[SHIFT]], <vscale x 4 x i1> [[MASK]], i64 0, i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vssrl_vv_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t shift, size_t vl) {
- return __riscv_vssrl_vv_u16m1_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssrl_vv_u16m1_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vssrl_vx_u16m1_tumu
// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vssrl.mask.nxv4i16.i64.i64(<vscale x 4 x i16> [[MASKEDOFF]], <vscale x 4 x i16> [[OP1]], i64 [[SHIFT]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vssrl.mask.nxv4i16.i64.i64(<vscale x 4 x i16> [[MASKEDOFF]], <vscale x 4 x i16> [[OP1]], i64 [[SHIFT]], <vscale x 4 x i1> [[MASK]], i64 0, i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vssrl_vx_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, size_t shift, size_t vl) {
- return __riscv_vssrl_vx_u16m1_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssrl_vx_u16m1_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i16> @test_vssrl_vv_u16m2_tumu
// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vssrl.mask.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[MASKEDOFF]], <vscale x 8 x i16> [[OP1]], <vscale x 8 x i16> [[SHIFT]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vssrl.mask.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[MASKEDOFF]], <vscale x 8 x i16> [[OP1]], <vscale x 8 x i16> [[SHIFT]], <vscale x 8 x i1> [[MASK]], i64 0, i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vssrl_vv_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t shift, size_t vl) {
- return __riscv_vssrl_vv_u16m2_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssrl_vv_u16m2_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i16> @test_vssrl_vx_u16m2_tumu
// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vssrl.mask.nxv8i16.i64.i64(<vscale x 8 x i16> [[MASKEDOFF]], <vscale x 8 x i16> [[OP1]], i64 [[SHIFT]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vssrl.mask.nxv8i16.i64.i64(<vscale x 8 x i16> [[MASKEDOFF]], <vscale x 8 x i16> [[OP1]], i64 [[SHIFT]], <vscale x 8 x i1> [[MASK]], i64 0, i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vssrl_vx_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, size_t shift, size_t vl) {
- return __riscv_vssrl_vx_u16m2_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssrl_vx_u16m2_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i16> @test_vssrl_vv_u16m4_tumu
// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vssrl.mask.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[MASKEDOFF]], <vscale x 16 x i16> [[OP1]], <vscale x 16 x i16> [[SHIFT]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vssrl.mask.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[MASKEDOFF]], <vscale x 16 x i16> [[OP1]], <vscale x 16 x i16> [[SHIFT]], <vscale x 16 x i1> [[MASK]], i64 0, i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vssrl_vv_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t shift, size_t vl) {
- return __riscv_vssrl_vv_u16m4_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssrl_vv_u16m4_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i16> @test_vssrl_vx_u16m4_tumu
// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vssrl.mask.nxv16i16.i64.i64(<vscale x 16 x i16> [[MASKEDOFF]], <vscale x 16 x i16> [[OP1]], i64 [[SHIFT]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vssrl.mask.nxv16i16.i64.i64(<vscale x 16 x i16> [[MASKEDOFF]], <vscale x 16 x i16> [[OP1]], i64 [[SHIFT]], <vscale x 16 x i1> [[MASK]], i64 0, i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vssrl_vx_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, size_t shift, size_t vl) {
- return __riscv_vssrl_vx_u16m4_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssrl_vx_u16m4_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i16> @test_vssrl_vv_u16m8_tumu
// CHECK-RV64-SAME: (<vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vssrl.mask.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[MASKEDOFF]], <vscale x 32 x i16> [[OP1]], <vscale x 32 x i16> [[SHIFT]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vssrl.mask.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[MASKEDOFF]], <vscale x 32 x i16> [[OP1]], <vscale x 32 x i16> [[SHIFT]], <vscale x 32 x i1> [[MASK]], i64 0, i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vssrl_vv_u16m8_tumu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t shift, size_t vl) {
- return __riscv_vssrl_vv_u16m8_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssrl_vv_u16m8_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i16> @test_vssrl_vx_u16m8_tumu
// CHECK-RV64-SAME: (<vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vssrl.mask.nxv32i16.i64.i64(<vscale x 32 x i16> [[MASKEDOFF]], <vscale x 32 x i16> [[OP1]], i64 [[SHIFT]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vssrl.mask.nxv32i16.i64.i64(<vscale x 32 x i16> [[MASKEDOFF]], <vscale x 32 x i16> [[OP1]], i64 [[SHIFT]], <vscale x 32 x i1> [[MASK]], i64 0, i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vssrl_vx_u16m8_tumu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, size_t shift, size_t vl) {
- return __riscv_vssrl_vx_u16m8_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssrl_vx_u16m8_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i32> @test_vssrl_vv_u32mf2_tumu
// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vssrl.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[MASKEDOFF]], <vscale x 1 x i32> [[OP1]], <vscale x 1 x i32> [[SHIFT]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vssrl.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[MASKEDOFF]], <vscale x 1 x i32> [[OP1]], <vscale x 1 x i32> [[SHIFT]], <vscale x 1 x i1> [[MASK]], i64 0, i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vssrl_vv_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t shift, size_t vl) {
- return __riscv_vssrl_vv_u32mf2_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssrl_vv_u32mf2_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i32> @test_vssrl_vx_u32mf2_tumu
// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vssrl.mask.nxv1i32.i64.i64(<vscale x 1 x i32> [[MASKEDOFF]], <vscale x 1 x i32> [[OP1]], i64 [[SHIFT]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vssrl.mask.nxv1i32.i64.i64(<vscale x 1 x i32> [[MASKEDOFF]], <vscale x 1 x i32> [[OP1]], i64 [[SHIFT]], <vscale x 1 x i1> [[MASK]], i64 0, i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vssrl_vx_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, size_t shift, size_t vl) {
- return __riscv_vssrl_vx_u32mf2_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssrl_vx_u32mf2_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i32> @test_vssrl_vv_u32m1_tumu
// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vssrl.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[MASKEDOFF]], <vscale x 2 x i32> [[OP1]], <vscale x 2 x i32> [[SHIFT]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vssrl.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[MASKEDOFF]], <vscale x 2 x i32> [[OP1]], <vscale x 2 x i32> [[SHIFT]], <vscale x 2 x i1> [[MASK]], i64 0, i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vssrl_vv_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t shift, size_t vl) {
- return __riscv_vssrl_vv_u32m1_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssrl_vv_u32m1_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i32> @test_vssrl_vx_u32m1_tumu
// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vssrl.mask.nxv2i32.i64.i64(<vscale x 2 x i32> [[MASKEDOFF]], <vscale x 2 x i32> [[OP1]], i64 [[SHIFT]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vssrl.mask.nxv2i32.i64.i64(<vscale x 2 x i32> [[MASKEDOFF]], <vscale x 2 x i32> [[OP1]], i64 [[SHIFT]], <vscale x 2 x i1> [[MASK]], i64 0, i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vssrl_vx_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, size_t shift, size_t vl) {
- return __riscv_vssrl_vx_u32m1_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssrl_vx_u32m1_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i32> @test_vssrl_vv_u32m2_tumu
// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vssrl.mask.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[MASKEDOFF]], <vscale x 4 x i32> [[OP1]], <vscale x 4 x i32> [[SHIFT]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vssrl.mask.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[MASKEDOFF]], <vscale x 4 x i32> [[OP1]], <vscale x 4 x i32> [[SHIFT]], <vscale x 4 x i1> [[MASK]], i64 0, i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vssrl_vv_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t shift, size_t vl) {
- return __riscv_vssrl_vv_u32m2_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssrl_vv_u32m2_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i32> @test_vssrl_vx_u32m2_tumu
// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vssrl.mask.nxv4i32.i64.i64(<vscale x 4 x i32> [[MASKEDOFF]], <vscale x 4 x i32> [[OP1]], i64 [[SHIFT]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vssrl.mask.nxv4i32.i64.i64(<vscale x 4 x i32> [[MASKEDOFF]], <vscale x 4 x i32> [[OP1]], i64 [[SHIFT]], <vscale x 4 x i1> [[MASK]], i64 0, i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vssrl_vx_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, size_t shift, size_t vl) {
- return __riscv_vssrl_vx_u32m2_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssrl_vx_u32m2_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i32> @test_vssrl_vv_u32m4_tumu
// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vssrl.mask.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[MASKEDOFF]], <vscale x 8 x i32> [[OP1]], <vscale x 8 x i32> [[SHIFT]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vssrl.mask.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[MASKEDOFF]], <vscale x 8 x i32> [[OP1]], <vscale x 8 x i32> [[SHIFT]], <vscale x 8 x i1> [[MASK]], i64 0, i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vssrl_vv_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t shift, size_t vl) {
- return __riscv_vssrl_vv_u32m4_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssrl_vv_u32m4_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i32> @test_vssrl_vx_u32m4_tumu
// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vssrl.mask.nxv8i32.i64.i64(<vscale x 8 x i32> [[MASKEDOFF]], <vscale x 8 x i32> [[OP1]], i64 [[SHIFT]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vssrl.mask.nxv8i32.i64.i64(<vscale x 8 x i32> [[MASKEDOFF]], <vscale x 8 x i32> [[OP1]], i64 [[SHIFT]], <vscale x 8 x i1> [[MASK]], i64 0, i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vssrl_vx_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, size_t shift, size_t vl) {
- return __riscv_vssrl_vx_u32m4_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssrl_vx_u32m4_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i32> @test_vssrl_vv_u32m8_tumu
// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vssrl.mask.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[MASKEDOFF]], <vscale x 16 x i32> [[OP1]], <vscale x 16 x i32> [[SHIFT]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vssrl.mask.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[MASKEDOFF]], <vscale x 16 x i32> [[OP1]], <vscale x 16 x i32> [[SHIFT]], <vscale x 16 x i1> [[MASK]], i64 0, i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vssrl_vv_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t shift, size_t vl) {
- return __riscv_vssrl_vv_u32m8_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssrl_vv_u32m8_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i32> @test_vssrl_vx_u32m8_tumu
// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vssrl.mask.nxv16i32.i64.i64(<vscale x 16 x i32> [[MASKEDOFF]], <vscale x 16 x i32> [[OP1]], i64 [[SHIFT]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vssrl.mask.nxv16i32.i64.i64(<vscale x 16 x i32> [[MASKEDOFF]], <vscale x 16 x i32> [[OP1]], i64 [[SHIFT]], <vscale x 16 x i1> [[MASK]], i64 0, i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vssrl_vx_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, size_t shift, size_t vl) {
- return __riscv_vssrl_vx_u32m8_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssrl_vx_u32m8_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i64> @test_vssrl_vv_u64m1_tumu
// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vssrl.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[MASKEDOFF]], <vscale x 1 x i64> [[OP1]], <vscale x 1 x i64> [[SHIFT]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vssrl.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[MASKEDOFF]], <vscale x 1 x i64> [[OP1]], <vscale x 1 x i64> [[SHIFT]], <vscale x 1 x i1> [[MASK]], i64 0, i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vssrl_vv_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t shift, size_t vl) {
- return __riscv_vssrl_vv_u64m1_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssrl_vv_u64m1_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i64> @test_vssrl_vx_u64m1_tumu
// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vssrl.mask.nxv1i64.i64.i64(<vscale x 1 x i64> [[MASKEDOFF]], <vscale x 1 x i64> [[OP1]], i64 [[SHIFT]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vssrl.mask.nxv1i64.i64.i64(<vscale x 1 x i64> [[MASKEDOFF]], <vscale x 1 x i64> [[OP1]], i64 [[SHIFT]], <vscale x 1 x i1> [[MASK]], i64 0, i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vssrl_vx_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, size_t shift, size_t vl) {
- return __riscv_vssrl_vx_u64m1_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssrl_vx_u64m1_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i64> @test_vssrl_vv_u64m2_tumu
// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vssrl.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[MASKEDOFF]], <vscale x 2 x i64> [[OP1]], <vscale x 2 x i64> [[SHIFT]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vssrl.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[MASKEDOFF]], <vscale x 2 x i64> [[OP1]], <vscale x 2 x i64> [[SHIFT]], <vscale x 2 x i1> [[MASK]], i64 0, i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vssrl_vv_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t shift, size_t vl) {
- return __riscv_vssrl_vv_u64m2_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssrl_vv_u64m2_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i64> @test_vssrl_vx_u64m2_tumu
// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vssrl.mask.nxv2i64.i64.i64(<vscale x 2 x i64> [[MASKEDOFF]], <vscale x 2 x i64> [[OP1]], i64 [[SHIFT]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vssrl.mask.nxv2i64.i64.i64(<vscale x 2 x i64> [[MASKEDOFF]], <vscale x 2 x i64> [[OP1]], i64 [[SHIFT]], <vscale x 2 x i1> [[MASK]], i64 0, i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vssrl_vx_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, size_t shift, size_t vl) {
- return __riscv_vssrl_vx_u64m2_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssrl_vx_u64m2_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i64> @test_vssrl_vv_u64m4_tumu
// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vssrl.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[MASKEDOFF]], <vscale x 4 x i64> [[OP1]], <vscale x 4 x i64> [[SHIFT]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vssrl.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[MASKEDOFF]], <vscale x 4 x i64> [[OP1]], <vscale x 4 x i64> [[SHIFT]], <vscale x 4 x i1> [[MASK]], i64 0, i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vssrl_vv_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t shift, size_t vl) {
- return __riscv_vssrl_vv_u64m4_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssrl_vv_u64m4_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i64> @test_vssrl_vx_u64m4_tumu
// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vssrl.mask.nxv4i64.i64.i64(<vscale x 4 x i64> [[MASKEDOFF]], <vscale x 4 x i64> [[OP1]], i64 [[SHIFT]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vssrl.mask.nxv4i64.i64.i64(<vscale x 4 x i64> [[MASKEDOFF]], <vscale x 4 x i64> [[OP1]], i64 [[SHIFT]], <vscale x 4 x i1> [[MASK]], i64 0, i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vssrl_vx_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, size_t shift, size_t vl) {
- return __riscv_vssrl_vx_u64m4_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssrl_vx_u64m4_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i64> @test_vssrl_vv_u64m8_tumu
// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vssrl.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[MASKEDOFF]], <vscale x 8 x i64> [[OP1]], <vscale x 8 x i64> [[SHIFT]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vssrl.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[MASKEDOFF]], <vscale x 8 x i64> [[OP1]], <vscale x 8 x i64> [[SHIFT]], <vscale x 8 x i1> [[MASK]], i64 0, i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vssrl_vv_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t shift, size_t vl) {
- return __riscv_vssrl_vv_u64m8_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssrl_vv_u64m8_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i64> @test_vssrl_vx_u64m8_tumu
// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vssrl.mask.nxv8i64.i64.i64(<vscale x 8 x i64> [[MASKEDOFF]], <vscale x 8 x i64> [[OP1]], i64 [[SHIFT]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vssrl.mask.nxv8i64.i64.i64(<vscale x 8 x i64> [[MASKEDOFF]], <vscale x 8 x i64> [[OP1]], i64 [[SHIFT]], <vscale x 8 x i1> [[MASK]], i64 0, i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vssrl_vx_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, size_t shift, size_t vl) {
- return __riscv_vssrl_vx_u64m8_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssrl_vx_u64m8_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vssrl_vv_u8mf8_mu
// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vssrl.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF]], <vscale x 1 x i8> [[OP1]], <vscale x 1 x i8> [[SHIFT]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vssrl.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF]], <vscale x 1 x i8> [[OP1]], <vscale x 1 x i8> [[SHIFT]], <vscale x 1 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vssrl_vv_u8mf8_mu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t shift, size_t vl) {
- return __riscv_vssrl_vv_u8mf8_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssrl_vv_u8mf8_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vssrl_vx_u8mf8_mu
// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vssrl.mask.nxv1i8.i64.i64(<vscale x 1 x i8> [[MASKEDOFF]], <vscale x 1 x i8> [[OP1]], i64 [[SHIFT]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vssrl.mask.nxv1i8.i64.i64(<vscale x 1 x i8> [[MASKEDOFF]], <vscale x 1 x i8> [[OP1]], i64 [[SHIFT]], <vscale x 1 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vssrl_vx_u8mf8_mu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, size_t shift, size_t vl) {
- return __riscv_vssrl_vx_u8mf8_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssrl_vx_u8mf8_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vssrl_vv_u8mf4_mu
// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vssrl.mask.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[MASKEDOFF]], <vscale x 2 x i8> [[OP1]], <vscale x 2 x i8> [[SHIFT]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vssrl.mask.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[MASKEDOFF]], <vscale x 2 x i8> [[OP1]], <vscale x 2 x i8> [[SHIFT]], <vscale x 2 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vssrl_vv_u8mf4_mu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t shift, size_t vl) {
- return __riscv_vssrl_vv_u8mf4_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssrl_vv_u8mf4_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vssrl_vx_u8mf4_mu
// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vssrl.mask.nxv2i8.i64.i64(<vscale x 2 x i8> [[MASKEDOFF]], <vscale x 2 x i8> [[OP1]], i64 [[SHIFT]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vssrl.mask.nxv2i8.i64.i64(<vscale x 2 x i8> [[MASKEDOFF]], <vscale x 2 x i8> [[OP1]], i64 [[SHIFT]], <vscale x 2 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vssrl_vx_u8mf4_mu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, size_t shift, size_t vl) {
- return __riscv_vssrl_vx_u8mf4_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssrl_vx_u8mf4_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vssrl_vv_u8mf2_mu
// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vssrl.mask.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[MASKEDOFF]], <vscale x 4 x i8> [[OP1]], <vscale x 4 x i8> [[SHIFT]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vssrl.mask.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[MASKEDOFF]], <vscale x 4 x i8> [[OP1]], <vscale x 4 x i8> [[SHIFT]], <vscale x 4 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vssrl_vv_u8mf2_mu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t shift, size_t vl) {
- return __riscv_vssrl_vv_u8mf2_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssrl_vv_u8mf2_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vssrl_vx_u8mf2_mu
// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vssrl.mask.nxv4i8.i64.i64(<vscale x 4 x i8> [[MASKEDOFF]], <vscale x 4 x i8> [[OP1]], i64 [[SHIFT]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vssrl.mask.nxv4i8.i64.i64(<vscale x 4 x i8> [[MASKEDOFF]], <vscale x 4 x i8> [[OP1]], i64 [[SHIFT]], <vscale x 4 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vssrl_vx_u8mf2_mu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, size_t shift, size_t vl) {
- return __riscv_vssrl_vx_u8mf2_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssrl_vx_u8mf2_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vssrl_vv_u8m1_mu
// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vssrl.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF]], <vscale x 8 x i8> [[OP1]], <vscale x 8 x i8> [[SHIFT]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vssrl.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF]], <vscale x 8 x i8> [[OP1]], <vscale x 8 x i8> [[SHIFT]], <vscale x 8 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vssrl_vv_u8m1_mu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t shift, size_t vl) {
- return __riscv_vssrl_vv_u8m1_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssrl_vv_u8m1_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vssrl_vx_u8m1_mu
// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vssrl.mask.nxv8i8.i64.i64(<vscale x 8 x i8> [[MASKEDOFF]], <vscale x 8 x i8> [[OP1]], i64 [[SHIFT]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vssrl.mask.nxv8i8.i64.i64(<vscale x 8 x i8> [[MASKEDOFF]], <vscale x 8 x i8> [[OP1]], i64 [[SHIFT]], <vscale x 8 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vssrl_vx_u8m1_mu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, size_t shift, size_t vl) {
- return __riscv_vssrl_vx_u8m1_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssrl_vx_u8m1_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vssrl_vv_u8m2_mu
// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vssrl.mask.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[MASKEDOFF]], <vscale x 16 x i8> [[OP1]], <vscale x 16 x i8> [[SHIFT]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vssrl.mask.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[MASKEDOFF]], <vscale x 16 x i8> [[OP1]], <vscale x 16 x i8> [[SHIFT]], <vscale x 16 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vssrl_vv_u8m2_mu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t shift, size_t vl) {
- return __riscv_vssrl_vv_u8m2_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssrl_vv_u8m2_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vssrl_vx_u8m2_mu
// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vssrl.mask.nxv16i8.i64.i64(<vscale x 16 x i8> [[MASKEDOFF]], <vscale x 16 x i8> [[OP1]], i64 [[SHIFT]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vssrl.mask.nxv16i8.i64.i64(<vscale x 16 x i8> [[MASKEDOFF]], <vscale x 16 x i8> [[OP1]], i64 [[SHIFT]], <vscale x 16 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vssrl_vx_u8m2_mu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, size_t shift, size_t vl) {
- return __riscv_vssrl_vx_u8m2_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssrl_vx_u8m2_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vssrl_vv_u8m4_mu
// CHECK-RV64-SAME: (<vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vssrl.mask.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[MASKEDOFF]], <vscale x 32 x i8> [[OP1]], <vscale x 32 x i8> [[SHIFT]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vssrl.mask.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[MASKEDOFF]], <vscale x 32 x i8> [[OP1]], <vscale x 32 x i8> [[SHIFT]], <vscale x 32 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vssrl_vv_u8m4_mu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t shift, size_t vl) {
- return __riscv_vssrl_vv_u8m4_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssrl_vv_u8m4_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vssrl_vx_u8m4_mu
// CHECK-RV64-SAME: (<vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vssrl.mask.nxv32i8.i64.i64(<vscale x 32 x i8> [[MASKEDOFF]], <vscale x 32 x i8> [[OP1]], i64 [[SHIFT]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vssrl.mask.nxv32i8.i64.i64(<vscale x 32 x i8> [[MASKEDOFF]], <vscale x 32 x i8> [[OP1]], i64 [[SHIFT]], <vscale x 32 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vssrl_vx_u8m4_mu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, size_t shift, size_t vl) {
- return __riscv_vssrl_vx_u8m4_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssrl_vx_u8m4_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 64 x i8> @test_vssrl_vv_u8m8_mu
// CHECK-RV64-SAME: (<vscale x 64 x i1> [[MASK:%.*]], <vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vssrl.mask.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[MASKEDOFF]], <vscale x 64 x i8> [[OP1]], <vscale x 64 x i8> [[SHIFT]], <vscale x 64 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vssrl.mask.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[MASKEDOFF]], <vscale x 64 x i8> [[OP1]], <vscale x 64 x i8> [[SHIFT]], <vscale x 64 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vssrl_vv_u8m8_mu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t shift, size_t vl) {
- return __riscv_vssrl_vv_u8m8_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssrl_vv_u8m8_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 64 x i8> @test_vssrl_vx_u8m8_mu
// CHECK-RV64-SAME: (<vscale x 64 x i1> [[MASK:%.*]], <vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vssrl.mask.nxv64i8.i64.i64(<vscale x 64 x i8> [[MASKEDOFF]], <vscale x 64 x i8> [[OP1]], i64 [[SHIFT]], <vscale x 64 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vssrl.mask.nxv64i8.i64.i64(<vscale x 64 x i8> [[MASKEDOFF]], <vscale x 64 x i8> [[OP1]], i64 [[SHIFT]], <vscale x 64 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vssrl_vx_u8m8_mu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, size_t shift, size_t vl) {
- return __riscv_vssrl_vx_u8m8_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssrl_vx_u8m8_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i16> @test_vssrl_vv_u16mf4_mu
// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vssrl.mask.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[MASKEDOFF]], <vscale x 1 x i16> [[OP1]], <vscale x 1 x i16> [[SHIFT]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vssrl.mask.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[MASKEDOFF]], <vscale x 1 x i16> [[OP1]], <vscale x 1 x i16> [[SHIFT]], <vscale x 1 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vssrl_vv_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t shift, size_t vl) {
- return __riscv_vssrl_vv_u16mf4_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssrl_vv_u16mf4_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i16> @test_vssrl_vx_u16mf4_mu
// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vssrl.mask.nxv1i16.i64.i64(<vscale x 1 x i16> [[MASKEDOFF]], <vscale x 1 x i16> [[OP1]], i64 [[SHIFT]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vssrl.mask.nxv1i16.i64.i64(<vscale x 1 x i16> [[MASKEDOFF]], <vscale x 1 x i16> [[OP1]], i64 [[SHIFT]], <vscale x 1 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vssrl_vx_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, size_t shift, size_t vl) {
- return __riscv_vssrl_vx_u16mf4_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssrl_vx_u16mf4_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i16> @test_vssrl_vv_u16mf2_mu
// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vssrl.mask.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF]], <vscale x 2 x i16> [[OP1]], <vscale x 2 x i16> [[SHIFT]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vssrl.mask.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF]], <vscale x 2 x i16> [[OP1]], <vscale x 2 x i16> [[SHIFT]], <vscale x 2 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vssrl_vv_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t shift, size_t vl) {
- return __riscv_vssrl_vv_u16mf2_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssrl_vv_u16mf2_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i16> @test_vssrl_vx_u16mf2_mu
// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vssrl.mask.nxv2i16.i64.i64(<vscale x 2 x i16> [[MASKEDOFF]], <vscale x 2 x i16> [[OP1]], i64 [[SHIFT]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vssrl.mask.nxv2i16.i64.i64(<vscale x 2 x i16> [[MASKEDOFF]], <vscale x 2 x i16> [[OP1]], i64 [[SHIFT]], <vscale x 2 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vssrl_vx_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, size_t shift, size_t vl) {
- return __riscv_vssrl_vx_u16mf2_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssrl_vx_u16mf2_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vssrl_vv_u16m1_mu
// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vssrl.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF]], <vscale x 4 x i16> [[OP1]], <vscale x 4 x i16> [[SHIFT]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vssrl.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF]], <vscale x 4 x i16> [[OP1]], <vscale x 4 x i16> [[SHIFT]], <vscale x 4 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vssrl_vv_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t shift, size_t vl) {
- return __riscv_vssrl_vv_u16m1_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssrl_vv_u16m1_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vssrl_vx_u16m1_mu
// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vssrl.mask.nxv4i16.i64.i64(<vscale x 4 x i16> [[MASKEDOFF]], <vscale x 4 x i16> [[OP1]], i64 [[SHIFT]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vssrl.mask.nxv4i16.i64.i64(<vscale x 4 x i16> [[MASKEDOFF]], <vscale x 4 x i16> [[OP1]], i64 [[SHIFT]], <vscale x 4 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vssrl_vx_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, size_t shift, size_t vl) {
- return __riscv_vssrl_vx_u16m1_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssrl_vx_u16m1_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i16> @test_vssrl_vv_u16m2_mu
// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vssrl.mask.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[MASKEDOFF]], <vscale x 8 x i16> [[OP1]], <vscale x 8 x i16> [[SHIFT]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vssrl.mask.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[MASKEDOFF]], <vscale x 8 x i16> [[OP1]], <vscale x 8 x i16> [[SHIFT]], <vscale x 8 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vssrl_vv_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t shift, size_t vl) {
- return __riscv_vssrl_vv_u16m2_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssrl_vv_u16m2_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i16> @test_vssrl_vx_u16m2_mu
// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vssrl.mask.nxv8i16.i64.i64(<vscale x 8 x i16> [[MASKEDOFF]], <vscale x 8 x i16> [[OP1]], i64 [[SHIFT]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vssrl.mask.nxv8i16.i64.i64(<vscale x 8 x i16> [[MASKEDOFF]], <vscale x 8 x i16> [[OP1]], i64 [[SHIFT]], <vscale x 8 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vssrl_vx_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, size_t shift, size_t vl) {
- return __riscv_vssrl_vx_u16m2_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssrl_vx_u16m2_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i16> @test_vssrl_vv_u16m4_mu
// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vssrl.mask.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[MASKEDOFF]], <vscale x 16 x i16> [[OP1]], <vscale x 16 x i16> [[SHIFT]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vssrl.mask.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[MASKEDOFF]], <vscale x 16 x i16> [[OP1]], <vscale x 16 x i16> [[SHIFT]], <vscale x 16 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vssrl_vv_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t shift, size_t vl) {
- return __riscv_vssrl_vv_u16m4_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssrl_vv_u16m4_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i16> @test_vssrl_vx_u16m4_mu
// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vssrl.mask.nxv16i16.i64.i64(<vscale x 16 x i16> [[MASKEDOFF]], <vscale x 16 x i16> [[OP1]], i64 [[SHIFT]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vssrl.mask.nxv16i16.i64.i64(<vscale x 16 x i16> [[MASKEDOFF]], <vscale x 16 x i16> [[OP1]], i64 [[SHIFT]], <vscale x 16 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vssrl_vx_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, size_t shift, size_t vl) {
- return __riscv_vssrl_vx_u16m4_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssrl_vx_u16m4_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i16> @test_vssrl_vv_u16m8_mu
// CHECK-RV64-SAME: (<vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vssrl.mask.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[MASKEDOFF]], <vscale x 32 x i16> [[OP1]], <vscale x 32 x i16> [[SHIFT]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vssrl.mask.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[MASKEDOFF]], <vscale x 32 x i16> [[OP1]], <vscale x 32 x i16> [[SHIFT]], <vscale x 32 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vssrl_vv_u16m8_mu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t shift, size_t vl) {
- return __riscv_vssrl_vv_u16m8_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssrl_vv_u16m8_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i16> @test_vssrl_vx_u16m8_mu
// CHECK-RV64-SAME: (<vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vssrl.mask.nxv32i16.i64.i64(<vscale x 32 x i16> [[MASKEDOFF]], <vscale x 32 x i16> [[OP1]], i64 [[SHIFT]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vssrl.mask.nxv32i16.i64.i64(<vscale x 32 x i16> [[MASKEDOFF]], <vscale x 32 x i16> [[OP1]], i64 [[SHIFT]], <vscale x 32 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vssrl_vx_u16m8_mu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, size_t shift, size_t vl) {
- return __riscv_vssrl_vx_u16m8_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssrl_vx_u16m8_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i32> @test_vssrl_vv_u32mf2_mu
// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vssrl.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[MASKEDOFF]], <vscale x 1 x i32> [[OP1]], <vscale x 1 x i32> [[SHIFT]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vssrl.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[MASKEDOFF]], <vscale x 1 x i32> [[OP1]], <vscale x 1 x i32> [[SHIFT]], <vscale x 1 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vssrl_vv_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t shift, size_t vl) {
- return __riscv_vssrl_vv_u32mf2_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssrl_vv_u32mf2_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i32> @test_vssrl_vx_u32mf2_mu
// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vssrl.mask.nxv1i32.i64.i64(<vscale x 1 x i32> [[MASKEDOFF]], <vscale x 1 x i32> [[OP1]], i64 [[SHIFT]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vssrl.mask.nxv1i32.i64.i64(<vscale x 1 x i32> [[MASKEDOFF]], <vscale x 1 x i32> [[OP1]], i64 [[SHIFT]], <vscale x 1 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vssrl_vx_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, size_t shift, size_t vl) {
- return __riscv_vssrl_vx_u32mf2_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssrl_vx_u32mf2_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i32> @test_vssrl_vv_u32m1_mu
// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vssrl.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[MASKEDOFF]], <vscale x 2 x i32> [[OP1]], <vscale x 2 x i32> [[SHIFT]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vssrl.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[MASKEDOFF]], <vscale x 2 x i32> [[OP1]], <vscale x 2 x i32> [[SHIFT]], <vscale x 2 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vssrl_vv_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t shift, size_t vl) {
- return __riscv_vssrl_vv_u32m1_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssrl_vv_u32m1_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i32> @test_vssrl_vx_u32m1_mu
// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vssrl.mask.nxv2i32.i64.i64(<vscale x 2 x i32> [[MASKEDOFF]], <vscale x 2 x i32> [[OP1]], i64 [[SHIFT]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vssrl.mask.nxv2i32.i64.i64(<vscale x 2 x i32> [[MASKEDOFF]], <vscale x 2 x i32> [[OP1]], i64 [[SHIFT]], <vscale x 2 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vssrl_vx_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, size_t shift, size_t vl) {
- return __riscv_vssrl_vx_u32m1_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssrl_vx_u32m1_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i32> @test_vssrl_vv_u32m2_mu
// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vssrl.mask.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[MASKEDOFF]], <vscale x 4 x i32> [[OP1]], <vscale x 4 x i32> [[SHIFT]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vssrl.mask.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[MASKEDOFF]], <vscale x 4 x i32> [[OP1]], <vscale x 4 x i32> [[SHIFT]], <vscale x 4 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vssrl_vv_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t shift, size_t vl) {
- return __riscv_vssrl_vv_u32m2_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssrl_vv_u32m2_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i32> @test_vssrl_vx_u32m2_mu
// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vssrl.mask.nxv4i32.i64.i64(<vscale x 4 x i32> [[MASKEDOFF]], <vscale x 4 x i32> [[OP1]], i64 [[SHIFT]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vssrl.mask.nxv4i32.i64.i64(<vscale x 4 x i32> [[MASKEDOFF]], <vscale x 4 x i32> [[OP1]], i64 [[SHIFT]], <vscale x 4 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vssrl_vx_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, size_t shift, size_t vl) {
- return __riscv_vssrl_vx_u32m2_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssrl_vx_u32m2_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i32> @test_vssrl_vv_u32m4_mu
// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vssrl.mask.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[MASKEDOFF]], <vscale x 8 x i32> [[OP1]], <vscale x 8 x i32> [[SHIFT]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vssrl.mask.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[MASKEDOFF]], <vscale x 8 x i32> [[OP1]], <vscale x 8 x i32> [[SHIFT]], <vscale x 8 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vssrl_vv_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t shift, size_t vl) {
- return __riscv_vssrl_vv_u32m4_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssrl_vv_u32m4_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i32> @test_vssrl_vx_u32m4_mu
// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vssrl.mask.nxv8i32.i64.i64(<vscale x 8 x i32> [[MASKEDOFF]], <vscale x 8 x i32> [[OP1]], i64 [[SHIFT]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vssrl.mask.nxv8i32.i64.i64(<vscale x 8 x i32> [[MASKEDOFF]], <vscale x 8 x i32> [[OP1]], i64 [[SHIFT]], <vscale x 8 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vssrl_vx_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, size_t shift, size_t vl) {
- return __riscv_vssrl_vx_u32m4_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssrl_vx_u32m4_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i32> @test_vssrl_vv_u32m8_mu
// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vssrl.mask.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[MASKEDOFF]], <vscale x 16 x i32> [[OP1]], <vscale x 16 x i32> [[SHIFT]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vssrl.mask.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[MASKEDOFF]], <vscale x 16 x i32> [[OP1]], <vscale x 16 x i32> [[SHIFT]], <vscale x 16 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vssrl_vv_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t shift, size_t vl) {
- return __riscv_vssrl_vv_u32m8_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssrl_vv_u32m8_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i32> @test_vssrl_vx_u32m8_mu
// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vssrl.mask.nxv16i32.i64.i64(<vscale x 16 x i32> [[MASKEDOFF]], <vscale x 16 x i32> [[OP1]], i64 [[SHIFT]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vssrl.mask.nxv16i32.i64.i64(<vscale x 16 x i32> [[MASKEDOFF]], <vscale x 16 x i32> [[OP1]], i64 [[SHIFT]], <vscale x 16 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vssrl_vx_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, size_t shift, size_t vl) {
- return __riscv_vssrl_vx_u32m8_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssrl_vx_u32m8_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i64> @test_vssrl_vv_u64m1_mu
// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vssrl.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[MASKEDOFF]], <vscale x 1 x i64> [[OP1]], <vscale x 1 x i64> [[SHIFT]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vssrl.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[MASKEDOFF]], <vscale x 1 x i64> [[OP1]], <vscale x 1 x i64> [[SHIFT]], <vscale x 1 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vssrl_vv_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t shift, size_t vl) {
- return __riscv_vssrl_vv_u64m1_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssrl_vv_u64m1_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i64> @test_vssrl_vx_u64m1_mu
// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vssrl.mask.nxv1i64.i64.i64(<vscale x 1 x i64> [[MASKEDOFF]], <vscale x 1 x i64> [[OP1]], i64 [[SHIFT]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vssrl.mask.nxv1i64.i64.i64(<vscale x 1 x i64> [[MASKEDOFF]], <vscale x 1 x i64> [[OP1]], i64 [[SHIFT]], <vscale x 1 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vssrl_vx_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, size_t shift, size_t vl) {
- return __riscv_vssrl_vx_u64m1_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssrl_vx_u64m1_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i64> @test_vssrl_vv_u64m2_mu
// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vssrl.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[MASKEDOFF]], <vscale x 2 x i64> [[OP1]], <vscale x 2 x i64> [[SHIFT]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vssrl.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[MASKEDOFF]], <vscale x 2 x i64> [[OP1]], <vscale x 2 x i64> [[SHIFT]], <vscale x 2 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vssrl_vv_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t shift, size_t vl) {
- return __riscv_vssrl_vv_u64m2_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssrl_vv_u64m2_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i64> @test_vssrl_vx_u64m2_mu
// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vssrl.mask.nxv2i64.i64.i64(<vscale x 2 x i64> [[MASKEDOFF]], <vscale x 2 x i64> [[OP1]], i64 [[SHIFT]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vssrl.mask.nxv2i64.i64.i64(<vscale x 2 x i64> [[MASKEDOFF]], <vscale x 2 x i64> [[OP1]], i64 [[SHIFT]], <vscale x 2 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vssrl_vx_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, size_t shift, size_t vl) {
- return __riscv_vssrl_vx_u64m2_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssrl_vx_u64m2_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i64> @test_vssrl_vv_u64m4_mu
// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vssrl.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[MASKEDOFF]], <vscale x 4 x i64> [[OP1]], <vscale x 4 x i64> [[SHIFT]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vssrl.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[MASKEDOFF]], <vscale x 4 x i64> [[OP1]], <vscale x 4 x i64> [[SHIFT]], <vscale x 4 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vssrl_vv_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t shift, size_t vl) {
- return __riscv_vssrl_vv_u64m4_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssrl_vv_u64m4_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i64> @test_vssrl_vx_u64m4_mu
// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vssrl.mask.nxv4i64.i64.i64(<vscale x 4 x i64> [[MASKEDOFF]], <vscale x 4 x i64> [[OP1]], i64 [[SHIFT]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vssrl.mask.nxv4i64.i64.i64(<vscale x 4 x i64> [[MASKEDOFF]], <vscale x 4 x i64> [[OP1]], i64 [[SHIFT]], <vscale x 4 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vssrl_vx_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, size_t shift, size_t vl) {
- return __riscv_vssrl_vx_u64m4_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssrl_vx_u64m4_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i64> @test_vssrl_vv_u64m8_mu
// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vssrl.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[MASKEDOFF]], <vscale x 8 x i64> [[OP1]], <vscale x 8 x i64> [[SHIFT]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vssrl.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[MASKEDOFF]], <vscale x 8 x i64> [[OP1]], <vscale x 8 x i64> [[SHIFT]], <vscale x 8 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vssrl_vv_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t shift, size_t vl) {
- return __riscv_vssrl_vv_u64m8_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssrl_vv_u64m8_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i64> @test_vssrl_vx_u64m8_mu
// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vssrl.mask.nxv8i64.i64.i64(<vscale x 8 x i64> [[MASKEDOFF]], <vscale x 8 x i64> [[OP1]], i64 [[SHIFT]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vssrl.mask.nxv8i64.i64.i64(<vscale x 8 x i64> [[MASKEDOFF]], <vscale x 8 x i64> [[OP1]], i64 [[SHIFT]], <vscale x 8 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vssrl_vx_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, size_t shift, size_t vl) {
- return __riscv_vssrl_vx_u64m8_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssrl_vx_u64m8_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
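Illustrative sketch only, not part of the generated checks: with the updated signatures exercised above, the masked vssrl intrinsic now takes an explicit vxrm argument between the shift and vl operands. The wrapper name below is hypothetical.

#include <riscv_vector.h>

// Hypothetical wrapper around the updated intrinsic; __RISCV_VXRM_RNU selects
// round-to-nearest-up, matching the rounding mode used by the tests above.
vuint64m1_t shift_round_nearest_up_mu(vbool64_t mask, vuint64m1_t maskedoff,
                                      vuint64m1_t op1, size_t shift, size_t vl) {
  return __riscv_vssrl_vx_u64m1_mu(mask, maskedoff, op1, shift,
                                   __RISCV_VXRM_RNU, vl);
}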
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vnclip_wv_i8mf8_tu
// CHECK-RV64-SAME: (<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i8> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vnclip.nxv1i8.nxv1i16.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF]], <vscale x 1 x i16> [[OP1]], <vscale x 1 x i8> [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vnclip.nxv1i8.nxv1i16.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF]], <vscale x 1 x i16> [[OP1]], <vscale x 1 x i8> [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vnclip_wv_i8mf8_tu(vint8mf8_t maskedoff, vint16mf4_t op1, vuint8mf8_t shift, size_t vl) {
- return __riscv_vnclip_tu(maskedoff, op1, shift, vl);
+ return __riscv_vnclip_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vnclip_wx_i8mf8_tu
// CHECK-RV64-SAME: (<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vnclip.nxv1i8.nxv1i16.i64.i64(<vscale x 1 x i8> [[MASKEDOFF]], <vscale x 1 x i16> [[OP1]], i64 [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vnclip.nxv1i8.nxv1i16.i64.i64(<vscale x 1 x i8> [[MASKEDOFF]], <vscale x 1 x i16> [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vnclip_wx_i8mf8_tu(vint8mf8_t maskedoff, vint16mf4_t op1, size_t shift, size_t vl) {
- return __riscv_vnclip_tu(maskedoff, op1, shift, vl);
+ return __riscv_vnclip_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vnclip_wv_i8mf4_tu
// CHECK-RV64-SAME: (<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i8> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vnclip.nxv2i8.nxv2i16.nxv2i8.i64(<vscale x 2 x i8> [[MASKEDOFF]], <vscale x 2 x i16> [[OP1]], <vscale x 2 x i8> [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vnclip.nxv2i8.nxv2i16.nxv2i8.i64(<vscale x 2 x i8> [[MASKEDOFF]], <vscale x 2 x i16> [[OP1]], <vscale x 2 x i8> [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vnclip_wv_i8mf4_tu(vint8mf4_t maskedoff, vint16mf2_t op1, vuint8mf4_t shift, size_t vl) {
- return __riscv_vnclip_tu(maskedoff, op1, shift, vl);
+ return __riscv_vnclip_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vnclip_wx_i8mf4_tu
// CHECK-RV64-SAME: (<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vnclip.nxv2i8.nxv2i16.i64.i64(<vscale x 2 x i8> [[MASKEDOFF]], <vscale x 2 x i16> [[OP1]], i64 [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vnclip.nxv2i8.nxv2i16.i64.i64(<vscale x 2 x i8> [[MASKEDOFF]], <vscale x 2 x i16> [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vnclip_wx_i8mf4_tu(vint8mf4_t maskedoff, vint16mf2_t op1, size_t shift, size_t vl) {
- return __riscv_vnclip_tu(maskedoff, op1, shift, vl);
+ return __riscv_vnclip_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vnclip_wv_i8mf2_tu
// CHECK-RV64-SAME: (<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i8> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vnclip.nxv4i8.nxv4i16.nxv4i8.i64(<vscale x 4 x i8> [[MASKEDOFF]], <vscale x 4 x i16> [[OP1]], <vscale x 4 x i8> [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vnclip.nxv4i8.nxv4i16.nxv4i8.i64(<vscale x 4 x i8> [[MASKEDOFF]], <vscale x 4 x i16> [[OP1]], <vscale x 4 x i8> [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vnclip_wv_i8mf2_tu(vint8mf2_t maskedoff, vint16m1_t op1, vuint8mf2_t shift, size_t vl) {
- return __riscv_vnclip_tu(maskedoff, op1, shift, vl);
+ return __riscv_vnclip_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vnclip_wx_i8mf2_tu
// CHECK-RV64-SAME: (<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vnclip.nxv4i8.nxv4i16.i64.i64(<vscale x 4 x i8> [[MASKEDOFF]], <vscale x 4 x i16> [[OP1]], i64 [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vnclip.nxv4i8.nxv4i16.i64.i64(<vscale x 4 x i8> [[MASKEDOFF]], <vscale x 4 x i16> [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vnclip_wx_i8mf2_tu(vint8mf2_t maskedoff, vint16m1_t op1, size_t shift, size_t vl) {
- return __riscv_vnclip_tu(maskedoff, op1, shift, vl);
+ return __riscv_vnclip_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vnclip_wv_i8m1_tu
// CHECK-RV64-SAME: (<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i8> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vnclip.nxv8i8.nxv8i16.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF]], <vscale x 8 x i16> [[OP1]], <vscale x 8 x i8> [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vnclip.nxv8i8.nxv8i16.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF]], <vscale x 8 x i16> [[OP1]], <vscale x 8 x i8> [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vnclip_wv_i8m1_tu(vint8m1_t maskedoff, vint16m2_t op1, vuint8m1_t shift, size_t vl) {
- return __riscv_vnclip_tu(maskedoff, op1, shift, vl);
+ return __riscv_vnclip_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vnclip_wx_i8m1_tu
// CHECK-RV64-SAME: (<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vnclip.nxv8i8.nxv8i16.i64.i64(<vscale x 8 x i8> [[MASKEDOFF]], <vscale x 8 x i16> [[OP1]], i64 [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vnclip.nxv8i8.nxv8i16.i64.i64(<vscale x 8 x i8> [[MASKEDOFF]], <vscale x 8 x i16> [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vnclip_wx_i8m1_tu(vint8m1_t maskedoff, vint16m2_t op1, size_t shift, size_t vl) {
- return __riscv_vnclip_tu(maskedoff, op1, shift, vl);
+ return __riscv_vnclip_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vnclip_wv_i8m2_tu
// CHECK-RV64-SAME: (<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i8> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vnclip.nxv16i8.nxv16i16.nxv16i8.i64(<vscale x 16 x i8> [[MASKEDOFF]], <vscale x 16 x i16> [[OP1]], <vscale x 16 x i8> [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vnclip.nxv16i8.nxv16i16.nxv16i8.i64(<vscale x 16 x i8> [[MASKEDOFF]], <vscale x 16 x i16> [[OP1]], <vscale x 16 x i8> [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vnclip_wv_i8m2_tu(vint8m2_t maskedoff, vint16m4_t op1, vuint8m2_t shift, size_t vl) {
- return __riscv_vnclip_tu(maskedoff, op1, shift, vl);
+ return __riscv_vnclip_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vnclip_wx_i8m2_tu
// CHECK-RV64-SAME: (<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vnclip.nxv16i8.nxv16i16.i64.i64(<vscale x 16 x i8> [[MASKEDOFF]], <vscale x 16 x i16> [[OP1]], i64 [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vnclip.nxv16i8.nxv16i16.i64.i64(<vscale x 16 x i8> [[MASKEDOFF]], <vscale x 16 x i16> [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vnclip_wx_i8m2_tu(vint8m2_t maskedoff, vint16m4_t op1, size_t shift, size_t vl) {
- return __riscv_vnclip_tu(maskedoff, op1, shift, vl);
+ return __riscv_vnclip_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vnclip_wv_i8m4_tu
// CHECK-RV64-SAME: (<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i8> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vnclip.nxv32i8.nxv32i16.nxv32i8.i64(<vscale x 32 x i8> [[MASKEDOFF]], <vscale x 32 x i16> [[OP1]], <vscale x 32 x i8> [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vnclip.nxv32i8.nxv32i16.nxv32i8.i64(<vscale x 32 x i8> [[MASKEDOFF]], <vscale x 32 x i16> [[OP1]], <vscale x 32 x i8> [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vnclip_wv_i8m4_tu(vint8m4_t maskedoff, vint16m8_t op1, vuint8m4_t shift, size_t vl) {
- return __riscv_vnclip_tu(maskedoff, op1, shift, vl);
+ return __riscv_vnclip_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vnclip_wx_i8m4_tu
// CHECK-RV64-SAME: (<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vnclip.nxv32i8.nxv32i16.i64.i64(<vscale x 32 x i8> [[MASKEDOFF]], <vscale x 32 x i16> [[OP1]], i64 [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vnclip.nxv32i8.nxv32i16.i64.i64(<vscale x 32 x i8> [[MASKEDOFF]], <vscale x 32 x i16> [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vnclip_wx_i8m4_tu(vint8m4_t maskedoff, vint16m8_t op1, size_t shift, size_t vl) {
- return __riscv_vnclip_tu(maskedoff, op1, shift, vl);
+ return __riscv_vnclip_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i16> @test_vnclip_wv_i16mf4_tu
// CHECK-RV64-SAME: (<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i16> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vnclip.nxv1i16.nxv1i32.nxv1i16.i64(<vscale x 1 x i16> [[MASKEDOFF]], <vscale x 1 x i32> [[OP1]], <vscale x 1 x i16> [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vnclip.nxv1i16.nxv1i32.nxv1i16.i64(<vscale x 1 x i16> [[MASKEDOFF]], <vscale x 1 x i32> [[OP1]], <vscale x 1 x i16> [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vnclip_wv_i16mf4_tu(vint16mf4_t maskedoff, vint32mf2_t op1, vuint16mf4_t shift, size_t vl) {
- return __riscv_vnclip_tu(maskedoff, op1, shift, vl);
+ return __riscv_vnclip_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i16> @test_vnclip_wx_i16mf4_tu
// CHECK-RV64-SAME: (<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vnclip.nxv1i16.nxv1i32.i64.i64(<vscale x 1 x i16> [[MASKEDOFF]], <vscale x 1 x i32> [[OP1]], i64 [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vnclip.nxv1i16.nxv1i32.i64.i64(<vscale x 1 x i16> [[MASKEDOFF]], <vscale x 1 x i32> [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vnclip_wx_i16mf4_tu(vint16mf4_t maskedoff, vint32mf2_t op1, size_t shift, size_t vl) {
- return __riscv_vnclip_tu(maskedoff, op1, shift, vl);
+ return __riscv_vnclip_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i16> @test_vnclip_wv_i16mf2_tu
// CHECK-RV64-SAME: (<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i16> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vnclip.nxv2i16.nxv2i32.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF]], <vscale x 2 x i32> [[OP1]], <vscale x 2 x i16> [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vnclip.nxv2i16.nxv2i32.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF]], <vscale x 2 x i32> [[OP1]], <vscale x 2 x i16> [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vnclip_wv_i16mf2_tu(vint16mf2_t maskedoff, vint32m1_t op1, vuint16mf2_t shift, size_t vl) {
- return __riscv_vnclip_tu(maskedoff, op1, shift, vl);
+ return __riscv_vnclip_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i16> @test_vnclip_wx_i16mf2_tu
// CHECK-RV64-SAME: (<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vnclip.nxv2i16.nxv2i32.i64.i64(<vscale x 2 x i16> [[MASKEDOFF]], <vscale x 2 x i32> [[OP1]], i64 [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vnclip.nxv2i16.nxv2i32.i64.i64(<vscale x 2 x i16> [[MASKEDOFF]], <vscale x 2 x i32> [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vnclip_wx_i16mf2_tu(vint16mf2_t maskedoff, vint32m1_t op1, size_t shift, size_t vl) {
- return __riscv_vnclip_tu(maskedoff, op1, shift, vl);
+ return __riscv_vnclip_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vnclip_wv_i16m1_tu
// CHECK-RV64-SAME: (<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i16> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vnclip.nxv4i16.nxv4i32.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF]], <vscale x 4 x i32> [[OP1]], <vscale x 4 x i16> [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vnclip.nxv4i16.nxv4i32.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF]], <vscale x 4 x i32> [[OP1]], <vscale x 4 x i16> [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vnclip_wv_i16m1_tu(vint16m1_t maskedoff, vint32m2_t op1, vuint16m1_t shift, size_t vl) {
- return __riscv_vnclip_tu(maskedoff, op1, shift, vl);
+ return __riscv_vnclip_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vnclip_wx_i16m1_tu
// CHECK-RV64-SAME: (<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vnclip.nxv4i16.nxv4i32.i64.i64(<vscale x 4 x i16> [[MASKEDOFF]], <vscale x 4 x i32> [[OP1]], i64 [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vnclip.nxv4i16.nxv4i32.i64.i64(<vscale x 4 x i16> [[MASKEDOFF]], <vscale x 4 x i32> [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vnclip_wx_i16m1_tu(vint16m1_t maskedoff, vint32m2_t op1, size_t shift, size_t vl) {
- return __riscv_vnclip_tu(maskedoff, op1, shift, vl);
+ return __riscv_vnclip_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i16> @test_vnclip_wv_i16m2_tu
// CHECK-RV64-SAME: (<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i16> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vnclip.nxv8i16.nxv8i32.nxv8i16.i64(<vscale x 8 x i16> [[MASKEDOFF]], <vscale x 8 x i32> [[OP1]], <vscale x 8 x i16> [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vnclip.nxv8i16.nxv8i32.nxv8i16.i64(<vscale x 8 x i16> [[MASKEDOFF]], <vscale x 8 x i32> [[OP1]], <vscale x 8 x i16> [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vnclip_wv_i16m2_tu(vint16m2_t maskedoff, vint32m4_t op1, vuint16m2_t shift, size_t vl) {
- return __riscv_vnclip_tu(maskedoff, op1, shift, vl);
+ return __riscv_vnclip_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i16> @test_vnclip_wx_i16m2_tu
// CHECK-RV64-SAME: (<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vnclip.nxv8i16.nxv8i32.i64.i64(<vscale x 8 x i16> [[MASKEDOFF]], <vscale x 8 x i32> [[OP1]], i64 [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vnclip.nxv8i16.nxv8i32.i64.i64(<vscale x 8 x i16> [[MASKEDOFF]], <vscale x 8 x i32> [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vnclip_wx_i16m2_tu(vint16m2_t maskedoff, vint32m4_t op1, size_t shift, size_t vl) {
- return __riscv_vnclip_tu(maskedoff, op1, shift, vl);
+ return __riscv_vnclip_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i16> @test_vnclip_wv_i16m4_tu
// CHECK-RV64-SAME: (<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i16> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vnclip.nxv16i16.nxv16i32.nxv16i16.i64(<vscale x 16 x i16> [[MASKEDOFF]], <vscale x 16 x i32> [[OP1]], <vscale x 16 x i16> [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vnclip.nxv16i16.nxv16i32.nxv16i16.i64(<vscale x 16 x i16> [[MASKEDOFF]], <vscale x 16 x i32> [[OP1]], <vscale x 16 x i16> [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vnclip_wv_i16m4_tu(vint16m4_t maskedoff, vint32m8_t op1, vuint16m4_t shift, size_t vl) {
- return __riscv_vnclip_tu(maskedoff, op1, shift, vl);
+ return __riscv_vnclip_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i16> @test_vnclip_wx_i16m4_tu
// CHECK-RV64-SAME: (<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vnclip.nxv16i16.nxv16i32.i64.i64(<vscale x 16 x i16> [[MASKEDOFF]], <vscale x 16 x i32> [[OP1]], i64 [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vnclip.nxv16i16.nxv16i32.i64.i64(<vscale x 16 x i16> [[MASKEDOFF]], <vscale x 16 x i32> [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vnclip_wx_i16m4_tu(vint16m4_t maskedoff, vint32m8_t op1, size_t shift, size_t vl) {
- return __riscv_vnclip_tu(maskedoff, op1, shift, vl);
+ return __riscv_vnclip_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i32> @test_vnclip_wv_i32mf2_tu
// CHECK-RV64-SAME: (<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i32> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnclip.nxv1i32.nxv1i64.nxv1i32.i64(<vscale x 1 x i32> [[MASKEDOFF]], <vscale x 1 x i64> [[OP1]], <vscale x 1 x i32> [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnclip.nxv1i32.nxv1i64.nxv1i32.i64(<vscale x 1 x i32> [[MASKEDOFF]], <vscale x 1 x i64> [[OP1]], <vscale x 1 x i32> [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vnclip_wv_i32mf2_tu(vint32mf2_t maskedoff, vint64m1_t op1, vuint32mf2_t shift, size_t vl) {
- return __riscv_vnclip_tu(maskedoff, op1, shift, vl);
+ return __riscv_vnclip_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i32> @test_vnclip_wx_i32mf2_tu
// CHECK-RV64-SAME: (<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnclip.nxv1i32.nxv1i64.i64.i64(<vscale x 1 x i32> [[MASKEDOFF]], <vscale x 1 x i64> [[OP1]], i64 [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnclip.nxv1i32.nxv1i64.i64.i64(<vscale x 1 x i32> [[MASKEDOFF]], <vscale x 1 x i64> [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vnclip_wx_i32mf2_tu(vint32mf2_t maskedoff, vint64m1_t op1, size_t shift, size_t vl) {
- return __riscv_vnclip_tu(maskedoff, op1, shift, vl);
+ return __riscv_vnclip_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i32> @test_vnclip_wv_i32m1_tu
// CHECK-RV64-SAME: (<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i32> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vnclip.nxv2i32.nxv2i64.nxv2i32.i64(<vscale x 2 x i32> [[MASKEDOFF]], <vscale x 2 x i64> [[OP1]], <vscale x 2 x i32> [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vnclip.nxv2i32.nxv2i64.nxv2i32.i64(<vscale x 2 x i32> [[MASKEDOFF]], <vscale x 2 x i64> [[OP1]], <vscale x 2 x i32> [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vnclip_wv_i32m1_tu(vint32m1_t maskedoff, vint64m2_t op1, vuint32m1_t shift, size_t vl) {
- return __riscv_vnclip_tu(maskedoff, op1, shift, vl);
+ return __riscv_vnclip_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i32> @test_vnclip_wx_i32m1_tu
// CHECK-RV64-SAME: (<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vnclip.nxv2i32.nxv2i64.i64.i64(<vscale x 2 x i32> [[MASKEDOFF]], <vscale x 2 x i64> [[OP1]], i64 [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vnclip.nxv2i32.nxv2i64.i64.i64(<vscale x 2 x i32> [[MASKEDOFF]], <vscale x 2 x i64> [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vnclip_wx_i32m1_tu(vint32m1_t maskedoff, vint64m2_t op1, size_t shift, size_t vl) {
- return __riscv_vnclip_tu(maskedoff, op1, shift, vl);
+ return __riscv_vnclip_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i32> @test_vnclip_wv_i32m2_tu
// CHECK-RV64-SAME: (<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i32> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vnclip.nxv4i32.nxv4i64.nxv4i32.i64(<vscale x 4 x i32> [[MASKEDOFF]], <vscale x 4 x i64> [[OP1]], <vscale x 4 x i32> [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vnclip.nxv4i32.nxv4i64.nxv4i32.i64(<vscale x 4 x i32> [[MASKEDOFF]], <vscale x 4 x i64> [[OP1]], <vscale x 4 x i32> [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vnclip_wv_i32m2_tu(vint32m2_t maskedoff, vint64m4_t op1, vuint32m2_t shift, size_t vl) {
- return __riscv_vnclip_tu(maskedoff, op1, shift, vl);
+ return __riscv_vnclip_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i32> @test_vnclip_wx_i32m2_tu
// CHECK-RV64-SAME: (<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vnclip.nxv4i32.nxv4i64.i64.i64(<vscale x 4 x i32> [[MASKEDOFF]], <vscale x 4 x i64> [[OP1]], i64 [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vnclip.nxv4i32.nxv4i64.i64.i64(<vscale x 4 x i32> [[MASKEDOFF]], <vscale x 4 x i64> [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vnclip_wx_i32m2_tu(vint32m2_t maskedoff, vint64m4_t op1, size_t shift, size_t vl) {
- return __riscv_vnclip_tu(maskedoff, op1, shift, vl);
+ return __riscv_vnclip_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i32> @test_vnclip_wv_i32m4_tu
// CHECK-RV64-SAME: (<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i32> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vnclip.nxv8i32.nxv8i64.nxv8i32.i64(<vscale x 8 x i32> [[MASKEDOFF]], <vscale x 8 x i64> [[OP1]], <vscale x 8 x i32> [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vnclip.nxv8i32.nxv8i64.nxv8i32.i64(<vscale x 8 x i32> [[MASKEDOFF]], <vscale x 8 x i64> [[OP1]], <vscale x 8 x i32> [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vnclip_wv_i32m4_tu(vint32m4_t maskedoff, vint64m8_t op1, vuint32m4_t shift, size_t vl) {
- return __riscv_vnclip_tu(maskedoff, op1, shift, vl);
+ return __riscv_vnclip_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i32> @test_vnclip_wx_i32m4_tu
// CHECK-RV64-SAME: (<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vnclip.nxv8i32.nxv8i64.i64.i64(<vscale x 8 x i32> [[MASKEDOFF]], <vscale x 8 x i64> [[OP1]], i64 [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vnclip.nxv8i32.nxv8i64.i64.i64(<vscale x 8 x i32> [[MASKEDOFF]], <vscale x 8 x i64> [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vnclip_wx_i32m4_tu(vint32m4_t maskedoff, vint64m8_t op1, size_t shift, size_t vl) {
- return __riscv_vnclip_tu(maskedoff, op1, shift, vl);
+ return __riscv_vnclip_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
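Similarly (illustrative sketch, not part of the generated checks), the tail-undisturbed vnclip overload now takes the rounding mode explicitly before vl; the wrapper name is hypothetical.

#include <riscv_vector.h>

// Narrow with signed saturation under round-to-nearest-up, keeping tail
// elements from `maskedoff` (tail-undisturbed policy), as in the tests above.
vint8mf8_t narrow_clip_rnu_tu(vint8mf8_t maskedoff, vint16mf4_t op1,
                              vuint8mf8_t shift, size_t vl) {
  return __riscv_vnclip_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}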
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vnclip_wv_i8mf8_tum
// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i8> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vnclip.mask.nxv1i8.nxv1i16.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF]], <vscale x 1 x i16> [[OP1]], <vscale x 1 x i8> [[SHIFT]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vnclip.mask.nxv1i8.nxv1i16.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF]], <vscale x 1 x i16> [[OP1]], <vscale x 1 x i8> [[SHIFT]], <vscale x 1 x i1> [[MASK]], i64 0, i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vnclip_wv_i8mf8_tum(vbool64_t mask, vint8mf8_t maskedoff, vint16mf4_t op1, vuint8mf8_t shift, size_t vl) {
- return __riscv_vnclip_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnclip_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vnclip_wx_i8mf8_tum
// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vnclip.mask.nxv1i8.nxv1i16.i64.i64(<vscale x 1 x i8> [[MASKEDOFF]], <vscale x 1 x i16> [[OP1]], i64 [[SHIFT]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vnclip.mask.nxv1i8.nxv1i16.i64.i64(<vscale x 1 x i8> [[MASKEDOFF]], <vscale x 1 x i16> [[OP1]], i64 [[SHIFT]], <vscale x 1 x i1> [[MASK]], i64 0, i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vnclip_wx_i8mf8_tum(vbool64_t mask, vint8mf8_t maskedoff, vint16mf4_t op1, size_t shift, size_t vl) {
- return __riscv_vnclip_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnclip_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vnclip_wv_i8mf4_tum
// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i8> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vnclip.mask.nxv2i8.nxv2i16.nxv2i8.i64(<vscale x 2 x i8> [[MASKEDOFF]], <vscale x 2 x i16> [[OP1]], <vscale x 2 x i8> [[SHIFT]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vnclip.mask.nxv2i8.nxv2i16.nxv2i8.i64(<vscale x 2 x i8> [[MASKEDOFF]], <vscale x 2 x i16> [[OP1]], <vscale x 2 x i8> [[SHIFT]], <vscale x 2 x i1> [[MASK]], i64 0, i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vnclip_wv_i8mf4_tum(vbool32_t mask, vint8mf4_t maskedoff, vint16mf2_t op1, vuint8mf4_t shift, size_t vl) {
- return __riscv_vnclip_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnclip_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vnclip_wx_i8mf4_tum
// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vnclip.mask.nxv2i8.nxv2i16.i64.i64(<vscale x 2 x i8> [[MASKEDOFF]], <vscale x 2 x i16> [[OP1]], i64 [[SHIFT]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vnclip.mask.nxv2i8.nxv2i16.i64.i64(<vscale x 2 x i8> [[MASKEDOFF]], <vscale x 2 x i16> [[OP1]], i64 [[SHIFT]], <vscale x 2 x i1> [[MASK]], i64 0, i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vnclip_wx_i8mf4_tum(vbool32_t mask, vint8mf4_t maskedoff, vint16mf2_t op1, size_t shift, size_t vl) {
- return __riscv_vnclip_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnclip_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vnclip_wv_i8mf2_tum
// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i8> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vnclip.mask.nxv4i8.nxv4i16.nxv4i8.i64(<vscale x 4 x i8> [[MASKEDOFF]], <vscale x 4 x i16> [[OP1]], <vscale x 4 x i8> [[SHIFT]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vnclip.mask.nxv4i8.nxv4i16.nxv4i8.i64(<vscale x 4 x i8> [[MASKEDOFF]], <vscale x 4 x i16> [[OP1]], <vscale x 4 x i8> [[SHIFT]], <vscale x 4 x i1> [[MASK]], i64 0, i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vnclip_wv_i8mf2_tum(vbool16_t mask, vint8mf2_t maskedoff, vint16m1_t op1, vuint8mf2_t shift, size_t vl) {
- return __riscv_vnclip_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnclip_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vnclip_wx_i8mf2_tum
// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vnclip.mask.nxv4i8.nxv4i16.i64.i64(<vscale x 4 x i8> [[MASKEDOFF]], <vscale x 4 x i16> [[OP1]], i64 [[SHIFT]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vnclip.mask.nxv4i8.nxv4i16.i64.i64(<vscale x 4 x i8> [[MASKEDOFF]], <vscale x 4 x i16> [[OP1]], i64 [[SHIFT]], <vscale x 4 x i1> [[MASK]], i64 0, i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vnclip_wx_i8mf2_tum(vbool16_t mask, vint8mf2_t maskedoff, vint16m1_t op1, size_t shift, size_t vl) {
- return __riscv_vnclip_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnclip_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vnclip_wv_i8m1_tum
// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i8> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vnclip.mask.nxv8i8.nxv8i16.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF]], <vscale x 8 x i16> [[OP1]], <vscale x 8 x i8> [[SHIFT]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vnclip.mask.nxv8i8.nxv8i16.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF]], <vscale x 8 x i16> [[OP1]], <vscale x 8 x i8> [[SHIFT]], <vscale x 8 x i1> [[MASK]], i64 0, i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vnclip_wv_i8m1_tum(vbool8_t mask, vint8m1_t maskedoff, vint16m2_t op1, vuint8m1_t shift, size_t vl) {
- return __riscv_vnclip_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnclip_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vnclip_wx_i8m1_tum
// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vnclip.mask.nxv8i8.nxv8i16.i64.i64(<vscale x 8 x i8> [[MASKEDOFF]], <vscale x 8 x i16> [[OP1]], i64 [[SHIFT]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vnclip.mask.nxv8i8.nxv8i16.i64.i64(<vscale x 8 x i8> [[MASKEDOFF]], <vscale x 8 x i16> [[OP1]], i64 [[SHIFT]], <vscale x 8 x i1> [[MASK]], i64 0, i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vnclip_wx_i8m1_tum(vbool8_t mask, vint8m1_t maskedoff, vint16m2_t op1, size_t shift, size_t vl) {
- return __riscv_vnclip_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnclip_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vnclip_wv_i8m2_tum
// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i8> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vnclip.mask.nxv16i8.nxv16i16.nxv16i8.i64(<vscale x 16 x i8> [[MASKEDOFF]], <vscale x 16 x i16> [[OP1]], <vscale x 16 x i8> [[SHIFT]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vnclip.mask.nxv16i8.nxv16i16.nxv16i8.i64(<vscale x 16 x i8> [[MASKEDOFF]], <vscale x 16 x i16> [[OP1]], <vscale x 16 x i8> [[SHIFT]], <vscale x 16 x i1> [[MASK]], i64 0, i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vnclip_wv_i8m2_tum(vbool4_t mask, vint8m2_t maskedoff, vint16m4_t op1, vuint8m2_t shift, size_t vl) {
- return __riscv_vnclip_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnclip_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vnclip_wx_i8m2_tum
// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vnclip.mask.nxv16i8.nxv16i16.i64.i64(<vscale x 16 x i8> [[MASKEDOFF]], <vscale x 16 x i16> [[OP1]], i64 [[SHIFT]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vnclip.mask.nxv16i8.nxv16i16.i64.i64(<vscale x 16 x i8> [[MASKEDOFF]], <vscale x 16 x i16> [[OP1]], i64 [[SHIFT]], <vscale x 16 x i1> [[MASK]], i64 0, i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vnclip_wx_i8m2_tum(vbool4_t mask, vint8m2_t maskedoff, vint16m4_t op1, size_t shift, size_t vl) {
- return __riscv_vnclip_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnclip_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vnclip_wv_i8m4_tum
// CHECK-RV64-SAME: (<vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i8> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vnclip.mask.nxv32i8.nxv32i16.nxv32i8.i64(<vscale x 32 x i8> [[MASKEDOFF]], <vscale x 32 x i16> [[OP1]], <vscale x 32 x i8> [[SHIFT]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vnclip.mask.nxv32i8.nxv32i16.nxv32i8.i64(<vscale x 32 x i8> [[MASKEDOFF]], <vscale x 32 x i16> [[OP1]], <vscale x 32 x i8> [[SHIFT]], <vscale x 32 x i1> [[MASK]], i64 0, i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vnclip_wv_i8m4_tum(vbool2_t mask, vint8m4_t maskedoff, vint16m8_t op1, vuint8m4_t shift, size_t vl) {
- return __riscv_vnclip_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnclip_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vnclip_wx_i8m4_tum
// CHECK-RV64-SAME: (<vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vnclip.mask.nxv32i8.nxv32i16.i64.i64(<vscale x 32 x i8> [[MASKEDOFF]], <vscale x 32 x i16> [[OP1]], i64 [[SHIFT]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vnclip.mask.nxv32i8.nxv32i16.i64.i64(<vscale x 32 x i8> [[MASKEDOFF]], <vscale x 32 x i16> [[OP1]], i64 [[SHIFT]], <vscale x 32 x i1> [[MASK]], i64 0, i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vnclip_wx_i8m4_tum(vbool2_t mask, vint8m4_t maskedoff, vint16m8_t op1, size_t shift, size_t vl) {
- return __riscv_vnclip_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnclip_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i16> @test_vnclip_wv_i16mf4_tum
// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i16> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vnclip.mask.nxv1i16.nxv1i32.nxv1i16.i64(<vscale x 1 x i16> [[MASKEDOFF]], <vscale x 1 x i32> [[OP1]], <vscale x 1 x i16> [[SHIFT]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vnclip.mask.nxv1i16.nxv1i32.nxv1i16.i64(<vscale x 1 x i16> [[MASKEDOFF]], <vscale x 1 x i32> [[OP1]], <vscale x 1 x i16> [[SHIFT]], <vscale x 1 x i1> [[MASK]], i64 0, i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vnclip_wv_i16mf4_tum(vbool64_t mask, vint16mf4_t maskedoff, vint32mf2_t op1, vuint16mf4_t shift, size_t vl) {
- return __riscv_vnclip_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnclip_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i16> @test_vnclip_wx_i16mf4_tum
// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vnclip.mask.nxv1i16.nxv1i32.i64.i64(<vscale x 1 x i16> [[MASKEDOFF]], <vscale x 1 x i32> [[OP1]], i64 [[SHIFT]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vnclip.mask.nxv1i16.nxv1i32.i64.i64(<vscale x 1 x i16> [[MASKEDOFF]], <vscale x 1 x i32> [[OP1]], i64 [[SHIFT]], <vscale x 1 x i1> [[MASK]], i64 0, i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vnclip_wx_i16mf4_tum(vbool64_t mask, vint16mf4_t maskedoff, vint32mf2_t op1, size_t shift, size_t vl) {
- return __riscv_vnclip_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnclip_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i16> @test_vnclip_wv_i16mf2_tum
// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i16> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vnclip.mask.nxv2i16.nxv2i32.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF]], <vscale x 2 x i32> [[OP1]], <vscale x 2 x i16> [[SHIFT]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vnclip.mask.nxv2i16.nxv2i32.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF]], <vscale x 2 x i32> [[OP1]], <vscale x 2 x i16> [[SHIFT]], <vscale x 2 x i1> [[MASK]], i64 0, i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vnclip_wv_i16mf2_tum(vbool32_t mask, vint16mf2_t maskedoff, vint32m1_t op1, vuint16mf2_t shift, size_t vl) {
- return __riscv_vnclip_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnclip_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i16> @test_vnclip_wx_i16mf2_tum
// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vnclip.mask.nxv2i16.nxv2i32.i64.i64(<vscale x 2 x i16> [[MASKEDOFF]], <vscale x 2 x i32> [[OP1]], i64 [[SHIFT]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vnclip.mask.nxv2i16.nxv2i32.i64.i64(<vscale x 2 x i16> [[MASKEDOFF]], <vscale x 2 x i32> [[OP1]], i64 [[SHIFT]], <vscale x 2 x i1> [[MASK]], i64 0, i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vnclip_wx_i16mf2_tum(vbool32_t mask, vint16mf2_t maskedoff, vint32m1_t op1, size_t shift, size_t vl) {
- return __riscv_vnclip_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnclip_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vnclip_wv_i16m1_tum
// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i16> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vnclip.mask.nxv4i16.nxv4i32.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF]], <vscale x 4 x i32> [[OP1]], <vscale x 4 x i16> [[SHIFT]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vnclip.mask.nxv4i16.nxv4i32.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF]], <vscale x 4 x i32> [[OP1]], <vscale x 4 x i16> [[SHIFT]], <vscale x 4 x i1> [[MASK]], i64 0, i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vnclip_wv_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, vint32m2_t op1, vuint16m1_t shift, size_t vl) {
- return __riscv_vnclip_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnclip_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vnclip_wx_i16m1_tum
// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vnclip.mask.nxv4i16.nxv4i32.i64.i64(<vscale x 4 x i16> [[MASKEDOFF]], <vscale x 4 x i32> [[OP1]], i64 [[SHIFT]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vnclip.mask.nxv4i16.nxv4i32.i64.i64(<vscale x 4 x i16> [[MASKEDOFF]], <vscale x 4 x i32> [[OP1]], i64 [[SHIFT]], <vscale x 4 x i1> [[MASK]], i64 0, i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vnclip_wx_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, vint32m2_t op1, size_t shift, size_t vl) {
- return __riscv_vnclip_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnclip_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i16> @test_vnclip_wv_i16m2_tum
// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i16> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vnclip.mask.nxv8i16.nxv8i32.nxv8i16.i64(<vscale x 8 x i16> [[MASKEDOFF]], <vscale x 8 x i32> [[OP1]], <vscale x 8 x i16> [[SHIFT]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vnclip.mask.nxv8i16.nxv8i32.nxv8i16.i64(<vscale x 8 x i16> [[MASKEDOFF]], <vscale x 8 x i32> [[OP1]], <vscale x 8 x i16> [[SHIFT]], <vscale x 8 x i1> [[MASK]], i64 0, i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vnclip_wv_i16m2_tum(vbool8_t mask, vint16m2_t maskedoff, vint32m4_t op1, vuint16m2_t shift, size_t vl) {
- return __riscv_vnclip_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnclip_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i16> @test_vnclip_wx_i16m2_tum
// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vnclip.mask.nxv8i16.nxv8i32.i64.i64(<vscale x 8 x i16> [[MASKEDOFF]], <vscale x 8 x i32> [[OP1]], i64 [[SHIFT]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vnclip.mask.nxv8i16.nxv8i32.i64.i64(<vscale x 8 x i16> [[MASKEDOFF]], <vscale x 8 x i32> [[OP1]], i64 [[SHIFT]], <vscale x 8 x i1> [[MASK]], i64 0, i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vnclip_wx_i16m2_tum(vbool8_t mask, vint16m2_t maskedoff, vint32m4_t op1, size_t shift, size_t vl) {
- return __riscv_vnclip_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnclip_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i16> @test_vnclip_wv_i16m4_tum
// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i16> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vnclip.mask.nxv16i16.nxv16i32.nxv16i16.i64(<vscale x 16 x i16> [[MASKEDOFF]], <vscale x 16 x i32> [[OP1]], <vscale x 16 x i16> [[SHIFT]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vnclip.mask.nxv16i16.nxv16i32.nxv16i16.i64(<vscale x 16 x i16> [[MASKEDOFF]], <vscale x 16 x i32> [[OP1]], <vscale x 16 x i16> [[SHIFT]], <vscale x 16 x i1> [[MASK]], i64 0, i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vnclip_wv_i16m4_tum(vbool4_t mask, vint16m4_t maskedoff, vint32m8_t op1, vuint16m4_t shift, size_t vl) {
- return __riscv_vnclip_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnclip_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i16> @test_vnclip_wx_i16m4_tum
// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vnclip.mask.nxv16i16.nxv16i32.i64.i64(<vscale x 16 x i16> [[MASKEDOFF]], <vscale x 16 x i32> [[OP1]], i64 [[SHIFT]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vnclip.mask.nxv16i16.nxv16i32.i64.i64(<vscale x 16 x i16> [[MASKEDOFF]], <vscale x 16 x i32> [[OP1]], i64 [[SHIFT]], <vscale x 16 x i1> [[MASK]], i64 0, i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vnclip_wx_i16m4_tum(vbool4_t mask, vint16m4_t maskedoff, vint32m8_t op1, size_t shift, size_t vl) {
- return __riscv_vnclip_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnclip_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i32> @test_vnclip_wv_i32mf2_tum
// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i32> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnclip.mask.nxv1i32.nxv1i64.nxv1i32.i64(<vscale x 1 x i32> [[MASKEDOFF]], <vscale x 1 x i64> [[OP1]], <vscale x 1 x i32> [[SHIFT]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnclip.mask.nxv1i32.nxv1i64.nxv1i32.i64(<vscale x 1 x i32> [[MASKEDOFF]], <vscale x 1 x i64> [[OP1]], <vscale x 1 x i32> [[SHIFT]], <vscale x 1 x i1> [[MASK]], i64 0, i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vnclip_wv_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff, vint64m1_t op1, vuint32mf2_t shift, size_t vl) {
- return __riscv_vnclip_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnclip_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i32> @test_vnclip_wx_i32mf2_tum
// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnclip.mask.nxv1i32.nxv1i64.i64.i64(<vscale x 1 x i32> [[MASKEDOFF]], <vscale x 1 x i64> [[OP1]], i64 [[SHIFT]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnclip.mask.nxv1i32.nxv1i64.i64.i64(<vscale x 1 x i32> [[MASKEDOFF]], <vscale x 1 x i64> [[OP1]], i64 [[SHIFT]], <vscale x 1 x i1> [[MASK]], i64 0, i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vnclip_wx_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff, vint64m1_t op1, size_t shift, size_t vl) {
- return __riscv_vnclip_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnclip_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i32> @test_vnclip_wv_i32m1_tum
// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i32> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vnclip.mask.nxv2i32.nxv2i64.nxv2i32.i64(<vscale x 2 x i32> [[MASKEDOFF]], <vscale x 2 x i64> [[OP1]], <vscale x 2 x i32> [[SHIFT]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vnclip.mask.nxv2i32.nxv2i64.nxv2i32.i64(<vscale x 2 x i32> [[MASKEDOFF]], <vscale x 2 x i64> [[OP1]], <vscale x 2 x i32> [[SHIFT]], <vscale x 2 x i1> [[MASK]], i64 0, i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vnclip_wv_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, vint64m2_t op1, vuint32m1_t shift, size_t vl) {
- return __riscv_vnclip_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnclip_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i32> @test_vnclip_wx_i32m1_tum
// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vnclip.mask.nxv2i32.nxv2i64.i64.i64(<vscale x 2 x i32> [[MASKEDOFF]], <vscale x 2 x i64> [[OP1]], i64 [[SHIFT]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vnclip.mask.nxv2i32.nxv2i64.i64.i64(<vscale x 2 x i32> [[MASKEDOFF]], <vscale x 2 x i64> [[OP1]], i64 [[SHIFT]], <vscale x 2 x i1> [[MASK]], i64 0, i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vnclip_wx_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, vint64m2_t op1, size_t shift, size_t vl) {
- return __riscv_vnclip_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnclip_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i32> @test_vnclip_wv_i32m2_tum
// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i32> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vnclip.mask.nxv4i32.nxv4i64.nxv4i32.i64(<vscale x 4 x i32> [[MASKEDOFF]], <vscale x 4 x i64> [[OP1]], <vscale x 4 x i32> [[SHIFT]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vnclip.mask.nxv4i32.nxv4i64.nxv4i32.i64(<vscale x 4 x i32> [[MASKEDOFF]], <vscale x 4 x i64> [[OP1]], <vscale x 4 x i32> [[SHIFT]], <vscale x 4 x i1> [[MASK]], i64 0, i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vnclip_wv_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, vint64m4_t op1, vuint32m2_t shift, size_t vl) {
- return __riscv_vnclip_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnclip_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i32> @test_vnclip_wx_i32m2_tum
// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vnclip.mask.nxv4i32.nxv4i64.i64.i64(<vscale x 4 x i32> [[MASKEDOFF]], <vscale x 4 x i64> [[OP1]], i64 [[SHIFT]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vnclip.mask.nxv4i32.nxv4i64.i64.i64(<vscale x 4 x i32> [[MASKEDOFF]], <vscale x 4 x i64> [[OP1]], i64 [[SHIFT]], <vscale x 4 x i1> [[MASK]], i64 0, i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vnclip_wx_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, vint64m4_t op1, size_t shift, size_t vl) {
- return __riscv_vnclip_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnclip_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i32> @test_vnclip_wv_i32m4_tum
// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i32> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vnclip.mask.nxv8i32.nxv8i64.nxv8i32.i64(<vscale x 8 x i32> [[MASKEDOFF]], <vscale x 8 x i64> [[OP1]], <vscale x 8 x i32> [[SHIFT]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vnclip.mask.nxv8i32.nxv8i64.nxv8i32.i64(<vscale x 8 x i32> [[MASKEDOFF]], <vscale x 8 x i64> [[OP1]], <vscale x 8 x i32> [[SHIFT]], <vscale x 8 x i1> [[MASK]], i64 0, i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vnclip_wv_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, vint64m8_t op1, vuint32m4_t shift, size_t vl) {
- return __riscv_vnclip_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnclip_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i32> @test_vnclip_wx_i32m4_tum
// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vnclip.mask.nxv8i32.nxv8i64.i64.i64(<vscale x 8 x i32> [[MASKEDOFF]], <vscale x 8 x i64> [[OP1]], i64 [[SHIFT]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vnclip.mask.nxv8i32.nxv8i64.i64.i64(<vscale x 8 x i32> [[MASKEDOFF]], <vscale x 8 x i64> [[OP1]], i64 [[SHIFT]], <vscale x 8 x i1> [[MASK]], i64 0, i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vnclip_wx_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, vint64m8_t op1, size_t shift, size_t vl) {
- return __riscv_vnclip_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnclip_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vnclip_wv_i8mf8_tumu
// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i8> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vnclip.mask.nxv1i8.nxv1i16.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF]], <vscale x 1 x i16> [[OP1]], <vscale x 1 x i8> [[SHIFT]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vnclip.mask.nxv1i8.nxv1i16.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF]], <vscale x 1 x i16> [[OP1]], <vscale x 1 x i8> [[SHIFT]], <vscale x 1 x i1> [[MASK]], i64 0, i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vnclip_wv_i8mf8_tumu(vbool64_t mask, vint8mf8_t maskedoff, vint16mf4_t op1, vuint8mf8_t shift, size_t vl) {
- return __riscv_vnclip_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnclip_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vnclip_wx_i8mf8_tumu
// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vnclip.mask.nxv1i8.nxv1i16.i64.i64(<vscale x 1 x i8> [[MASKEDOFF]], <vscale x 1 x i16> [[OP1]], i64 [[SHIFT]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vnclip.mask.nxv1i8.nxv1i16.i64.i64(<vscale x 1 x i8> [[MASKEDOFF]], <vscale x 1 x i16> [[OP1]], i64 [[SHIFT]], <vscale x 1 x i1> [[MASK]], i64 0, i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vnclip_wx_i8mf8_tumu(vbool64_t mask, vint8mf8_t maskedoff, vint16mf4_t op1, size_t shift, size_t vl) {
- return __riscv_vnclip_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnclip_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vnclip_wv_i8mf4_tumu
// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i8> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vnclip.mask.nxv2i8.nxv2i16.nxv2i8.i64(<vscale x 2 x i8> [[MASKEDOFF]], <vscale x 2 x i16> [[OP1]], <vscale x 2 x i8> [[SHIFT]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vnclip.mask.nxv2i8.nxv2i16.nxv2i8.i64(<vscale x 2 x i8> [[MASKEDOFF]], <vscale x 2 x i16> [[OP1]], <vscale x 2 x i8> [[SHIFT]], <vscale x 2 x i1> [[MASK]], i64 0, i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vnclip_wv_i8mf4_tumu(vbool32_t mask, vint8mf4_t maskedoff, vint16mf2_t op1, vuint8mf4_t shift, size_t vl) {
- return __riscv_vnclip_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnclip_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vnclip_wx_i8mf4_tumu
// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vnclip.mask.nxv2i8.nxv2i16.i64.i64(<vscale x 2 x i8> [[MASKEDOFF]], <vscale x 2 x i16> [[OP1]], i64 [[SHIFT]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vnclip.mask.nxv2i8.nxv2i16.i64.i64(<vscale x 2 x i8> [[MASKEDOFF]], <vscale x 2 x i16> [[OP1]], i64 [[SHIFT]], <vscale x 2 x i1> [[MASK]], i64 0, i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vnclip_wx_i8mf4_tumu(vbool32_t mask, vint8mf4_t maskedoff, vint16mf2_t op1, size_t shift, size_t vl) {
- return __riscv_vnclip_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnclip_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vnclip_wv_i8mf2_tumu
// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i8> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vnclip.mask.nxv4i8.nxv4i16.nxv4i8.i64(<vscale x 4 x i8> [[MASKEDOFF]], <vscale x 4 x i16> [[OP1]], <vscale x 4 x i8> [[SHIFT]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vnclip.mask.nxv4i8.nxv4i16.nxv4i8.i64(<vscale x 4 x i8> [[MASKEDOFF]], <vscale x 4 x i16> [[OP1]], <vscale x 4 x i8> [[SHIFT]], <vscale x 4 x i1> [[MASK]], i64 0, i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vnclip_wv_i8mf2_tumu(vbool16_t mask, vint8mf2_t maskedoff, vint16m1_t op1, vuint8mf2_t shift, size_t vl) {
- return __riscv_vnclip_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnclip_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vnclip_wx_i8mf2_tumu
// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vnclip.mask.nxv4i8.nxv4i16.i64.i64(<vscale x 4 x i8> [[MASKEDOFF]], <vscale x 4 x i16> [[OP1]], i64 [[SHIFT]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vnclip.mask.nxv4i8.nxv4i16.i64.i64(<vscale x 4 x i8> [[MASKEDOFF]], <vscale x 4 x i16> [[OP1]], i64 [[SHIFT]], <vscale x 4 x i1> [[MASK]], i64 0, i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vnclip_wx_i8mf2_tumu(vbool16_t mask, vint8mf2_t maskedoff, vint16m1_t op1, size_t shift, size_t vl) {
- return __riscv_vnclip_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnclip_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vnclip_wv_i8m1_tumu
// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i8> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vnclip.mask.nxv8i8.nxv8i16.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF]], <vscale x 8 x i16> [[OP1]], <vscale x 8 x i8> [[SHIFT]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vnclip.mask.nxv8i8.nxv8i16.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF]], <vscale x 8 x i16> [[OP1]], <vscale x 8 x i8> [[SHIFT]], <vscale x 8 x i1> [[MASK]], i64 0, i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vnclip_wv_i8m1_tumu(vbool8_t mask, vint8m1_t maskedoff, vint16m2_t op1, vuint8m1_t shift, size_t vl) {
- return __riscv_vnclip_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnclip_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vnclip_wx_i8m1_tumu
// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vnclip.mask.nxv8i8.nxv8i16.i64.i64(<vscale x 8 x i8> [[MASKEDOFF]], <vscale x 8 x i16> [[OP1]], i64 [[SHIFT]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vnclip.mask.nxv8i8.nxv8i16.i64.i64(<vscale x 8 x i8> [[MASKEDOFF]], <vscale x 8 x i16> [[OP1]], i64 [[SHIFT]], <vscale x 8 x i1> [[MASK]], i64 0, i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vnclip_wx_i8m1_tumu(vbool8_t mask, vint8m1_t maskedoff, vint16m2_t op1, size_t shift, size_t vl) {
- return __riscv_vnclip_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnclip_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vnclip_wv_i8m2_tumu
// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i8> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vnclip.mask.nxv16i8.nxv16i16.nxv16i8.i64(<vscale x 16 x i8> [[MASKEDOFF]], <vscale x 16 x i16> [[OP1]], <vscale x 16 x i8> [[SHIFT]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vnclip.mask.nxv16i8.nxv16i16.nxv16i8.i64(<vscale x 16 x i8> [[MASKEDOFF]], <vscale x 16 x i16> [[OP1]], <vscale x 16 x i8> [[SHIFT]], <vscale x 16 x i1> [[MASK]], i64 0, i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vnclip_wv_i8m2_tumu(vbool4_t mask, vint8m2_t maskedoff, vint16m4_t op1, vuint8m2_t shift, size_t vl) {
- return __riscv_vnclip_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnclip_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vnclip_wx_i8m2_tumu
// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vnclip.mask.nxv16i8.nxv16i16.i64.i64(<vscale x 16 x i8> [[MASKEDOFF]], <vscale x 16 x i16> [[OP1]], i64 [[SHIFT]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vnclip.mask.nxv16i8.nxv16i16.i64.i64(<vscale x 16 x i8> [[MASKEDOFF]], <vscale x 16 x i16> [[OP1]], i64 [[SHIFT]], <vscale x 16 x i1> [[MASK]], i64 0, i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vnclip_wx_i8m2_tumu(vbool4_t mask, vint8m2_t maskedoff, vint16m4_t op1, size_t shift, size_t vl) {
- return __riscv_vnclip_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnclip_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vnclip_wv_i8m4_tumu
// CHECK-RV64-SAME: (<vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i8> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vnclip.mask.nxv32i8.nxv32i16.nxv32i8.i64(<vscale x 32 x i8> [[MASKEDOFF]], <vscale x 32 x i16> [[OP1]], <vscale x 32 x i8> [[SHIFT]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vnclip.mask.nxv32i8.nxv32i16.nxv32i8.i64(<vscale x 32 x i8> [[MASKEDOFF]], <vscale x 32 x i16> [[OP1]], <vscale x 32 x i8> [[SHIFT]], <vscale x 32 x i1> [[MASK]], i64 0, i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vnclip_wv_i8m4_tumu(vbool2_t mask, vint8m4_t maskedoff, vint16m8_t op1, vuint8m4_t shift, size_t vl) {
- return __riscv_vnclip_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnclip_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vnclip_wx_i8m4_tumu
// CHECK-RV64-SAME: (<vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vnclip.mask.nxv32i8.nxv32i16.i64.i64(<vscale x 32 x i8> [[MASKEDOFF]], <vscale x 32 x i16> [[OP1]], i64 [[SHIFT]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vnclip.mask.nxv32i8.nxv32i16.i64.i64(<vscale x 32 x i8> [[MASKEDOFF]], <vscale x 32 x i16> [[OP1]], i64 [[SHIFT]], <vscale x 32 x i1> [[MASK]], i64 0, i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vnclip_wx_i8m4_tumu(vbool2_t mask, vint8m4_t maskedoff, vint16m8_t op1, size_t shift, size_t vl) {
- return __riscv_vnclip_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnclip_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i16> @test_vnclip_wv_i16mf4_tumu
// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i16> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vnclip.mask.nxv1i16.nxv1i32.nxv1i16.i64(<vscale x 1 x i16> [[MASKEDOFF]], <vscale x 1 x i32> [[OP1]], <vscale x 1 x i16> [[SHIFT]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vnclip.mask.nxv1i16.nxv1i32.nxv1i16.i64(<vscale x 1 x i16> [[MASKEDOFF]], <vscale x 1 x i32> [[OP1]], <vscale x 1 x i16> [[SHIFT]], <vscale x 1 x i1> [[MASK]], i64 0, i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vnclip_wv_i16mf4_tumu(vbool64_t mask, vint16mf4_t maskedoff, vint32mf2_t op1, vuint16mf4_t shift, size_t vl) {
- return __riscv_vnclip_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnclip_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i16> @test_vnclip_wx_i16mf4_tumu
// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vnclip.mask.nxv1i16.nxv1i32.i64.i64(<vscale x 1 x i16> [[MASKEDOFF]], <vscale x 1 x i32> [[OP1]], i64 [[SHIFT]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vnclip.mask.nxv1i16.nxv1i32.i64.i64(<vscale x 1 x i16> [[MASKEDOFF]], <vscale x 1 x i32> [[OP1]], i64 [[SHIFT]], <vscale x 1 x i1> [[MASK]], i64 0, i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vnclip_wx_i16mf4_tumu(vbool64_t mask, vint16mf4_t maskedoff, vint32mf2_t op1, size_t shift, size_t vl) {
- return __riscv_vnclip_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnclip_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i16> @test_vnclip_wv_i16mf2_tumu
// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i16> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vnclip.mask.nxv2i16.nxv2i32.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF]], <vscale x 2 x i32> [[OP1]], <vscale x 2 x i16> [[SHIFT]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vnclip.mask.nxv2i16.nxv2i32.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF]], <vscale x 2 x i32> [[OP1]], <vscale x 2 x i16> [[SHIFT]], <vscale x 2 x i1> [[MASK]], i64 0, i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vnclip_wv_i16mf2_tumu(vbool32_t mask, vint16mf2_t maskedoff, vint32m1_t op1, vuint16mf2_t shift, size_t vl) {
- return __riscv_vnclip_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnclip_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i16> @test_vnclip_wx_i16mf2_tumu
// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vnclip.mask.nxv2i16.nxv2i32.i64.i64(<vscale x 2 x i16> [[MASKEDOFF]], <vscale x 2 x i32> [[OP1]], i64 [[SHIFT]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vnclip.mask.nxv2i16.nxv2i32.i64.i64(<vscale x 2 x i16> [[MASKEDOFF]], <vscale x 2 x i32> [[OP1]], i64 [[SHIFT]], <vscale x 2 x i1> [[MASK]], i64 0, i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vnclip_wx_i16mf2_tumu(vbool32_t mask, vint16mf2_t maskedoff, vint32m1_t op1, size_t shift, size_t vl) {
- return __riscv_vnclip_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnclip_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vnclip_wv_i16m1_tumu
// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i16> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vnclip.mask.nxv4i16.nxv4i32.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF]], <vscale x 4 x i32> [[OP1]], <vscale x 4 x i16> [[SHIFT]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vnclip.mask.nxv4i16.nxv4i32.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF]], <vscale x 4 x i32> [[OP1]], <vscale x 4 x i16> [[SHIFT]], <vscale x 4 x i1> [[MASK]], i64 0, i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vnclip_wv_i16m1_tumu(vbool16_t mask, vint16m1_t maskedoff, vint32m2_t op1, vuint16m1_t shift, size_t vl) {
- return __riscv_vnclip_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnclip_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vnclip_wx_i16m1_tumu
// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vnclip.mask.nxv4i16.nxv4i32.i64.i64(<vscale x 4 x i16> [[MASKEDOFF]], <vscale x 4 x i32> [[OP1]], i64 [[SHIFT]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vnclip.mask.nxv4i16.nxv4i32.i64.i64(<vscale x 4 x i16> [[MASKEDOFF]], <vscale x 4 x i32> [[OP1]], i64 [[SHIFT]], <vscale x 4 x i1> [[MASK]], i64 0, i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vnclip_wx_i16m1_tumu(vbool16_t mask, vint16m1_t maskedoff, vint32m2_t op1, size_t shift, size_t vl) {
- return __riscv_vnclip_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnclip_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i16> @test_vnclip_wv_i16m2_tumu
// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i16> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vnclip.mask.nxv8i16.nxv8i32.nxv8i16.i64(<vscale x 8 x i16> [[MASKEDOFF]], <vscale x 8 x i32> [[OP1]], <vscale x 8 x i16> [[SHIFT]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vnclip.mask.nxv8i16.nxv8i32.nxv8i16.i64(<vscale x 8 x i16> [[MASKEDOFF]], <vscale x 8 x i32> [[OP1]], <vscale x 8 x i16> [[SHIFT]], <vscale x 8 x i1> [[MASK]], i64 0, i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vnclip_wv_i16m2_tumu(vbool8_t mask, vint16m2_t maskedoff, vint32m4_t op1, vuint16m2_t shift, size_t vl) {
- return __riscv_vnclip_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnclip_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i16> @test_vnclip_wx_i16m2_tumu
// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vnclip.mask.nxv8i16.nxv8i32.i64.i64(<vscale x 8 x i16> [[MASKEDOFF]], <vscale x 8 x i32> [[OP1]], i64 [[SHIFT]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vnclip.mask.nxv8i16.nxv8i32.i64.i64(<vscale x 8 x i16> [[MASKEDOFF]], <vscale x 8 x i32> [[OP1]], i64 [[SHIFT]], <vscale x 8 x i1> [[MASK]], i64 0, i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vnclip_wx_i16m2_tumu(vbool8_t mask, vint16m2_t maskedoff, vint32m4_t op1, size_t shift, size_t vl) {
- return __riscv_vnclip_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnclip_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i16> @test_vnclip_wv_i16m4_tumu
// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i16> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vnclip.mask.nxv16i16.nxv16i32.nxv16i16.i64(<vscale x 16 x i16> [[MASKEDOFF]], <vscale x 16 x i32> [[OP1]], <vscale x 16 x i16> [[SHIFT]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vnclip.mask.nxv16i16.nxv16i32.nxv16i16.i64(<vscale x 16 x i16> [[MASKEDOFF]], <vscale x 16 x i32> [[OP1]], <vscale x 16 x i16> [[SHIFT]], <vscale x 16 x i1> [[MASK]], i64 0, i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vnclip_wv_i16m4_tumu(vbool4_t mask, vint16m4_t maskedoff, vint32m8_t op1, vuint16m4_t shift, size_t vl) {
- return __riscv_vnclip_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnclip_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i16> @test_vnclip_wx_i16m4_tumu
// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vnclip.mask.nxv16i16.nxv16i32.i64.i64(<vscale x 16 x i16> [[MASKEDOFF]], <vscale x 16 x i32> [[OP1]], i64 [[SHIFT]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vnclip.mask.nxv16i16.nxv16i32.i64.i64(<vscale x 16 x i16> [[MASKEDOFF]], <vscale x 16 x i32> [[OP1]], i64 [[SHIFT]], <vscale x 16 x i1> [[MASK]], i64 0, i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vnclip_wx_i16m4_tumu(vbool4_t mask, vint16m4_t maskedoff, vint32m8_t op1, size_t shift, size_t vl) {
- return __riscv_vnclip_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnclip_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i32> @test_vnclip_wv_i32mf2_tumu
// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i32> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnclip.mask.nxv1i32.nxv1i64.nxv1i32.i64(<vscale x 1 x i32> [[MASKEDOFF]], <vscale x 1 x i64> [[OP1]], <vscale x 1 x i32> [[SHIFT]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnclip.mask.nxv1i32.nxv1i64.nxv1i32.i64(<vscale x 1 x i32> [[MASKEDOFF]], <vscale x 1 x i64> [[OP1]], <vscale x 1 x i32> [[SHIFT]], <vscale x 1 x i1> [[MASK]], i64 0, i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vnclip_wv_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedoff, vint64m1_t op1, vuint32mf2_t shift, size_t vl) {
- return __riscv_vnclip_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnclip_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i32> @test_vnclip_wx_i32mf2_tumu
// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnclip.mask.nxv1i32.nxv1i64.i64.i64(<vscale x 1 x i32> [[MASKEDOFF]], <vscale x 1 x i64> [[OP1]], i64 [[SHIFT]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnclip.mask.nxv1i32.nxv1i64.i64.i64(<vscale x 1 x i32> [[MASKEDOFF]], <vscale x 1 x i64> [[OP1]], i64 [[SHIFT]], <vscale x 1 x i1> [[MASK]], i64 0, i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vnclip_wx_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedoff, vint64m1_t op1, size_t shift, size_t vl) {
- return __riscv_vnclip_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnclip_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i32> @test_vnclip_wv_i32m1_tumu
// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i32> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vnclip.mask.nxv2i32.nxv2i64.nxv2i32.i64(<vscale x 2 x i32> [[MASKEDOFF]], <vscale x 2 x i64> [[OP1]], <vscale x 2 x i32> [[SHIFT]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vnclip.mask.nxv2i32.nxv2i64.nxv2i32.i64(<vscale x 2 x i32> [[MASKEDOFF]], <vscale x 2 x i64> [[OP1]], <vscale x 2 x i32> [[SHIFT]], <vscale x 2 x i1> [[MASK]], i64 0, i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vnclip_wv_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, vint64m2_t op1, vuint32m1_t shift, size_t vl) {
- return __riscv_vnclip_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnclip_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i32> @test_vnclip_wx_i32m1_tumu
// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vnclip.mask.nxv2i32.nxv2i64.i64.i64(<vscale x 2 x i32> [[MASKEDOFF]], <vscale x 2 x i64> [[OP1]], i64 [[SHIFT]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vnclip.mask.nxv2i32.nxv2i64.i64.i64(<vscale x 2 x i32> [[MASKEDOFF]], <vscale x 2 x i64> [[OP1]], i64 [[SHIFT]], <vscale x 2 x i1> [[MASK]], i64 0, i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vnclip_wx_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, vint64m2_t op1, size_t shift, size_t vl) {
- return __riscv_vnclip_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnclip_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i32> @test_vnclip_wv_i32m2_tumu
// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i32> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vnclip.mask.nxv4i32.nxv4i64.nxv4i32.i64(<vscale x 4 x i32> [[MASKEDOFF]], <vscale x 4 x i64> [[OP1]], <vscale x 4 x i32> [[SHIFT]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vnclip.mask.nxv4i32.nxv4i64.nxv4i32.i64(<vscale x 4 x i32> [[MASKEDOFF]], <vscale x 4 x i64> [[OP1]], <vscale x 4 x i32> [[SHIFT]], <vscale x 4 x i1> [[MASK]], i64 0, i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vnclip_wv_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, vint64m4_t op1, vuint32m2_t shift, size_t vl) {
- return __riscv_vnclip_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnclip_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i32> @test_vnclip_wx_i32m2_tumu
// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vnclip.mask.nxv4i32.nxv4i64.i64.i64(<vscale x 4 x i32> [[MASKEDOFF]], <vscale x 4 x i64> [[OP1]], i64 [[SHIFT]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vnclip.mask.nxv4i32.nxv4i64.i64.i64(<vscale x 4 x i32> [[MASKEDOFF]], <vscale x 4 x i64> [[OP1]], i64 [[SHIFT]], <vscale x 4 x i1> [[MASK]], i64 0, i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vnclip_wx_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, vint64m4_t op1, size_t shift, size_t vl) {
- return __riscv_vnclip_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnclip_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i32> @test_vnclip_wv_i32m4_tumu
// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i32> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vnclip.mask.nxv8i32.nxv8i64.nxv8i32.i64(<vscale x 8 x i32> [[MASKEDOFF]], <vscale x 8 x i64> [[OP1]], <vscale x 8 x i32> [[SHIFT]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vnclip.mask.nxv8i32.nxv8i64.nxv8i32.i64(<vscale x 8 x i32> [[MASKEDOFF]], <vscale x 8 x i64> [[OP1]], <vscale x 8 x i32> [[SHIFT]], <vscale x 8 x i1> [[MASK]], i64 0, i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vnclip_wv_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, vint64m8_t op1, vuint32m4_t shift, size_t vl) {
- return __riscv_vnclip_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnclip_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i32> @test_vnclip_wx_i32m4_tumu
// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vnclip.mask.nxv8i32.nxv8i64.i64.i64(<vscale x 8 x i32> [[MASKEDOFF]], <vscale x 8 x i64> [[OP1]], i64 [[SHIFT]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vnclip.mask.nxv8i32.nxv8i64.i64.i64(<vscale x 8 x i32> [[MASKEDOFF]], <vscale x 8 x i64> [[OP1]], i64 [[SHIFT]], <vscale x 8 x i1> [[MASK]], i64 0, i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vnclip_wx_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, vint64m8_t op1, size_t shift, size_t vl) {
- return __riscv_vnclip_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnclip_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vnclip_wv_i8mf8_mu
// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i8> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vnclip.mask.nxv1i8.nxv1i16.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF]], <vscale x 1 x i16> [[OP1]], <vscale x 1 x i8> [[SHIFT]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vnclip.mask.nxv1i8.nxv1i16.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF]], <vscale x 1 x i16> [[OP1]], <vscale x 1 x i8> [[SHIFT]], <vscale x 1 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vnclip_wv_i8mf8_mu(vbool64_t mask, vint8mf8_t maskedoff, vint16mf4_t op1, vuint8mf8_t shift, size_t vl) {
- return __riscv_vnclip_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnclip_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vnclip_wx_i8mf8_mu
// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vnclip.mask.nxv1i8.nxv1i16.i64.i64(<vscale x 1 x i8> [[MASKEDOFF]], <vscale x 1 x i16> [[OP1]], i64 [[SHIFT]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vnclip.mask.nxv1i8.nxv1i16.i64.i64(<vscale x 1 x i8> [[MASKEDOFF]], <vscale x 1 x i16> [[OP1]], i64 [[SHIFT]], <vscale x 1 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vnclip_wx_i8mf8_mu(vbool64_t mask, vint8mf8_t maskedoff, vint16mf4_t op1, size_t shift, size_t vl) {
- return __riscv_vnclip_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnclip_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vnclip_wv_i8mf4_mu
// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i8> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vnclip.mask.nxv2i8.nxv2i16.nxv2i8.i64(<vscale x 2 x i8> [[MASKEDOFF]], <vscale x 2 x i16> [[OP1]], <vscale x 2 x i8> [[SHIFT]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vnclip.mask.nxv2i8.nxv2i16.nxv2i8.i64(<vscale x 2 x i8> [[MASKEDOFF]], <vscale x 2 x i16> [[OP1]], <vscale x 2 x i8> [[SHIFT]], <vscale x 2 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vnclip_wv_i8mf4_mu(vbool32_t mask, vint8mf4_t maskedoff, vint16mf2_t op1, vuint8mf4_t shift, size_t vl) {
- return __riscv_vnclip_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnclip_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vnclip_wx_i8mf4_mu
// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vnclip.mask.nxv2i8.nxv2i16.i64.i64(<vscale x 2 x i8> [[MASKEDOFF]], <vscale x 2 x i16> [[OP1]], i64 [[SHIFT]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vnclip.mask.nxv2i8.nxv2i16.i64.i64(<vscale x 2 x i8> [[MASKEDOFF]], <vscale x 2 x i16> [[OP1]], i64 [[SHIFT]], <vscale x 2 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vnclip_wx_i8mf4_mu(vbool32_t mask, vint8mf4_t maskedoff, vint16mf2_t op1, size_t shift, size_t vl) {
- return __riscv_vnclip_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnclip_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vnclip_wv_i8mf2_mu
// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i8> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vnclip.mask.nxv4i8.nxv4i16.nxv4i8.i64(<vscale x 4 x i8> [[MASKEDOFF]], <vscale x 4 x i16> [[OP1]], <vscale x 4 x i8> [[SHIFT]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vnclip.mask.nxv4i8.nxv4i16.nxv4i8.i64(<vscale x 4 x i8> [[MASKEDOFF]], <vscale x 4 x i16> [[OP1]], <vscale x 4 x i8> [[SHIFT]], <vscale x 4 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vnclip_wv_i8mf2_mu(vbool16_t mask, vint8mf2_t maskedoff, vint16m1_t op1, vuint8mf2_t shift, size_t vl) {
- return __riscv_vnclip_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnclip_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vnclip_wx_i8mf2_mu
// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vnclip.mask.nxv4i8.nxv4i16.i64.i64(<vscale x 4 x i8> [[MASKEDOFF]], <vscale x 4 x i16> [[OP1]], i64 [[SHIFT]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vnclip.mask.nxv4i8.nxv4i16.i64.i64(<vscale x 4 x i8> [[MASKEDOFF]], <vscale x 4 x i16> [[OP1]], i64 [[SHIFT]], <vscale x 4 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vnclip_wx_i8mf2_mu(vbool16_t mask, vint8mf2_t maskedoff, vint16m1_t op1, size_t shift, size_t vl) {
- return __riscv_vnclip_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnclip_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vnclip_wv_i8m1_mu
// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i8> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vnclip.mask.nxv8i8.nxv8i16.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF]], <vscale x 8 x i16> [[OP1]], <vscale x 8 x i8> [[SHIFT]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vnclip.mask.nxv8i8.nxv8i16.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF]], <vscale x 8 x i16> [[OP1]], <vscale x 8 x i8> [[SHIFT]], <vscale x 8 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vnclip_wv_i8m1_mu(vbool8_t mask, vint8m1_t maskedoff, vint16m2_t op1, vuint8m1_t shift, size_t vl) {
- return __riscv_vnclip_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnclip_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vnclip_wx_i8m1_mu
// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vnclip.mask.nxv8i8.nxv8i16.i64.i64(<vscale x 8 x i8> [[MASKEDOFF]], <vscale x 8 x i16> [[OP1]], i64 [[SHIFT]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vnclip.mask.nxv8i8.nxv8i16.i64.i64(<vscale x 8 x i8> [[MASKEDOFF]], <vscale x 8 x i16> [[OP1]], i64 [[SHIFT]], <vscale x 8 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vnclip_wx_i8m1_mu(vbool8_t mask, vint8m1_t maskedoff, vint16m2_t op1, size_t shift, size_t vl) {
- return __riscv_vnclip_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnclip_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vnclip_wv_i8m2_mu
// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i8> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vnclip.mask.nxv16i8.nxv16i16.nxv16i8.i64(<vscale x 16 x i8> [[MASKEDOFF]], <vscale x 16 x i16> [[OP1]], <vscale x 16 x i8> [[SHIFT]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vnclip.mask.nxv16i8.nxv16i16.nxv16i8.i64(<vscale x 16 x i8> [[MASKEDOFF]], <vscale x 16 x i16> [[OP1]], <vscale x 16 x i8> [[SHIFT]], <vscale x 16 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vnclip_wv_i8m2_mu(vbool4_t mask, vint8m2_t maskedoff, vint16m4_t op1, vuint8m2_t shift, size_t vl) {
- return __riscv_vnclip_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnclip_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vnclip_wx_i8m2_mu
// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vnclip.mask.nxv16i8.nxv16i16.i64.i64(<vscale x 16 x i8> [[MASKEDOFF]], <vscale x 16 x i16> [[OP1]], i64 [[SHIFT]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vnclip.mask.nxv16i8.nxv16i16.i64.i64(<vscale x 16 x i8> [[MASKEDOFF]], <vscale x 16 x i16> [[OP1]], i64 [[SHIFT]], <vscale x 16 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vnclip_wx_i8m2_mu(vbool4_t mask, vint8m2_t maskedoff, vint16m4_t op1, size_t shift, size_t vl) {
- return __riscv_vnclip_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnclip_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vnclip_wv_i8m4_mu
// CHECK-RV64-SAME: (<vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i8> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vnclip.mask.nxv32i8.nxv32i16.nxv32i8.i64(<vscale x 32 x i8> [[MASKEDOFF]], <vscale x 32 x i16> [[OP1]], <vscale x 32 x i8> [[SHIFT]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vnclip.mask.nxv32i8.nxv32i16.nxv32i8.i64(<vscale x 32 x i8> [[MASKEDOFF]], <vscale x 32 x i16> [[OP1]], <vscale x 32 x i8> [[SHIFT]], <vscale x 32 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vnclip_wv_i8m4_mu(vbool2_t mask, vint8m4_t maskedoff, vint16m8_t op1, vuint8m4_t shift, size_t vl) {
- return __riscv_vnclip_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnclip_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vnclip_wx_i8m4_mu
// CHECK-RV64-SAME: (<vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vnclip.mask.nxv32i8.nxv32i16.i64.i64(<vscale x 32 x i8> [[MASKEDOFF]], <vscale x 32 x i16> [[OP1]], i64 [[SHIFT]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vnclip.mask.nxv32i8.nxv32i16.i64.i64(<vscale x 32 x i8> [[MASKEDOFF]], <vscale x 32 x i16> [[OP1]], i64 [[SHIFT]], <vscale x 32 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vnclip_wx_i8m4_mu(vbool2_t mask, vint8m4_t maskedoff, vint16m8_t op1, size_t shift, size_t vl) {
- return __riscv_vnclip_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnclip_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i16> @test_vnclip_wv_i16mf4_mu
// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i16> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vnclip.mask.nxv1i16.nxv1i32.nxv1i16.i64(<vscale x 1 x i16> [[MASKEDOFF]], <vscale x 1 x i32> [[OP1]], <vscale x 1 x i16> [[SHIFT]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vnclip.mask.nxv1i16.nxv1i32.nxv1i16.i64(<vscale x 1 x i16> [[MASKEDOFF]], <vscale x 1 x i32> [[OP1]], <vscale x 1 x i16> [[SHIFT]], <vscale x 1 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vnclip_wv_i16mf4_mu(vbool64_t mask, vint16mf4_t maskedoff, vint32mf2_t op1, vuint16mf4_t shift, size_t vl) {
- return __riscv_vnclip_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnclip_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i16> @test_vnclip_wx_i16mf4_mu
// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vnclip.mask.nxv1i16.nxv1i32.i64.i64(<vscale x 1 x i16> [[MASKEDOFF]], <vscale x 1 x i32> [[OP1]], i64 [[SHIFT]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vnclip.mask.nxv1i16.nxv1i32.i64.i64(<vscale x 1 x i16> [[MASKEDOFF]], <vscale x 1 x i32> [[OP1]], i64 [[SHIFT]], <vscale x 1 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vnclip_wx_i16mf4_mu(vbool64_t mask, vint16mf4_t maskedoff, vint32mf2_t op1, size_t shift, size_t vl) {
- return __riscv_vnclip_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnclip_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i16> @test_vnclip_wv_i16mf2_mu
// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i16> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vnclip.mask.nxv2i16.nxv2i32.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF]], <vscale x 2 x i32> [[OP1]], <vscale x 2 x i16> [[SHIFT]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vnclip.mask.nxv2i16.nxv2i32.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF]], <vscale x 2 x i32> [[OP1]], <vscale x 2 x i16> [[SHIFT]], <vscale x 2 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vnclip_wv_i16mf2_mu(vbool32_t mask, vint16mf2_t maskedoff, vint32m1_t op1, vuint16mf2_t shift, size_t vl) {
- return __riscv_vnclip_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnclip_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i16> @test_vnclip_wx_i16mf2_mu
// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vnclip.mask.nxv2i16.nxv2i32.i64.i64(<vscale x 2 x i16> [[MASKEDOFF]], <vscale x 2 x i32> [[OP1]], i64 [[SHIFT]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vnclip.mask.nxv2i16.nxv2i32.i64.i64(<vscale x 2 x i16> [[MASKEDOFF]], <vscale x 2 x i32> [[OP1]], i64 [[SHIFT]], <vscale x 2 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vnclip_wx_i16mf2_mu(vbool32_t mask, vint16mf2_t maskedoff, vint32m1_t op1, size_t shift, size_t vl) {
- return __riscv_vnclip_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnclip_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vnclip_wv_i16m1_mu
// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i16> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vnclip.mask.nxv4i16.nxv4i32.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF]], <vscale x 4 x i32> [[OP1]], <vscale x 4 x i16> [[SHIFT]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vnclip.mask.nxv4i16.nxv4i32.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF]], <vscale x 4 x i32> [[OP1]], <vscale x 4 x i16> [[SHIFT]], <vscale x 4 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vnclip_wv_i16m1_mu(vbool16_t mask, vint16m1_t maskedoff, vint32m2_t op1, vuint16m1_t shift, size_t vl) {
- return __riscv_vnclip_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnclip_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vnclip_wx_i16m1_mu
// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vnclip.mask.nxv4i16.nxv4i32.i64.i64(<vscale x 4 x i16> [[MASKEDOFF]], <vscale x 4 x i32> [[OP1]], i64 [[SHIFT]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vnclip.mask.nxv4i16.nxv4i32.i64.i64(<vscale x 4 x i16> [[MASKEDOFF]], <vscale x 4 x i32> [[OP1]], i64 [[SHIFT]], <vscale x 4 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vnclip_wx_i16m1_mu(vbool16_t mask, vint16m1_t maskedoff, vint32m2_t op1, size_t shift, size_t vl) {
- return __riscv_vnclip_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnclip_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i16> @test_vnclip_wv_i16m2_mu
// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i16> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vnclip.mask.nxv8i16.nxv8i32.nxv8i16.i64(<vscale x 8 x i16> [[MASKEDOFF]], <vscale x 8 x i32> [[OP1]], <vscale x 8 x i16> [[SHIFT]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vnclip.mask.nxv8i16.nxv8i32.nxv8i16.i64(<vscale x 8 x i16> [[MASKEDOFF]], <vscale x 8 x i32> [[OP1]], <vscale x 8 x i16> [[SHIFT]], <vscale x 8 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vnclip_wv_i16m2_mu(vbool8_t mask, vint16m2_t maskedoff, vint32m4_t op1, vuint16m2_t shift, size_t vl) {
- return __riscv_vnclip_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnclip_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i16> @test_vnclip_wx_i16m2_mu
// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vnclip.mask.nxv8i16.nxv8i32.i64.i64(<vscale x 8 x i16> [[MASKEDOFF]], <vscale x 8 x i32> [[OP1]], i64 [[SHIFT]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vnclip.mask.nxv8i16.nxv8i32.i64.i64(<vscale x 8 x i16> [[MASKEDOFF]], <vscale x 8 x i32> [[OP1]], i64 [[SHIFT]], <vscale x 8 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vnclip_wx_i16m2_mu(vbool8_t mask, vint16m2_t maskedoff, vint32m4_t op1, size_t shift, size_t vl) {
- return __riscv_vnclip_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnclip_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i16> @test_vnclip_wv_i16m4_mu
// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i16> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vnclip.mask.nxv16i16.nxv16i32.nxv16i16.i64(<vscale x 16 x i16> [[MASKEDOFF]], <vscale x 16 x i32> [[OP1]], <vscale x 16 x i16> [[SHIFT]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vnclip.mask.nxv16i16.nxv16i32.nxv16i16.i64(<vscale x 16 x i16> [[MASKEDOFF]], <vscale x 16 x i32> [[OP1]], <vscale x 16 x i16> [[SHIFT]], <vscale x 16 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vnclip_wv_i16m4_mu(vbool4_t mask, vint16m4_t maskedoff, vint32m8_t op1, vuint16m4_t shift, size_t vl) {
- return __riscv_vnclip_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnclip_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i16> @test_vnclip_wx_i16m4_mu
// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vnclip.mask.nxv16i16.nxv16i32.i64.i64(<vscale x 16 x i16> [[MASKEDOFF]], <vscale x 16 x i32> [[OP1]], i64 [[SHIFT]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vnclip.mask.nxv16i16.nxv16i32.i64.i64(<vscale x 16 x i16> [[MASKEDOFF]], <vscale x 16 x i32> [[OP1]], i64 [[SHIFT]], <vscale x 16 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vnclip_wx_i16m4_mu(vbool4_t mask, vint16m4_t maskedoff, vint32m8_t op1, size_t shift, size_t vl) {
- return __riscv_vnclip_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnclip_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i32> @test_vnclip_wv_i32mf2_mu
// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i32> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnclip.mask.nxv1i32.nxv1i64.nxv1i32.i64(<vscale x 1 x i32> [[MASKEDOFF]], <vscale x 1 x i64> [[OP1]], <vscale x 1 x i32> [[SHIFT]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnclip.mask.nxv1i32.nxv1i64.nxv1i32.i64(<vscale x 1 x i32> [[MASKEDOFF]], <vscale x 1 x i64> [[OP1]], <vscale x 1 x i32> [[SHIFT]], <vscale x 1 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vnclip_wv_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff, vint64m1_t op1, vuint32mf2_t shift, size_t vl) {
- return __riscv_vnclip_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnclip_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i32> @test_vnclip_wx_i32mf2_mu
// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnclip.mask.nxv1i32.nxv1i64.i64.i64(<vscale x 1 x i32> [[MASKEDOFF]], <vscale x 1 x i64> [[OP1]], i64 [[SHIFT]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnclip.mask.nxv1i32.nxv1i64.i64.i64(<vscale x 1 x i32> [[MASKEDOFF]], <vscale x 1 x i64> [[OP1]], i64 [[SHIFT]], <vscale x 1 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vnclip_wx_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff, vint64m1_t op1, size_t shift, size_t vl) {
- return __riscv_vnclip_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnclip_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i32> @test_vnclip_wv_i32m1_mu
// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i32> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vnclip.mask.nxv2i32.nxv2i64.nxv2i32.i64(<vscale x 2 x i32> [[MASKEDOFF]], <vscale x 2 x i64> [[OP1]], <vscale x 2 x i32> [[SHIFT]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vnclip.mask.nxv2i32.nxv2i64.nxv2i32.i64(<vscale x 2 x i32> [[MASKEDOFF]], <vscale x 2 x i64> [[OP1]], <vscale x 2 x i32> [[SHIFT]], <vscale x 2 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vnclip_wv_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, vint64m2_t op1, vuint32m1_t shift, size_t vl) {
- return __riscv_vnclip_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnclip_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i32> @test_vnclip_wx_i32m1_mu
// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vnclip.mask.nxv2i32.nxv2i64.i64.i64(<vscale x 2 x i32> [[MASKEDOFF]], <vscale x 2 x i64> [[OP1]], i64 [[SHIFT]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vnclip.mask.nxv2i32.nxv2i64.i64.i64(<vscale x 2 x i32> [[MASKEDOFF]], <vscale x 2 x i64> [[OP1]], i64 [[SHIFT]], <vscale x 2 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vnclip_wx_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, vint64m2_t op1, size_t shift, size_t vl) {
- return __riscv_vnclip_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnclip_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i32> @test_vnclip_wv_i32m2_mu
// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i32> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vnclip.mask.nxv4i32.nxv4i64.nxv4i32.i64(<vscale x 4 x i32> [[MASKEDOFF]], <vscale x 4 x i64> [[OP1]], <vscale x 4 x i32> [[SHIFT]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vnclip.mask.nxv4i32.nxv4i64.nxv4i32.i64(<vscale x 4 x i32> [[MASKEDOFF]], <vscale x 4 x i64> [[OP1]], <vscale x 4 x i32> [[SHIFT]], <vscale x 4 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vnclip_wv_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, vint64m4_t op1, vuint32m2_t shift, size_t vl) {
- return __riscv_vnclip_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnclip_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i32> @test_vnclip_wx_i32m2_mu
// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vnclip.mask.nxv4i32.nxv4i64.i64.i64(<vscale x 4 x i32> [[MASKEDOFF]], <vscale x 4 x i64> [[OP1]], i64 [[SHIFT]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vnclip.mask.nxv4i32.nxv4i64.i64.i64(<vscale x 4 x i32> [[MASKEDOFF]], <vscale x 4 x i64> [[OP1]], i64 [[SHIFT]], <vscale x 4 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vnclip_wx_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, vint64m4_t op1, size_t shift, size_t vl) {
- return __riscv_vnclip_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnclip_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i32> @test_vnclip_wv_i32m4_mu
// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i32> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vnclip.mask.nxv8i32.nxv8i64.nxv8i32.i64(<vscale x 8 x i32> [[MASKEDOFF]], <vscale x 8 x i64> [[OP1]], <vscale x 8 x i32> [[SHIFT]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vnclip.mask.nxv8i32.nxv8i64.nxv8i32.i64(<vscale x 8 x i32> [[MASKEDOFF]], <vscale x 8 x i64> [[OP1]], <vscale x 8 x i32> [[SHIFT]], <vscale x 8 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vnclip_wv_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, vint64m8_t op1, vuint32m4_t shift, size_t vl) {
- return __riscv_vnclip_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnclip_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i32> @test_vnclip_wx_i32m4_mu
// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vnclip.mask.nxv8i32.nxv8i64.i64.i64(<vscale x 8 x i32> [[MASKEDOFF]], <vscale x 8 x i64> [[OP1]], i64 [[SHIFT]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vnclip.mask.nxv8i32.nxv8i64.i64.i64(<vscale x 8 x i32> [[MASKEDOFF]], <vscale x 8 x i64> [[OP1]], i64 [[SHIFT]], <vscale x 8 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vnclip_wx_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, vint64m8_t op1, size_t shift, size_t vl) {
- return __riscv_vnclip_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnclip_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vnclipu_wv_u8mf8_tu
// CHECK-RV64-SAME: (<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i8> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vnclipu.nxv1i8.nxv1i16.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF]], <vscale x 1 x i16> [[OP1]], <vscale x 1 x i8> [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vnclipu.nxv1i8.nxv1i16.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF]], <vscale x 1 x i16> [[OP1]], <vscale x 1 x i8> [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vnclipu_wv_u8mf8_tu(vuint8mf8_t maskedoff, vuint16mf4_t op1, vuint8mf8_t shift, size_t vl) {
- return __riscv_vnclipu_tu(maskedoff, op1, shift, vl);
+ return __riscv_vnclipu_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vnclipu_wx_u8mf8_tu
// CHECK-RV64-SAME: (<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vnclipu.nxv1i8.nxv1i16.i64.i64(<vscale x 1 x i8> [[MASKEDOFF]], <vscale x 1 x i16> [[OP1]], i64 [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vnclipu.nxv1i8.nxv1i16.i64.i64(<vscale x 1 x i8> [[MASKEDOFF]], <vscale x 1 x i16> [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vnclipu_wx_u8mf8_tu(vuint8mf8_t maskedoff, vuint16mf4_t op1, size_t shift, size_t vl) {
- return __riscv_vnclipu_tu(maskedoff, op1, shift, vl);
+ return __riscv_vnclipu_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vnclipu_wv_u8mf4_tu
// CHECK-RV64-SAME: (<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i8> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vnclipu.nxv2i8.nxv2i16.nxv2i8.i64(<vscale x 2 x i8> [[MASKEDOFF]], <vscale x 2 x i16> [[OP1]], <vscale x 2 x i8> [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vnclipu.nxv2i8.nxv2i16.nxv2i8.i64(<vscale x 2 x i8> [[MASKEDOFF]], <vscale x 2 x i16> [[OP1]], <vscale x 2 x i8> [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vnclipu_wv_u8mf4_tu(vuint8mf4_t maskedoff, vuint16mf2_t op1, vuint8mf4_t shift, size_t vl) {
- return __riscv_vnclipu_tu(maskedoff, op1, shift, vl);
+ return __riscv_vnclipu_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vnclipu_wx_u8mf4_tu
// CHECK-RV64-SAME: (<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vnclipu.nxv2i8.nxv2i16.i64.i64(<vscale x 2 x i8> [[MASKEDOFF]], <vscale x 2 x i16> [[OP1]], i64 [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vnclipu.nxv2i8.nxv2i16.i64.i64(<vscale x 2 x i8> [[MASKEDOFF]], <vscale x 2 x i16> [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vnclipu_wx_u8mf4_tu(vuint8mf4_t maskedoff, vuint16mf2_t op1, size_t shift, size_t vl) {
- return __riscv_vnclipu_tu(maskedoff, op1, shift, vl);
+ return __riscv_vnclipu_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vnclipu_wv_u8mf2_tu
// CHECK-RV64-SAME: (<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i8> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vnclipu.nxv4i8.nxv4i16.nxv4i8.i64(<vscale x 4 x i8> [[MASKEDOFF]], <vscale x 4 x i16> [[OP1]], <vscale x 4 x i8> [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vnclipu.nxv4i8.nxv4i16.nxv4i8.i64(<vscale x 4 x i8> [[MASKEDOFF]], <vscale x 4 x i16> [[OP1]], <vscale x 4 x i8> [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vnclipu_wv_u8mf2_tu(vuint8mf2_t maskedoff, vuint16m1_t op1, vuint8mf2_t shift, size_t vl) {
- return __riscv_vnclipu_tu(maskedoff, op1, shift, vl);
+ return __riscv_vnclipu_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vnclipu_wx_u8mf2_tu
// CHECK-RV64-SAME: (<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vnclipu.nxv4i8.nxv4i16.i64.i64(<vscale x 4 x i8> [[MASKEDOFF]], <vscale x 4 x i16> [[OP1]], i64 [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vnclipu.nxv4i8.nxv4i16.i64.i64(<vscale x 4 x i8> [[MASKEDOFF]], <vscale x 4 x i16> [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vnclipu_wx_u8mf2_tu(vuint8mf2_t maskedoff, vuint16m1_t op1, size_t shift, size_t vl) {
- return __riscv_vnclipu_tu(maskedoff, op1, shift, vl);
+ return __riscv_vnclipu_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vnclipu_wv_u8m1_tu
// CHECK-RV64-SAME: (<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i8> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vnclipu.nxv8i8.nxv8i16.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF]], <vscale x 8 x i16> [[OP1]], <vscale x 8 x i8> [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vnclipu.nxv8i8.nxv8i16.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF]], <vscale x 8 x i16> [[OP1]], <vscale x 8 x i8> [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vnclipu_wv_u8m1_tu(vuint8m1_t maskedoff, vuint16m2_t op1, vuint8m1_t shift, size_t vl) {
- return __riscv_vnclipu_tu(maskedoff, op1, shift, vl);
+ return __riscv_vnclipu_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vnclipu_wx_u8m1_tu
// CHECK-RV64-SAME: (<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vnclipu.nxv8i8.nxv8i16.i64.i64(<vscale x 8 x i8> [[MASKEDOFF]], <vscale x 8 x i16> [[OP1]], i64 [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vnclipu.nxv8i8.nxv8i16.i64.i64(<vscale x 8 x i8> [[MASKEDOFF]], <vscale x 8 x i16> [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vnclipu_wx_u8m1_tu(vuint8m1_t maskedoff, vuint16m2_t op1, size_t shift, size_t vl) {
- return __riscv_vnclipu_tu(maskedoff, op1, shift, vl);
+ return __riscv_vnclipu_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vnclipu_wv_u8m2_tu
// CHECK-RV64-SAME: (<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i8> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vnclipu.nxv16i8.nxv16i16.nxv16i8.i64(<vscale x 16 x i8> [[MASKEDOFF]], <vscale x 16 x i16> [[OP1]], <vscale x 16 x i8> [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vnclipu.nxv16i8.nxv16i16.nxv16i8.i64(<vscale x 16 x i8> [[MASKEDOFF]], <vscale x 16 x i16> [[OP1]], <vscale x 16 x i8> [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vnclipu_wv_u8m2_tu(vuint8m2_t maskedoff, vuint16m4_t op1, vuint8m2_t shift, size_t vl) {
- return __riscv_vnclipu_tu(maskedoff, op1, shift, vl);
+ return __riscv_vnclipu_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vnclipu_wx_u8m2_tu
// CHECK-RV64-SAME: (<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vnclipu.nxv16i8.nxv16i16.i64.i64(<vscale x 16 x i8> [[MASKEDOFF]], <vscale x 16 x i16> [[OP1]], i64 [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vnclipu.nxv16i8.nxv16i16.i64.i64(<vscale x 16 x i8> [[MASKEDOFF]], <vscale x 16 x i16> [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vnclipu_wx_u8m2_tu(vuint8m2_t maskedoff, vuint16m4_t op1, size_t shift, size_t vl) {
- return __riscv_vnclipu_tu(maskedoff, op1, shift, vl);
+ return __riscv_vnclipu_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vnclipu_wv_u8m4_tu
// CHECK-RV64-SAME: (<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i8> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vnclipu.nxv32i8.nxv32i16.nxv32i8.i64(<vscale x 32 x i8> [[MASKEDOFF]], <vscale x 32 x i16> [[OP1]], <vscale x 32 x i8> [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vnclipu.nxv32i8.nxv32i16.nxv32i8.i64(<vscale x 32 x i8> [[MASKEDOFF]], <vscale x 32 x i16> [[OP1]], <vscale x 32 x i8> [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vnclipu_wv_u8m4_tu(vuint8m4_t maskedoff, vuint16m8_t op1, vuint8m4_t shift, size_t vl) {
- return __riscv_vnclipu_tu(maskedoff, op1, shift, vl);
+ return __riscv_vnclipu_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vnclipu_wx_u8m4_tu
// CHECK-RV64-SAME: (<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vnclipu.nxv32i8.nxv32i16.i64.i64(<vscale x 32 x i8> [[MASKEDOFF]], <vscale x 32 x i16> [[OP1]], i64 [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vnclipu.nxv32i8.nxv32i16.i64.i64(<vscale x 32 x i8> [[MASKEDOFF]], <vscale x 32 x i16> [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vnclipu_wx_u8m4_tu(vuint8m4_t maskedoff, vuint16m8_t op1, size_t shift, size_t vl) {
- return __riscv_vnclipu_tu(maskedoff, op1, shift, vl);
+ return __riscv_vnclipu_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i16> @test_vnclipu_wv_u16mf4_tu
// CHECK-RV64-SAME: (<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i16> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vnclipu.nxv1i16.nxv1i32.nxv1i16.i64(<vscale x 1 x i16> [[MASKEDOFF]], <vscale x 1 x i32> [[OP1]], <vscale x 1 x i16> [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vnclipu.nxv1i16.nxv1i32.nxv1i16.i64(<vscale x 1 x i16> [[MASKEDOFF]], <vscale x 1 x i32> [[OP1]], <vscale x 1 x i16> [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vnclipu_wv_u16mf4_tu(vuint16mf4_t maskedoff, vuint32mf2_t op1, vuint16mf4_t shift, size_t vl) {
- return __riscv_vnclipu_tu(maskedoff, op1, shift, vl);
+ return __riscv_vnclipu_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i16> @test_vnclipu_wx_u16mf4_tu
// CHECK-RV64-SAME: (<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vnclipu.nxv1i16.nxv1i32.i64.i64(<vscale x 1 x i16> [[MASKEDOFF]], <vscale x 1 x i32> [[OP1]], i64 [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vnclipu.nxv1i16.nxv1i32.i64.i64(<vscale x 1 x i16> [[MASKEDOFF]], <vscale x 1 x i32> [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vnclipu_wx_u16mf4_tu(vuint16mf4_t maskedoff, vuint32mf2_t op1, size_t shift, size_t vl) {
- return __riscv_vnclipu_tu(maskedoff, op1, shift, vl);
+ return __riscv_vnclipu_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i16> @test_vnclipu_wv_u16mf2_tu
// CHECK-RV64-SAME: (<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i16> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vnclipu.nxv2i16.nxv2i32.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF]], <vscale x 2 x i32> [[OP1]], <vscale x 2 x i16> [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vnclipu.nxv2i16.nxv2i32.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF]], <vscale x 2 x i32> [[OP1]], <vscale x 2 x i16> [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vnclipu_wv_u16mf2_tu(vuint16mf2_t maskedoff, vuint32m1_t op1, vuint16mf2_t shift, size_t vl) {
- return __riscv_vnclipu_tu(maskedoff, op1, shift, vl);
+ return __riscv_vnclipu_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i16> @test_vnclipu_wx_u16mf2_tu
// CHECK-RV64-SAME: (<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vnclipu.nxv2i16.nxv2i32.i64.i64(<vscale x 2 x i16> [[MASKEDOFF]], <vscale x 2 x i32> [[OP1]], i64 [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vnclipu.nxv2i16.nxv2i32.i64.i64(<vscale x 2 x i16> [[MASKEDOFF]], <vscale x 2 x i32> [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vnclipu_wx_u16mf2_tu(vuint16mf2_t maskedoff, vuint32m1_t op1, size_t shift, size_t vl) {
- return __riscv_vnclipu_tu(maskedoff, op1, shift, vl);
+ return __riscv_vnclipu_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vnclipu_wv_u16m1_tu
// CHECK-RV64-SAME: (<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i16> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vnclipu.nxv4i16.nxv4i32.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF]], <vscale x 4 x i32> [[OP1]], <vscale x 4 x i16> [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vnclipu.nxv4i16.nxv4i32.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF]], <vscale x 4 x i32> [[OP1]], <vscale x 4 x i16> [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vnclipu_wv_u16m1_tu(vuint16m1_t maskedoff, vuint32m2_t op1, vuint16m1_t shift, size_t vl) {
- return __riscv_vnclipu_tu(maskedoff, op1, shift, vl);
+ return __riscv_vnclipu_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vnclipu_wx_u16m1_tu
// CHECK-RV64-SAME: (<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vnclipu.nxv4i16.nxv4i32.i64.i64(<vscale x 4 x i16> [[MASKEDOFF]], <vscale x 4 x i32> [[OP1]], i64 [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vnclipu.nxv4i16.nxv4i32.i64.i64(<vscale x 4 x i16> [[MASKEDOFF]], <vscale x 4 x i32> [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vnclipu_wx_u16m1_tu(vuint16m1_t maskedoff, vuint32m2_t op1, size_t shift, size_t vl) {
- return __riscv_vnclipu_tu(maskedoff, op1, shift, vl);
+ return __riscv_vnclipu_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i16> @test_vnclipu_wv_u16m2_tu
// CHECK-RV64-SAME: (<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i16> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vnclipu.nxv8i16.nxv8i32.nxv8i16.i64(<vscale x 8 x i16> [[MASKEDOFF]], <vscale x 8 x i32> [[OP1]], <vscale x 8 x i16> [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vnclipu.nxv8i16.nxv8i32.nxv8i16.i64(<vscale x 8 x i16> [[MASKEDOFF]], <vscale x 8 x i32> [[OP1]], <vscale x 8 x i16> [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vnclipu_wv_u16m2_tu(vuint16m2_t maskedoff, vuint32m4_t op1, vuint16m2_t shift, size_t vl) {
- return __riscv_vnclipu_tu(maskedoff, op1, shift, vl);
+ return __riscv_vnclipu_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i16> @test_vnclipu_wx_u16m2_tu
// CHECK-RV64-SAME: (<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vnclipu.nxv8i16.nxv8i32.i64.i64(<vscale x 8 x i16> [[MASKEDOFF]], <vscale x 8 x i32> [[OP1]], i64 [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vnclipu.nxv8i16.nxv8i32.i64.i64(<vscale x 8 x i16> [[MASKEDOFF]], <vscale x 8 x i32> [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vnclipu_wx_u16m2_tu(vuint16m2_t maskedoff, vuint32m4_t op1, size_t shift, size_t vl) {
- return __riscv_vnclipu_tu(maskedoff, op1, shift, vl);
+ return __riscv_vnclipu_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i16> @test_vnclipu_wv_u16m4_tu
// CHECK-RV64-SAME: (<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i16> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vnclipu.nxv16i16.nxv16i32.nxv16i16.i64(<vscale x 16 x i16> [[MASKEDOFF]], <vscale x 16 x i32> [[OP1]], <vscale x 16 x i16> [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vnclipu.nxv16i16.nxv16i32.nxv16i16.i64(<vscale x 16 x i16> [[MASKEDOFF]], <vscale x 16 x i32> [[OP1]], <vscale x 16 x i16> [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vnclipu_wv_u16m4_tu(vuint16m4_t maskedoff, vuint32m8_t op1, vuint16m4_t shift, size_t vl) {
- return __riscv_vnclipu_tu(maskedoff, op1, shift, vl);
+ return __riscv_vnclipu_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i16> @test_vnclipu_wx_u16m4_tu
// CHECK-RV64-SAME: (<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vnclipu.nxv16i16.nxv16i32.i64.i64(<vscale x 16 x i16> [[MASKEDOFF]], <vscale x 16 x i32> [[OP1]], i64 [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vnclipu.nxv16i16.nxv16i32.i64.i64(<vscale x 16 x i16> [[MASKEDOFF]], <vscale x 16 x i32> [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vnclipu_wx_u16m4_tu(vuint16m4_t maskedoff, vuint32m8_t op1, size_t shift, size_t vl) {
- return __riscv_vnclipu_tu(maskedoff, op1, shift, vl);
+ return __riscv_vnclipu_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i32> @test_vnclipu_wv_u32mf2_tu
// CHECK-RV64-SAME: (<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i32> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnclipu.nxv1i32.nxv1i64.nxv1i32.i64(<vscale x 1 x i32> [[MASKEDOFF]], <vscale x 1 x i64> [[OP1]], <vscale x 1 x i32> [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnclipu.nxv1i32.nxv1i64.nxv1i32.i64(<vscale x 1 x i32> [[MASKEDOFF]], <vscale x 1 x i64> [[OP1]], <vscale x 1 x i32> [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vnclipu_wv_u32mf2_tu(vuint32mf2_t maskedoff, vuint64m1_t op1, vuint32mf2_t shift, size_t vl) {
- return __riscv_vnclipu_tu(maskedoff, op1, shift, vl);
+ return __riscv_vnclipu_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i32> @test_vnclipu_wx_u32mf2_tu
// CHECK-RV64-SAME: (<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnclipu.nxv1i32.nxv1i64.i64.i64(<vscale x 1 x i32> [[MASKEDOFF]], <vscale x 1 x i64> [[OP1]], i64 [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnclipu.nxv1i32.nxv1i64.i64.i64(<vscale x 1 x i32> [[MASKEDOFF]], <vscale x 1 x i64> [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vnclipu_wx_u32mf2_tu(vuint32mf2_t maskedoff, vuint64m1_t op1, size_t shift, size_t vl) {
- return __riscv_vnclipu_tu(maskedoff, op1, shift, vl);
+ return __riscv_vnclipu_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i32> @test_vnclipu_wv_u32m1_tu
// CHECK-RV64-SAME: (<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i32> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vnclipu.nxv2i32.nxv2i64.nxv2i32.i64(<vscale x 2 x i32> [[MASKEDOFF]], <vscale x 2 x i64> [[OP1]], <vscale x 2 x i32> [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vnclipu.nxv2i32.nxv2i64.nxv2i32.i64(<vscale x 2 x i32> [[MASKEDOFF]], <vscale x 2 x i64> [[OP1]], <vscale x 2 x i32> [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vnclipu_wv_u32m1_tu(vuint32m1_t maskedoff, vuint64m2_t op1, vuint32m1_t shift, size_t vl) {
- return __riscv_vnclipu_tu(maskedoff, op1, shift, vl);
+ return __riscv_vnclipu_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i32> @test_vnclipu_wx_u32m1_tu
// CHECK-RV64-SAME: (<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vnclipu.nxv2i32.nxv2i64.i64.i64(<vscale x 2 x i32> [[MASKEDOFF]], <vscale x 2 x i64> [[OP1]], i64 [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vnclipu.nxv2i32.nxv2i64.i64.i64(<vscale x 2 x i32> [[MASKEDOFF]], <vscale x 2 x i64> [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vnclipu_wx_u32m1_tu(vuint32m1_t maskedoff, vuint64m2_t op1, size_t shift, size_t vl) {
- return __riscv_vnclipu_tu(maskedoff, op1, shift, vl);
+ return __riscv_vnclipu_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i32> @test_vnclipu_wv_u32m2_tu
// CHECK-RV64-SAME: (<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i32> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vnclipu.nxv4i32.nxv4i64.nxv4i32.i64(<vscale x 4 x i32> [[MASKEDOFF]], <vscale x 4 x i64> [[OP1]], <vscale x 4 x i32> [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vnclipu.nxv4i32.nxv4i64.nxv4i32.i64(<vscale x 4 x i32> [[MASKEDOFF]], <vscale x 4 x i64> [[OP1]], <vscale x 4 x i32> [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vnclipu_wv_u32m2_tu(vuint32m2_t maskedoff, vuint64m4_t op1, vuint32m2_t shift, size_t vl) {
- return __riscv_vnclipu_tu(maskedoff, op1, shift, vl);
+ return __riscv_vnclipu_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i32> @test_vnclipu_wx_u32m2_tu
// CHECK-RV64-SAME: (<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vnclipu.nxv4i32.nxv4i64.i64.i64(<vscale x 4 x i32> [[MASKEDOFF]], <vscale x 4 x i64> [[OP1]], i64 [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vnclipu.nxv4i32.nxv4i64.i64.i64(<vscale x 4 x i32> [[MASKEDOFF]], <vscale x 4 x i64> [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vnclipu_wx_u32m2_tu(vuint32m2_t maskedoff, vuint64m4_t op1, size_t shift, size_t vl) {
- return __riscv_vnclipu_tu(maskedoff, op1, shift, vl);
+ return __riscv_vnclipu_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i32> @test_vnclipu_wv_u32m4_tu
// CHECK-RV64-SAME: (<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i32> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vnclipu.nxv8i32.nxv8i64.nxv8i32.i64(<vscale x 8 x i32> [[MASKEDOFF]], <vscale x 8 x i64> [[OP1]], <vscale x 8 x i32> [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vnclipu.nxv8i32.nxv8i64.nxv8i32.i64(<vscale x 8 x i32> [[MASKEDOFF]], <vscale x 8 x i64> [[OP1]], <vscale x 8 x i32> [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vnclipu_wv_u32m4_tu(vuint32m4_t maskedoff, vuint64m8_t op1, vuint32m4_t shift, size_t vl) {
- return __riscv_vnclipu_tu(maskedoff, op1, shift, vl);
+ return __riscv_vnclipu_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i32> @test_vnclipu_wx_u32m4_tu
// CHECK-RV64-SAME: (<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vnclipu.nxv8i32.nxv8i64.i64.i64(<vscale x 8 x i32> [[MASKEDOFF]], <vscale x 8 x i64> [[OP1]], i64 [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vnclipu.nxv8i32.nxv8i64.i64.i64(<vscale x 8 x i32> [[MASKEDOFF]], <vscale x 8 x i64> [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vnclipu_wx_u32m4_tu(vuint32m4_t maskedoff, vuint64m8_t op1, size_t shift, size_t vl) {
- return __riscv_vnclipu_tu(maskedoff, op1, shift, vl);
+ return __riscv_vnclipu_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vnclipu_wv_u8mf8_tum
// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i8> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vnclipu.mask.nxv1i8.nxv1i16.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF]], <vscale x 1 x i16> [[OP1]], <vscale x 1 x i8> [[SHIFT]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vnclipu.mask.nxv1i8.nxv1i16.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF]], <vscale x 1 x i16> [[OP1]], <vscale x 1 x i8> [[SHIFT]], <vscale x 1 x i1> [[MASK]], i64 0, i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vnclipu_wv_u8mf8_tum(vbool64_t mask, vuint8mf8_t maskedoff, vuint16mf4_t op1, vuint8mf8_t shift, size_t vl) {
- return __riscv_vnclipu_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnclipu_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vnclipu_wx_u8mf8_tum
// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vnclipu.mask.nxv1i8.nxv1i16.i64.i64(<vscale x 1 x i8> [[MASKEDOFF]], <vscale x 1 x i16> [[OP1]], i64 [[SHIFT]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vnclipu.mask.nxv1i8.nxv1i16.i64.i64(<vscale x 1 x i8> [[MASKEDOFF]], <vscale x 1 x i16> [[OP1]], i64 [[SHIFT]], <vscale x 1 x i1> [[MASK]], i64 0, i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vnclipu_wx_u8mf8_tum(vbool64_t mask, vuint8mf8_t maskedoff, vuint16mf4_t op1, size_t shift, size_t vl) {
- return __riscv_vnclipu_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnclipu_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vnclipu_wv_u8mf4_tum
// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i8> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vnclipu.mask.nxv2i8.nxv2i16.nxv2i8.i64(<vscale x 2 x i8> [[MASKEDOFF]], <vscale x 2 x i16> [[OP1]], <vscale x 2 x i8> [[SHIFT]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vnclipu.mask.nxv2i8.nxv2i16.nxv2i8.i64(<vscale x 2 x i8> [[MASKEDOFF]], <vscale x 2 x i16> [[OP1]], <vscale x 2 x i8> [[SHIFT]], <vscale x 2 x i1> [[MASK]], i64 0, i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vnclipu_wv_u8mf4_tum(vbool32_t mask, vuint8mf4_t maskedoff, vuint16mf2_t op1, vuint8mf4_t shift, size_t vl) {
- return __riscv_vnclipu_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnclipu_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vnclipu_wx_u8mf4_tum
// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vnclipu.mask.nxv2i8.nxv2i16.i64.i64(<vscale x 2 x i8> [[MASKEDOFF]], <vscale x 2 x i16> [[OP1]], i64 [[SHIFT]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vnclipu.mask.nxv2i8.nxv2i16.i64.i64(<vscale x 2 x i8> [[MASKEDOFF]], <vscale x 2 x i16> [[OP1]], i64 [[SHIFT]], <vscale x 2 x i1> [[MASK]], i64 0, i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vnclipu_wx_u8mf4_tum(vbool32_t mask, vuint8mf4_t maskedoff, vuint16mf2_t op1, size_t shift, size_t vl) {
- return __riscv_vnclipu_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnclipu_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vnclipu_wv_u8mf2_tum
// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i8> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vnclipu.mask.nxv4i8.nxv4i16.nxv4i8.i64(<vscale x 4 x i8> [[MASKEDOFF]], <vscale x 4 x i16> [[OP1]], <vscale x 4 x i8> [[SHIFT]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vnclipu.mask.nxv4i8.nxv4i16.nxv4i8.i64(<vscale x 4 x i8> [[MASKEDOFF]], <vscale x 4 x i16> [[OP1]], <vscale x 4 x i8> [[SHIFT]], <vscale x 4 x i1> [[MASK]], i64 0, i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vnclipu_wv_u8mf2_tum(vbool16_t mask, vuint8mf2_t maskedoff, vuint16m1_t op1, vuint8mf2_t shift, size_t vl) {
- return __riscv_vnclipu_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnclipu_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vnclipu_wx_u8mf2_tum
// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vnclipu.mask.nxv4i8.nxv4i16.i64.i64(<vscale x 4 x i8> [[MASKEDOFF]], <vscale x 4 x i16> [[OP1]], i64 [[SHIFT]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vnclipu.mask.nxv4i8.nxv4i16.i64.i64(<vscale x 4 x i8> [[MASKEDOFF]], <vscale x 4 x i16> [[OP1]], i64 [[SHIFT]], <vscale x 4 x i1> [[MASK]], i64 0, i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vnclipu_wx_u8mf2_tum(vbool16_t mask, vuint8mf2_t maskedoff, vuint16m1_t op1, size_t shift, size_t vl) {
- return __riscv_vnclipu_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnclipu_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vnclipu_wv_u8m1_tum
// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i8> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vnclipu.mask.nxv8i8.nxv8i16.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF]], <vscale x 8 x i16> [[OP1]], <vscale x 8 x i8> [[SHIFT]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vnclipu.mask.nxv8i8.nxv8i16.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF]], <vscale x 8 x i16> [[OP1]], <vscale x 8 x i8> [[SHIFT]], <vscale x 8 x i1> [[MASK]], i64 0, i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vnclipu_wv_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff, vuint16m2_t op1, vuint8m1_t shift, size_t vl) {
- return __riscv_vnclipu_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnclipu_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vnclipu_wx_u8m1_tum
// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vnclipu.mask.nxv8i8.nxv8i16.i64.i64(<vscale x 8 x i8> [[MASKEDOFF]], <vscale x 8 x i16> [[OP1]], i64 [[SHIFT]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vnclipu.mask.nxv8i8.nxv8i16.i64.i64(<vscale x 8 x i8> [[MASKEDOFF]], <vscale x 8 x i16> [[OP1]], i64 [[SHIFT]], <vscale x 8 x i1> [[MASK]], i64 0, i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vnclipu_wx_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff, vuint16m2_t op1, size_t shift, size_t vl) {
- return __riscv_vnclipu_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnclipu_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vnclipu_wv_u8m2_tum
// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i8> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vnclipu.mask.nxv16i8.nxv16i16.nxv16i8.i64(<vscale x 16 x i8> [[MASKEDOFF]], <vscale x 16 x i16> [[OP1]], <vscale x 16 x i8> [[SHIFT]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vnclipu.mask.nxv16i8.nxv16i16.nxv16i8.i64(<vscale x 16 x i8> [[MASKEDOFF]], <vscale x 16 x i16> [[OP1]], <vscale x 16 x i8> [[SHIFT]], <vscale x 16 x i1> [[MASK]], i64 0, i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vnclipu_wv_u8m2_tum(vbool4_t mask, vuint8m2_t maskedoff, vuint16m4_t op1, vuint8m2_t shift, size_t vl) {
- return __riscv_vnclipu_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnclipu_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vnclipu_wx_u8m2_tum
// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vnclipu.mask.nxv16i8.nxv16i16.i64.i64(<vscale x 16 x i8> [[MASKEDOFF]], <vscale x 16 x i16> [[OP1]], i64 [[SHIFT]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vnclipu.mask.nxv16i8.nxv16i16.i64.i64(<vscale x 16 x i8> [[MASKEDOFF]], <vscale x 16 x i16> [[OP1]], i64 [[SHIFT]], <vscale x 16 x i1> [[MASK]], i64 0, i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vnclipu_wx_u8m2_tum(vbool4_t mask, vuint8m2_t maskedoff, vuint16m4_t op1, size_t shift, size_t vl) {
- return __riscv_vnclipu_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnclipu_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vnclipu_wv_u8m4_tum
// CHECK-RV64-SAME: (<vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i8> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vnclipu.mask.nxv32i8.nxv32i16.nxv32i8.i64(<vscale x 32 x i8> [[MASKEDOFF]], <vscale x 32 x i16> [[OP1]], <vscale x 32 x i8> [[SHIFT]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vnclipu.mask.nxv32i8.nxv32i16.nxv32i8.i64(<vscale x 32 x i8> [[MASKEDOFF]], <vscale x 32 x i16> [[OP1]], <vscale x 32 x i8> [[SHIFT]], <vscale x 32 x i1> [[MASK]], i64 0, i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vnclipu_wv_u8m4_tum(vbool2_t mask, vuint8m4_t maskedoff, vuint16m8_t op1, vuint8m4_t shift, size_t vl) {
- return __riscv_vnclipu_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnclipu_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vnclipu_wx_u8m4_tum
// CHECK-RV64-SAME: (<vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vnclipu.mask.nxv32i8.nxv32i16.i64.i64(<vscale x 32 x i8> [[MASKEDOFF]], <vscale x 32 x i16> [[OP1]], i64 [[SHIFT]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vnclipu.mask.nxv32i8.nxv32i16.i64.i64(<vscale x 32 x i8> [[MASKEDOFF]], <vscale x 32 x i16> [[OP1]], i64 [[SHIFT]], <vscale x 32 x i1> [[MASK]], i64 0, i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vnclipu_wx_u8m4_tum(vbool2_t mask, vuint8m4_t maskedoff, vuint16m8_t op1, size_t shift, size_t vl) {
- return __riscv_vnclipu_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnclipu_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i16> @test_vnclipu_wv_u16mf4_tum
// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i16> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vnclipu.mask.nxv1i16.nxv1i32.nxv1i16.i64(<vscale x 1 x i16> [[MASKEDOFF]], <vscale x 1 x i32> [[OP1]], <vscale x 1 x i16> [[SHIFT]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vnclipu.mask.nxv1i16.nxv1i32.nxv1i16.i64(<vscale x 1 x i16> [[MASKEDOFF]], <vscale x 1 x i32> [[OP1]], <vscale x 1 x i16> [[SHIFT]], <vscale x 1 x i1> [[MASK]], i64 0, i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vnclipu_wv_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, vuint32mf2_t op1, vuint16mf4_t shift, size_t vl) {
- return __riscv_vnclipu_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnclipu_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i16> @test_vnclipu_wx_u16mf4_tum
// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vnclipu.mask.nxv1i16.nxv1i32.i64.i64(<vscale x 1 x i16> [[MASKEDOFF]], <vscale x 1 x i32> [[OP1]], i64 [[SHIFT]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vnclipu.mask.nxv1i16.nxv1i32.i64.i64(<vscale x 1 x i16> [[MASKEDOFF]], <vscale x 1 x i32> [[OP1]], i64 [[SHIFT]], <vscale x 1 x i1> [[MASK]], i64 0, i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vnclipu_wx_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, vuint32mf2_t op1, size_t shift, size_t vl) {
- return __riscv_vnclipu_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnclipu_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i16> @test_vnclipu_wv_u16mf2_tum
// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i16> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vnclipu.mask.nxv2i16.nxv2i32.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF]], <vscale x 2 x i32> [[OP1]], <vscale x 2 x i16> [[SHIFT]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vnclipu.mask.nxv2i16.nxv2i32.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF]], <vscale x 2 x i32> [[OP1]], <vscale x 2 x i16> [[SHIFT]], <vscale x 2 x i1> [[MASK]], i64 0, i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vnclipu_wv_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, vuint32m1_t op1, vuint16mf2_t shift, size_t vl) {
- return __riscv_vnclipu_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnclipu_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i16> @test_vnclipu_wx_u16mf2_tum
// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vnclipu.mask.nxv2i16.nxv2i32.i64.i64(<vscale x 2 x i16> [[MASKEDOFF]], <vscale x 2 x i32> [[OP1]], i64 [[SHIFT]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vnclipu.mask.nxv2i16.nxv2i32.i64.i64(<vscale x 2 x i16> [[MASKEDOFF]], <vscale x 2 x i32> [[OP1]], i64 [[SHIFT]], <vscale x 2 x i1> [[MASK]], i64 0, i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vnclipu_wx_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, vuint32m1_t op1, size_t shift, size_t vl) {
- return __riscv_vnclipu_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnclipu_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vnclipu_wv_u16m1_tum
// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i16> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vnclipu.mask.nxv4i16.nxv4i32.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF]], <vscale x 4 x i32> [[OP1]], <vscale x 4 x i16> [[SHIFT]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vnclipu.mask.nxv4i16.nxv4i32.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF]], <vscale x 4 x i32> [[OP1]], <vscale x 4 x i16> [[SHIFT]], <vscale x 4 x i1> [[MASK]], i64 0, i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vnclipu_wv_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vuint32m2_t op1, vuint16m1_t shift, size_t vl) {
- return __riscv_vnclipu_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnclipu_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vnclipu_wx_u16m1_tum
// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vnclipu.mask.nxv4i16.nxv4i32.i64.i64(<vscale x 4 x i16> [[MASKEDOFF]], <vscale x 4 x i32> [[OP1]], i64 [[SHIFT]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vnclipu.mask.nxv4i16.nxv4i32.i64.i64(<vscale x 4 x i16> [[MASKEDOFF]], <vscale x 4 x i32> [[OP1]], i64 [[SHIFT]], <vscale x 4 x i1> [[MASK]], i64 0, i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vnclipu_wx_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vuint32m2_t op1, size_t shift, size_t vl) {
- return __riscv_vnclipu_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnclipu_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i16> @test_vnclipu_wv_u16m2_tum
// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i16> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vnclipu.mask.nxv8i16.nxv8i32.nxv8i16.i64(<vscale x 8 x i16> [[MASKEDOFF]], <vscale x 8 x i32> [[OP1]], <vscale x 8 x i16> [[SHIFT]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vnclipu.mask.nxv8i16.nxv8i32.nxv8i16.i64(<vscale x 8 x i16> [[MASKEDOFF]], <vscale x 8 x i32> [[OP1]], <vscale x 8 x i16> [[SHIFT]], <vscale x 8 x i1> [[MASK]], i64 0, i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vnclipu_wv_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, vuint32m4_t op1, vuint16m2_t shift, size_t vl) {
- return __riscv_vnclipu_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnclipu_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i16> @test_vnclipu_wx_u16m2_tum
// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vnclipu.mask.nxv8i16.nxv8i32.i64.i64(<vscale x 8 x i16> [[MASKEDOFF]], <vscale x 8 x i32> [[OP1]], i64 [[SHIFT]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vnclipu.mask.nxv8i16.nxv8i32.i64.i64(<vscale x 8 x i16> [[MASKEDOFF]], <vscale x 8 x i32> [[OP1]], i64 [[SHIFT]], <vscale x 8 x i1> [[MASK]], i64 0, i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vnclipu_wx_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, vuint32m4_t op1, size_t shift, size_t vl) {
- return __riscv_vnclipu_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnclipu_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i16> @test_vnclipu_wv_u16m4_tum
// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i16> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vnclipu.mask.nxv16i16.nxv16i32.nxv16i16.i64(<vscale x 16 x i16> [[MASKEDOFF]], <vscale x 16 x i32> [[OP1]], <vscale x 16 x i16> [[SHIFT]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vnclipu.mask.nxv16i16.nxv16i32.nxv16i16.i64(<vscale x 16 x i16> [[MASKEDOFF]], <vscale x 16 x i32> [[OP1]], <vscale x 16 x i16> [[SHIFT]], <vscale x 16 x i1> [[MASK]], i64 0, i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vnclipu_wv_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, vuint32m8_t op1, vuint16m4_t shift, size_t vl) {
- return __riscv_vnclipu_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnclipu_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i16> @test_vnclipu_wx_u16m4_tum
// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vnclipu.mask.nxv16i16.nxv16i32.i64.i64(<vscale x 16 x i16> [[MASKEDOFF]], <vscale x 16 x i32> [[OP1]], i64 [[SHIFT]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vnclipu.mask.nxv16i16.nxv16i32.i64.i64(<vscale x 16 x i16> [[MASKEDOFF]], <vscale x 16 x i32> [[OP1]], i64 [[SHIFT]], <vscale x 16 x i1> [[MASK]], i64 0, i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vnclipu_wx_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, vuint32m8_t op1, size_t shift, size_t vl) {
- return __riscv_vnclipu_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnclipu_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i32> @test_vnclipu_wv_u32mf2_tum
// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i32> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnclipu.mask.nxv1i32.nxv1i64.nxv1i32.i64(<vscale x 1 x i32> [[MASKEDOFF]], <vscale x 1 x i64> [[OP1]], <vscale x 1 x i32> [[SHIFT]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnclipu.mask.nxv1i32.nxv1i64.nxv1i32.i64(<vscale x 1 x i32> [[MASKEDOFF]], <vscale x 1 x i64> [[OP1]], <vscale x 1 x i32> [[SHIFT]], <vscale x 1 x i1> [[MASK]], i64 0, i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vnclipu_wv_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, vuint64m1_t op1, vuint32mf2_t shift, size_t vl) {
- return __riscv_vnclipu_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnclipu_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i32> @test_vnclipu_wx_u32mf2_tum
// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnclipu.mask.nxv1i32.nxv1i64.i64.i64(<vscale x 1 x i32> [[MASKEDOFF]], <vscale x 1 x i64> [[OP1]], i64 [[SHIFT]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnclipu.mask.nxv1i32.nxv1i64.i64.i64(<vscale x 1 x i32> [[MASKEDOFF]], <vscale x 1 x i64> [[OP1]], i64 [[SHIFT]], <vscale x 1 x i1> [[MASK]], i64 0, i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vnclipu_wx_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, vuint64m1_t op1, size_t shift, size_t vl) {
- return __riscv_vnclipu_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnclipu_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i32> @test_vnclipu_wv_u32m1_tum
// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i32> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vnclipu.mask.nxv2i32.nxv2i64.nxv2i32.i64(<vscale x 2 x i32> [[MASKEDOFF]], <vscale x 2 x i64> [[OP1]], <vscale x 2 x i32> [[SHIFT]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vnclipu.mask.nxv2i32.nxv2i64.nxv2i32.i64(<vscale x 2 x i32> [[MASKEDOFF]], <vscale x 2 x i64> [[OP1]], <vscale x 2 x i32> [[SHIFT]], <vscale x 2 x i1> [[MASK]], i64 0, i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vnclipu_wv_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vuint64m2_t op1, vuint32m1_t shift, size_t vl) {
- return __riscv_vnclipu_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnclipu_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i32> @test_vnclipu_wx_u32m1_tum
// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vnclipu.mask.nxv2i32.nxv2i64.i64.i64(<vscale x 2 x i32> [[MASKEDOFF]], <vscale x 2 x i64> [[OP1]], i64 [[SHIFT]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vnclipu.mask.nxv2i32.nxv2i64.i64.i64(<vscale x 2 x i32> [[MASKEDOFF]], <vscale x 2 x i64> [[OP1]], i64 [[SHIFT]], <vscale x 2 x i1> [[MASK]], i64 0, i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vnclipu_wx_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vuint64m2_t op1, size_t shift, size_t vl) {
- return __riscv_vnclipu_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnclipu_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i32> @test_vnclipu_wv_u32m2_tum
// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i32> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vnclipu.mask.nxv4i32.nxv4i64.nxv4i32.i64(<vscale x 4 x i32> [[MASKEDOFF]], <vscale x 4 x i64> [[OP1]], <vscale x 4 x i32> [[SHIFT]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vnclipu.mask.nxv4i32.nxv4i64.nxv4i32.i64(<vscale x 4 x i32> [[MASKEDOFF]], <vscale x 4 x i64> [[OP1]], <vscale x 4 x i32> [[SHIFT]], <vscale x 4 x i1> [[MASK]], i64 0, i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vnclipu_wv_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vuint64m4_t op1, vuint32m2_t shift, size_t vl) {
- return __riscv_vnclipu_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnclipu_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i32> @test_vnclipu_wx_u32m2_tum
// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vnclipu.mask.nxv4i32.nxv4i64.i64.i64(<vscale x 4 x i32> [[MASKEDOFF]], <vscale x 4 x i64> [[OP1]], i64 [[SHIFT]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vnclipu.mask.nxv4i32.nxv4i64.i64.i64(<vscale x 4 x i32> [[MASKEDOFF]], <vscale x 4 x i64> [[OP1]], i64 [[SHIFT]], <vscale x 4 x i1> [[MASK]], i64 0, i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vnclipu_wx_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vuint64m4_t op1, size_t shift, size_t vl) {
- return __riscv_vnclipu_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnclipu_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i32> @test_vnclipu_wv_u32m4_tum
// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i32> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vnclipu.mask.nxv8i32.nxv8i64.nxv8i32.i64(<vscale x 8 x i32> [[MASKEDOFF]], <vscale x 8 x i64> [[OP1]], <vscale x 8 x i32> [[SHIFT]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vnclipu.mask.nxv8i32.nxv8i64.nxv8i32.i64(<vscale x 8 x i32> [[MASKEDOFF]], <vscale x 8 x i64> [[OP1]], <vscale x 8 x i32> [[SHIFT]], <vscale x 8 x i1> [[MASK]], i64 0, i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vnclipu_wv_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vuint64m8_t op1, vuint32m4_t shift, size_t vl) {
- return __riscv_vnclipu_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnclipu_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i32> @test_vnclipu_wx_u32m4_tum
// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vnclipu.mask.nxv8i32.nxv8i64.i64.i64(<vscale x 8 x i32> [[MASKEDOFF]], <vscale x 8 x i64> [[OP1]], i64 [[SHIFT]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vnclipu.mask.nxv8i32.nxv8i64.i64.i64(<vscale x 8 x i32> [[MASKEDOFF]], <vscale x 8 x i64> [[OP1]], i64 [[SHIFT]], <vscale x 8 x i1> [[MASK]], i64 0, i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vnclipu_wx_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vuint64m8_t op1, size_t shift, size_t vl) {
- return __riscv_vnclipu_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnclipu_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vnclipu_wv_u8mf8_tumu
// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i8> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vnclipu.mask.nxv1i8.nxv1i16.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF]], <vscale x 1 x i16> [[OP1]], <vscale x 1 x i8> [[SHIFT]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vnclipu.mask.nxv1i8.nxv1i16.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF]], <vscale x 1 x i16> [[OP1]], <vscale x 1 x i8> [[SHIFT]], <vscale x 1 x i1> [[MASK]], i64 0, i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vnclipu_wv_u8mf8_tumu(vbool64_t mask, vuint8mf8_t maskedoff, vuint16mf4_t op1, vuint8mf8_t shift, size_t vl) {
- return __riscv_vnclipu_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnclipu_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vnclipu_wx_u8mf8_tumu
// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vnclipu.mask.nxv1i8.nxv1i16.i64.i64(<vscale x 1 x i8> [[MASKEDOFF]], <vscale x 1 x i16> [[OP1]], i64 [[SHIFT]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vnclipu.mask.nxv1i8.nxv1i16.i64.i64(<vscale x 1 x i8> [[MASKEDOFF]], <vscale x 1 x i16> [[OP1]], i64 [[SHIFT]], <vscale x 1 x i1> [[MASK]], i64 0, i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vnclipu_wx_u8mf8_tumu(vbool64_t mask, vuint8mf8_t maskedoff, vuint16mf4_t op1, size_t shift, size_t vl) {
- return __riscv_vnclipu_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnclipu_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vnclipu_wv_u8mf4_tumu
// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i8> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vnclipu.mask.nxv2i8.nxv2i16.nxv2i8.i64(<vscale x 2 x i8> [[MASKEDOFF]], <vscale x 2 x i16> [[OP1]], <vscale x 2 x i8> [[SHIFT]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vnclipu.mask.nxv2i8.nxv2i16.nxv2i8.i64(<vscale x 2 x i8> [[MASKEDOFF]], <vscale x 2 x i16> [[OP1]], <vscale x 2 x i8> [[SHIFT]], <vscale x 2 x i1> [[MASK]], i64 0, i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vnclipu_wv_u8mf4_tumu(vbool32_t mask, vuint8mf4_t maskedoff, vuint16mf2_t op1, vuint8mf4_t shift, size_t vl) {
- return __riscv_vnclipu_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnclipu_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vnclipu_wx_u8mf4_tumu
// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vnclipu.mask.nxv2i8.nxv2i16.i64.i64(<vscale x 2 x i8> [[MASKEDOFF]], <vscale x 2 x i16> [[OP1]], i64 [[SHIFT]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vnclipu.mask.nxv2i8.nxv2i16.i64.i64(<vscale x 2 x i8> [[MASKEDOFF]], <vscale x 2 x i16> [[OP1]], i64 [[SHIFT]], <vscale x 2 x i1> [[MASK]], i64 0, i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vnclipu_wx_u8mf4_tumu(vbool32_t mask, vuint8mf4_t maskedoff, vuint16mf2_t op1, size_t shift, size_t vl) {
- return __riscv_vnclipu_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnclipu_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vnclipu_wv_u8mf2_tumu
// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i8> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vnclipu.mask.nxv4i8.nxv4i16.nxv4i8.i64(<vscale x 4 x i8> [[MASKEDOFF]], <vscale x 4 x i16> [[OP1]], <vscale x 4 x i8> [[SHIFT]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vnclipu.mask.nxv4i8.nxv4i16.nxv4i8.i64(<vscale x 4 x i8> [[MASKEDOFF]], <vscale x 4 x i16> [[OP1]], <vscale x 4 x i8> [[SHIFT]], <vscale x 4 x i1> [[MASK]], i64 0, i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vnclipu_wv_u8mf2_tumu(vbool16_t mask, vuint8mf2_t maskedoff, vuint16m1_t op1, vuint8mf2_t shift, size_t vl) {
- return __riscv_vnclipu_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnclipu_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vnclipu_wx_u8mf2_tumu
// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vnclipu.mask.nxv4i8.nxv4i16.i64.i64(<vscale x 4 x i8> [[MASKEDOFF]], <vscale x 4 x i16> [[OP1]], i64 [[SHIFT]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vnclipu.mask.nxv4i8.nxv4i16.i64.i64(<vscale x 4 x i8> [[MASKEDOFF]], <vscale x 4 x i16> [[OP1]], i64 [[SHIFT]], <vscale x 4 x i1> [[MASK]], i64 0, i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vnclipu_wx_u8mf2_tumu(vbool16_t mask, vuint8mf2_t maskedoff, vuint16m1_t op1, size_t shift, size_t vl) {
- return __riscv_vnclipu_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnclipu_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vnclipu_wv_u8m1_tumu
// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i8> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vnclipu.mask.nxv8i8.nxv8i16.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF]], <vscale x 8 x i16> [[OP1]], <vscale x 8 x i8> [[SHIFT]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vnclipu.mask.nxv8i8.nxv8i16.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF]], <vscale x 8 x i16> [[OP1]], <vscale x 8 x i8> [[SHIFT]], <vscale x 8 x i1> [[MASK]], i64 0, i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vnclipu_wv_u8m1_tumu(vbool8_t mask, vuint8m1_t maskedoff, vuint16m2_t op1, vuint8m1_t shift, size_t vl) {
- return __riscv_vnclipu_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnclipu_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vnclipu_wx_u8m1_tumu
// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vnclipu.mask.nxv8i8.nxv8i16.i64.i64(<vscale x 8 x i8> [[MASKEDOFF]], <vscale x 8 x i16> [[OP1]], i64 [[SHIFT]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vnclipu.mask.nxv8i8.nxv8i16.i64.i64(<vscale x 8 x i8> [[MASKEDOFF]], <vscale x 8 x i16> [[OP1]], i64 [[SHIFT]], <vscale x 8 x i1> [[MASK]], i64 0, i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vnclipu_wx_u8m1_tumu(vbool8_t mask, vuint8m1_t maskedoff, vuint16m2_t op1, size_t shift, size_t vl) {
- return __riscv_vnclipu_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnclipu_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vnclipu_wv_u8m2_tumu
// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i8> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vnclipu.mask.nxv16i8.nxv16i16.nxv16i8.i64(<vscale x 16 x i8> [[MASKEDOFF]], <vscale x 16 x i16> [[OP1]], <vscale x 16 x i8> [[SHIFT]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vnclipu.mask.nxv16i8.nxv16i16.nxv16i8.i64(<vscale x 16 x i8> [[MASKEDOFF]], <vscale x 16 x i16> [[OP1]], <vscale x 16 x i8> [[SHIFT]], <vscale x 16 x i1> [[MASK]], i64 0, i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vnclipu_wv_u8m2_tumu(vbool4_t mask, vuint8m2_t maskedoff, vuint16m4_t op1, vuint8m2_t shift, size_t vl) {
- return __riscv_vnclipu_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnclipu_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vnclipu_wx_u8m2_tumu
// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vnclipu.mask.nxv16i8.nxv16i16.i64.i64(<vscale x 16 x i8> [[MASKEDOFF]], <vscale x 16 x i16> [[OP1]], i64 [[SHIFT]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vnclipu.mask.nxv16i8.nxv16i16.i64.i64(<vscale x 16 x i8> [[MASKEDOFF]], <vscale x 16 x i16> [[OP1]], i64 [[SHIFT]], <vscale x 16 x i1> [[MASK]], i64 0, i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vnclipu_wx_u8m2_tumu(vbool4_t mask, vuint8m2_t maskedoff, vuint16m4_t op1, size_t shift, size_t vl) {
- return __riscv_vnclipu_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnclipu_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vnclipu_wv_u8m4_tumu
// CHECK-RV64-SAME: (<vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i8> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vnclipu.mask.nxv32i8.nxv32i16.nxv32i8.i64(<vscale x 32 x i8> [[MASKEDOFF]], <vscale x 32 x i16> [[OP1]], <vscale x 32 x i8> [[SHIFT]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vnclipu.mask.nxv32i8.nxv32i16.nxv32i8.i64(<vscale x 32 x i8> [[MASKEDOFF]], <vscale x 32 x i16> [[OP1]], <vscale x 32 x i8> [[SHIFT]], <vscale x 32 x i1> [[MASK]], i64 0, i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vnclipu_wv_u8m4_tumu(vbool2_t mask, vuint8m4_t maskedoff, vuint16m8_t op1, vuint8m4_t shift, size_t vl) {
- return __riscv_vnclipu_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnclipu_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vnclipu_wx_u8m4_tumu
// CHECK-RV64-SAME: (<vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vnclipu.mask.nxv32i8.nxv32i16.i64.i64(<vscale x 32 x i8> [[MASKEDOFF]], <vscale x 32 x i16> [[OP1]], i64 [[SHIFT]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vnclipu.mask.nxv32i8.nxv32i16.i64.i64(<vscale x 32 x i8> [[MASKEDOFF]], <vscale x 32 x i16> [[OP1]], i64 [[SHIFT]], <vscale x 32 x i1> [[MASK]], i64 0, i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vnclipu_wx_u8m4_tumu(vbool2_t mask, vuint8m4_t maskedoff, vuint16m8_t op1, size_t shift, size_t vl) {
- return __riscv_vnclipu_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnclipu_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i16> @test_vnclipu_wv_u16mf4_tumu
// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i16> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vnclipu.mask.nxv1i16.nxv1i32.nxv1i16.i64(<vscale x 1 x i16> [[MASKEDOFF]], <vscale x 1 x i32> [[OP1]], <vscale x 1 x i16> [[SHIFT]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vnclipu.mask.nxv1i16.nxv1i32.nxv1i16.i64(<vscale x 1 x i16> [[MASKEDOFF]], <vscale x 1 x i32> [[OP1]], <vscale x 1 x i16> [[SHIFT]], <vscale x 1 x i1> [[MASK]], i64 0, i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vnclipu_wv_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, vuint32mf2_t op1, vuint16mf4_t shift, size_t vl) {
- return __riscv_vnclipu_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnclipu_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i16> @test_vnclipu_wx_u16mf4_tumu
// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vnclipu.mask.nxv1i16.nxv1i32.i64.i64(<vscale x 1 x i16> [[MASKEDOFF]], <vscale x 1 x i32> [[OP1]], i64 [[SHIFT]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vnclipu.mask.nxv1i16.nxv1i32.i64.i64(<vscale x 1 x i16> [[MASKEDOFF]], <vscale x 1 x i32> [[OP1]], i64 [[SHIFT]], <vscale x 1 x i1> [[MASK]], i64 0, i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vnclipu_wx_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, vuint32mf2_t op1, size_t shift, size_t vl) {
- return __riscv_vnclipu_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnclipu_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i16> @test_vnclipu_wv_u16mf2_tumu
// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i16> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vnclipu.mask.nxv2i16.nxv2i32.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF]], <vscale x 2 x i32> [[OP1]], <vscale x 2 x i16> [[SHIFT]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vnclipu.mask.nxv2i16.nxv2i32.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF]], <vscale x 2 x i32> [[OP1]], <vscale x 2 x i16> [[SHIFT]], <vscale x 2 x i1> [[MASK]], i64 0, i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vnclipu_wv_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, vuint32m1_t op1, vuint16mf2_t shift, size_t vl) {
- return __riscv_vnclipu_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnclipu_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i16> @test_vnclipu_wx_u16mf2_tumu
// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vnclipu.mask.nxv2i16.nxv2i32.i64.i64(<vscale x 2 x i16> [[MASKEDOFF]], <vscale x 2 x i32> [[OP1]], i64 [[SHIFT]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vnclipu.mask.nxv2i16.nxv2i32.i64.i64(<vscale x 2 x i16> [[MASKEDOFF]], <vscale x 2 x i32> [[OP1]], i64 [[SHIFT]], <vscale x 2 x i1> [[MASK]], i64 0, i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vnclipu_wx_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, vuint32m1_t op1, size_t shift, size_t vl) {
- return __riscv_vnclipu_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnclipu_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vnclipu_wv_u16m1_tumu
// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i16> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vnclipu.mask.nxv4i16.nxv4i32.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF]], <vscale x 4 x i32> [[OP1]], <vscale x 4 x i16> [[SHIFT]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vnclipu.mask.nxv4i16.nxv4i32.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF]], <vscale x 4 x i32> [[OP1]], <vscale x 4 x i16> [[SHIFT]], <vscale x 4 x i1> [[MASK]], i64 0, i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vnclipu_wv_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, vuint32m2_t op1, vuint16m1_t shift, size_t vl) {
- return __riscv_vnclipu_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnclipu_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vnclipu_wx_u16m1_tumu
// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vnclipu.mask.nxv4i16.nxv4i32.i64.i64(<vscale x 4 x i16> [[MASKEDOFF]], <vscale x 4 x i32> [[OP1]], i64 [[SHIFT]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vnclipu.mask.nxv4i16.nxv4i32.i64.i64(<vscale x 4 x i16> [[MASKEDOFF]], <vscale x 4 x i32> [[OP1]], i64 [[SHIFT]], <vscale x 4 x i1> [[MASK]], i64 0, i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vnclipu_wx_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, vuint32m2_t op1, size_t shift, size_t vl) {
- return __riscv_vnclipu_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnclipu_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i16> @test_vnclipu_wv_u16m2_tumu
// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i16> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vnclipu.mask.nxv8i16.nxv8i32.nxv8i16.i64(<vscale x 8 x i16> [[MASKEDOFF]], <vscale x 8 x i32> [[OP1]], <vscale x 8 x i16> [[SHIFT]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vnclipu.mask.nxv8i16.nxv8i32.nxv8i16.i64(<vscale x 8 x i16> [[MASKEDOFF]], <vscale x 8 x i32> [[OP1]], <vscale x 8 x i16> [[SHIFT]], <vscale x 8 x i1> [[MASK]], i64 0, i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vnclipu_wv_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, vuint32m4_t op1, vuint16m2_t shift, size_t vl) {
- return __riscv_vnclipu_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnclipu_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i16> @test_vnclipu_wx_u16m2_tumu
// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vnclipu.mask.nxv8i16.nxv8i32.i64.i64(<vscale x 8 x i16> [[MASKEDOFF]], <vscale x 8 x i32> [[OP1]], i64 [[SHIFT]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vnclipu.mask.nxv8i16.nxv8i32.i64.i64(<vscale x 8 x i16> [[MASKEDOFF]], <vscale x 8 x i32> [[OP1]], i64 [[SHIFT]], <vscale x 8 x i1> [[MASK]], i64 0, i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vnclipu_wx_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, vuint32m4_t op1, size_t shift, size_t vl) {
- return __riscv_vnclipu_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnclipu_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i16> @test_vnclipu_wv_u16m4_tumu
// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i16> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vnclipu.mask.nxv16i16.nxv16i32.nxv16i16.i64(<vscale x 16 x i16> [[MASKEDOFF]], <vscale x 16 x i32> [[OP1]], <vscale x 16 x i16> [[SHIFT]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vnclipu.mask.nxv16i16.nxv16i32.nxv16i16.i64(<vscale x 16 x i16> [[MASKEDOFF]], <vscale x 16 x i32> [[OP1]], <vscale x 16 x i16> [[SHIFT]], <vscale x 16 x i1> [[MASK]], i64 0, i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vnclipu_wv_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, vuint32m8_t op1, vuint16m4_t shift, size_t vl) {
- return __riscv_vnclipu_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnclipu_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i16> @test_vnclipu_wx_u16m4_tumu
// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vnclipu.mask.nxv16i16.nxv16i32.i64.i64(<vscale x 16 x i16> [[MASKEDOFF]], <vscale x 16 x i32> [[OP1]], i64 [[SHIFT]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vnclipu.mask.nxv16i16.nxv16i32.i64.i64(<vscale x 16 x i16> [[MASKEDOFF]], <vscale x 16 x i32> [[OP1]], i64 [[SHIFT]], <vscale x 16 x i1> [[MASK]], i64 0, i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vnclipu_wx_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, vuint32m8_t op1, size_t shift, size_t vl) {
- return __riscv_vnclipu_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnclipu_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i32> @test_vnclipu_wv_u32mf2_tumu
// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i32> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnclipu.mask.nxv1i32.nxv1i64.nxv1i32.i64(<vscale x 1 x i32> [[MASKEDOFF]], <vscale x 1 x i64> [[OP1]], <vscale x 1 x i32> [[SHIFT]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnclipu.mask.nxv1i32.nxv1i64.nxv1i32.i64(<vscale x 1 x i32> [[MASKEDOFF]], <vscale x 1 x i64> [[OP1]], <vscale x 1 x i32> [[SHIFT]], <vscale x 1 x i1> [[MASK]], i64 0, i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vnclipu_wv_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, vuint64m1_t op1, vuint32mf2_t shift, size_t vl) {
- return __riscv_vnclipu_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnclipu_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i32> @test_vnclipu_wx_u32mf2_tumu
// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnclipu.mask.nxv1i32.nxv1i64.i64.i64(<vscale x 1 x i32> [[MASKEDOFF]], <vscale x 1 x i64> [[OP1]], i64 [[SHIFT]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnclipu.mask.nxv1i32.nxv1i64.i64.i64(<vscale x 1 x i32> [[MASKEDOFF]], <vscale x 1 x i64> [[OP1]], i64 [[SHIFT]], <vscale x 1 x i1> [[MASK]], i64 0, i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vnclipu_wx_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, vuint64m1_t op1, size_t shift, size_t vl) {
- return __riscv_vnclipu_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnclipu_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i32> @test_vnclipu_wv_u32m1_tumu
// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i32> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vnclipu.mask.nxv2i32.nxv2i64.nxv2i32.i64(<vscale x 2 x i32> [[MASKEDOFF]], <vscale x 2 x i64> [[OP1]], <vscale x 2 x i32> [[SHIFT]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vnclipu.mask.nxv2i32.nxv2i64.nxv2i32.i64(<vscale x 2 x i32> [[MASKEDOFF]], <vscale x 2 x i64> [[OP1]], <vscale x 2 x i32> [[SHIFT]], <vscale x 2 x i1> [[MASK]], i64 0, i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vnclipu_wv_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vuint64m2_t op1, vuint32m1_t shift, size_t vl) {
- return __riscv_vnclipu_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnclipu_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i32> @test_vnclipu_wx_u32m1_tumu
// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vnclipu.mask.nxv2i32.nxv2i64.i64.i64(<vscale x 2 x i32> [[MASKEDOFF]], <vscale x 2 x i64> [[OP1]], i64 [[SHIFT]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vnclipu.mask.nxv2i32.nxv2i64.i64.i64(<vscale x 2 x i32> [[MASKEDOFF]], <vscale x 2 x i64> [[OP1]], i64 [[SHIFT]], <vscale x 2 x i1> [[MASK]], i64 0, i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vnclipu_wx_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vuint64m2_t op1, size_t shift, size_t vl) {
- return __riscv_vnclipu_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnclipu_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i32> @test_vnclipu_wv_u32m2_tumu
// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i32> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vnclipu.mask.nxv4i32.nxv4i64.nxv4i32.i64(<vscale x 4 x i32> [[MASKEDOFF]], <vscale x 4 x i64> [[OP1]], <vscale x 4 x i32> [[SHIFT]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vnclipu.mask.nxv4i32.nxv4i64.nxv4i32.i64(<vscale x 4 x i32> [[MASKEDOFF]], <vscale x 4 x i64> [[OP1]], <vscale x 4 x i32> [[SHIFT]], <vscale x 4 x i1> [[MASK]], i64 0, i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vnclipu_wv_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, vuint64m4_t op1, vuint32m2_t shift, size_t vl) {
- return __riscv_vnclipu_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnclipu_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i32> @test_vnclipu_wx_u32m2_tumu
// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vnclipu.mask.nxv4i32.nxv4i64.i64.i64(<vscale x 4 x i32> [[MASKEDOFF]], <vscale x 4 x i64> [[OP1]], i64 [[SHIFT]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vnclipu.mask.nxv4i32.nxv4i64.i64.i64(<vscale x 4 x i32> [[MASKEDOFF]], <vscale x 4 x i64> [[OP1]], i64 [[SHIFT]], <vscale x 4 x i1> [[MASK]], i64 0, i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vnclipu_wx_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, vuint64m4_t op1, size_t shift, size_t vl) {
- return __riscv_vnclipu_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnclipu_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i32> @test_vnclipu_wv_u32m4_tumu
// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i32> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vnclipu.mask.nxv8i32.nxv8i64.nxv8i32.i64(<vscale x 8 x i32> [[MASKEDOFF]], <vscale x 8 x i64> [[OP1]], <vscale x 8 x i32> [[SHIFT]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vnclipu.mask.nxv8i32.nxv8i64.nxv8i32.i64(<vscale x 8 x i32> [[MASKEDOFF]], <vscale x 8 x i64> [[OP1]], <vscale x 8 x i32> [[SHIFT]], <vscale x 8 x i1> [[MASK]], i64 0, i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vnclipu_wv_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vuint64m8_t op1, vuint32m4_t shift, size_t vl) {
- return __riscv_vnclipu_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnclipu_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i32> @test_vnclipu_wx_u32m4_tumu
// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vnclipu.mask.nxv8i32.nxv8i64.i64.i64(<vscale x 8 x i32> [[MASKEDOFF]], <vscale x 8 x i64> [[OP1]], i64 [[SHIFT]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vnclipu.mask.nxv8i32.nxv8i64.i64.i64(<vscale x 8 x i32> [[MASKEDOFF]], <vscale x 8 x i64> [[OP1]], i64 [[SHIFT]], <vscale x 8 x i1> [[MASK]], i64 0, i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vnclipu_wx_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vuint64m8_t op1, size_t shift, size_t vl) {
- return __riscv_vnclipu_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnclipu_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vnclipu_wv_u8mf8_mu
// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i8> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vnclipu.mask.nxv1i8.nxv1i16.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF]], <vscale x 1 x i16> [[OP1]], <vscale x 1 x i8> [[SHIFT]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vnclipu.mask.nxv1i8.nxv1i16.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF]], <vscale x 1 x i16> [[OP1]], <vscale x 1 x i8> [[SHIFT]], <vscale x 1 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vnclipu_wv_u8mf8_mu(vbool64_t mask, vuint8mf8_t maskedoff, vuint16mf4_t op1, vuint8mf8_t shift, size_t vl) {
- return __riscv_vnclipu_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnclipu_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vnclipu_wx_u8mf8_mu
// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vnclipu.mask.nxv1i8.nxv1i16.i64.i64(<vscale x 1 x i8> [[MASKEDOFF]], <vscale x 1 x i16> [[OP1]], i64 [[SHIFT]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vnclipu.mask.nxv1i8.nxv1i16.i64.i64(<vscale x 1 x i8> [[MASKEDOFF]], <vscale x 1 x i16> [[OP1]], i64 [[SHIFT]], <vscale x 1 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vnclipu_wx_u8mf8_mu(vbool64_t mask, vuint8mf8_t maskedoff, vuint16mf4_t op1, size_t shift, size_t vl) {
- return __riscv_vnclipu_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnclipu_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vnclipu_wv_u8mf4_mu
// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i8> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vnclipu.mask.nxv2i8.nxv2i16.nxv2i8.i64(<vscale x 2 x i8> [[MASKEDOFF]], <vscale x 2 x i16> [[OP1]], <vscale x 2 x i8> [[SHIFT]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vnclipu.mask.nxv2i8.nxv2i16.nxv2i8.i64(<vscale x 2 x i8> [[MASKEDOFF]], <vscale x 2 x i16> [[OP1]], <vscale x 2 x i8> [[SHIFT]], <vscale x 2 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vnclipu_wv_u8mf4_mu(vbool32_t mask, vuint8mf4_t maskedoff, vuint16mf2_t op1, vuint8mf4_t shift, size_t vl) {
- return __riscv_vnclipu_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnclipu_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vnclipu_wx_u8mf4_mu
// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vnclipu.mask.nxv2i8.nxv2i16.i64.i64(<vscale x 2 x i8> [[MASKEDOFF]], <vscale x 2 x i16> [[OP1]], i64 [[SHIFT]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vnclipu.mask.nxv2i8.nxv2i16.i64.i64(<vscale x 2 x i8> [[MASKEDOFF]], <vscale x 2 x i16> [[OP1]], i64 [[SHIFT]], <vscale x 2 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vnclipu_wx_u8mf4_mu(vbool32_t mask, vuint8mf4_t maskedoff, vuint16mf2_t op1, size_t shift, size_t vl) {
- return __riscv_vnclipu_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnclipu_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vnclipu_wv_u8mf2_mu
// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i8> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vnclipu.mask.nxv4i8.nxv4i16.nxv4i8.i64(<vscale x 4 x i8> [[MASKEDOFF]], <vscale x 4 x i16> [[OP1]], <vscale x 4 x i8> [[SHIFT]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vnclipu.mask.nxv4i8.nxv4i16.nxv4i8.i64(<vscale x 4 x i8> [[MASKEDOFF]], <vscale x 4 x i16> [[OP1]], <vscale x 4 x i8> [[SHIFT]], <vscale x 4 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vnclipu_wv_u8mf2_mu(vbool16_t mask, vuint8mf2_t maskedoff, vuint16m1_t op1, vuint8mf2_t shift, size_t vl) {
- return __riscv_vnclipu_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnclipu_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vnclipu_wx_u8mf2_mu
// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vnclipu.mask.nxv4i8.nxv4i16.i64.i64(<vscale x 4 x i8> [[MASKEDOFF]], <vscale x 4 x i16> [[OP1]], i64 [[SHIFT]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vnclipu.mask.nxv4i8.nxv4i16.i64.i64(<vscale x 4 x i8> [[MASKEDOFF]], <vscale x 4 x i16> [[OP1]], i64 [[SHIFT]], <vscale x 4 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vnclipu_wx_u8mf2_mu(vbool16_t mask, vuint8mf2_t maskedoff, vuint16m1_t op1, size_t shift, size_t vl) {
- return __riscv_vnclipu_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnclipu_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vnclipu_wv_u8m1_mu
// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i8> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vnclipu.mask.nxv8i8.nxv8i16.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF]], <vscale x 8 x i16> [[OP1]], <vscale x 8 x i8> [[SHIFT]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vnclipu.mask.nxv8i8.nxv8i16.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF]], <vscale x 8 x i16> [[OP1]], <vscale x 8 x i8> [[SHIFT]], <vscale x 8 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vnclipu_wv_u8m1_mu(vbool8_t mask, vuint8m1_t maskedoff, vuint16m2_t op1, vuint8m1_t shift, size_t vl) {
- return __riscv_vnclipu_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnclipu_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vnclipu_wx_u8m1_mu
// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vnclipu.mask.nxv8i8.nxv8i16.i64.i64(<vscale x 8 x i8> [[MASKEDOFF]], <vscale x 8 x i16> [[OP1]], i64 [[SHIFT]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vnclipu.mask.nxv8i8.nxv8i16.i64.i64(<vscale x 8 x i8> [[MASKEDOFF]], <vscale x 8 x i16> [[OP1]], i64 [[SHIFT]], <vscale x 8 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vnclipu_wx_u8m1_mu(vbool8_t mask, vuint8m1_t maskedoff, vuint16m2_t op1, size_t shift, size_t vl) {
- return __riscv_vnclipu_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnclipu_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vnclipu_wv_u8m2_mu
// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i8> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vnclipu.mask.nxv16i8.nxv16i16.nxv16i8.i64(<vscale x 16 x i8> [[MASKEDOFF]], <vscale x 16 x i16> [[OP1]], <vscale x 16 x i8> [[SHIFT]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vnclipu.mask.nxv16i8.nxv16i16.nxv16i8.i64(<vscale x 16 x i8> [[MASKEDOFF]], <vscale x 16 x i16> [[OP1]], <vscale x 16 x i8> [[SHIFT]], <vscale x 16 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vnclipu_wv_u8m2_mu(vbool4_t mask, vuint8m2_t maskedoff, vuint16m4_t op1, vuint8m2_t shift, size_t vl) {
- return __riscv_vnclipu_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnclipu_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vnclipu_wx_u8m2_mu
// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vnclipu.mask.nxv16i8.nxv16i16.i64.i64(<vscale x 16 x i8> [[MASKEDOFF]], <vscale x 16 x i16> [[OP1]], i64 [[SHIFT]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vnclipu.mask.nxv16i8.nxv16i16.i64.i64(<vscale x 16 x i8> [[MASKEDOFF]], <vscale x 16 x i16> [[OP1]], i64 [[SHIFT]], <vscale x 16 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vnclipu_wx_u8m2_mu(vbool4_t mask, vuint8m2_t maskedoff, vuint16m4_t op1, size_t shift, size_t vl) {
- return __riscv_vnclipu_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnclipu_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vnclipu_wv_u8m4_mu
// CHECK-RV64-SAME: (<vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i8> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vnclipu.mask.nxv32i8.nxv32i16.nxv32i8.i64(<vscale x 32 x i8> [[MASKEDOFF]], <vscale x 32 x i16> [[OP1]], <vscale x 32 x i8> [[SHIFT]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vnclipu.mask.nxv32i8.nxv32i16.nxv32i8.i64(<vscale x 32 x i8> [[MASKEDOFF]], <vscale x 32 x i16> [[OP1]], <vscale x 32 x i8> [[SHIFT]], <vscale x 32 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vnclipu_wv_u8m4_mu(vbool2_t mask, vuint8m4_t maskedoff, vuint16m8_t op1, vuint8m4_t shift, size_t vl) {
- return __riscv_vnclipu_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnclipu_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vnclipu_wx_u8m4_mu
// CHECK-RV64-SAME: (<vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vnclipu.mask.nxv32i8.nxv32i16.i64.i64(<vscale x 32 x i8> [[MASKEDOFF]], <vscale x 32 x i16> [[OP1]], i64 [[SHIFT]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vnclipu.mask.nxv32i8.nxv32i16.i64.i64(<vscale x 32 x i8> [[MASKEDOFF]], <vscale x 32 x i16> [[OP1]], i64 [[SHIFT]], <vscale x 32 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vnclipu_wx_u8m4_mu(vbool2_t mask, vuint8m4_t maskedoff, vuint16m8_t op1, size_t shift, size_t vl) {
- return __riscv_vnclipu_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnclipu_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i16> @test_vnclipu_wv_u16mf4_mu
// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i16> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vnclipu.mask.nxv1i16.nxv1i32.nxv1i16.i64(<vscale x 1 x i16> [[MASKEDOFF]], <vscale x 1 x i32> [[OP1]], <vscale x 1 x i16> [[SHIFT]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vnclipu.mask.nxv1i16.nxv1i32.nxv1i16.i64(<vscale x 1 x i16> [[MASKEDOFF]], <vscale x 1 x i32> [[OP1]], <vscale x 1 x i16> [[SHIFT]], <vscale x 1 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vnclipu_wv_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, vuint32mf2_t op1, vuint16mf4_t shift, size_t vl) {
- return __riscv_vnclipu_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnclipu_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i16> @test_vnclipu_wx_u16mf4_mu
// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vnclipu.mask.nxv1i16.nxv1i32.i64.i64(<vscale x 1 x i16> [[MASKEDOFF]], <vscale x 1 x i32> [[OP1]], i64 [[SHIFT]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vnclipu.mask.nxv1i16.nxv1i32.i64.i64(<vscale x 1 x i16> [[MASKEDOFF]], <vscale x 1 x i32> [[OP1]], i64 [[SHIFT]], <vscale x 1 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vnclipu_wx_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, vuint32mf2_t op1, size_t shift, size_t vl) {
- return __riscv_vnclipu_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnclipu_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i16> @test_vnclipu_wv_u16mf2_mu
// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i16> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vnclipu.mask.nxv2i16.nxv2i32.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF]], <vscale x 2 x i32> [[OP1]], <vscale x 2 x i16> [[SHIFT]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vnclipu.mask.nxv2i16.nxv2i32.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF]], <vscale x 2 x i32> [[OP1]], <vscale x 2 x i16> [[SHIFT]], <vscale x 2 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vnclipu_wv_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, vuint32m1_t op1, vuint16mf2_t shift, size_t vl) {
- return __riscv_vnclipu_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnclipu_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i16> @test_vnclipu_wx_u16mf2_mu
// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vnclipu.mask.nxv2i16.nxv2i32.i64.i64(<vscale x 2 x i16> [[MASKEDOFF]], <vscale x 2 x i32> [[OP1]], i64 [[SHIFT]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vnclipu.mask.nxv2i16.nxv2i32.i64.i64(<vscale x 2 x i16> [[MASKEDOFF]], <vscale x 2 x i32> [[OP1]], i64 [[SHIFT]], <vscale x 2 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vnclipu_wx_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, vuint32m1_t op1, size_t shift, size_t vl) {
- return __riscv_vnclipu_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnclipu_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vnclipu_wv_u16m1_mu
// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i16> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vnclipu.mask.nxv4i16.nxv4i32.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF]], <vscale x 4 x i32> [[OP1]], <vscale x 4 x i16> [[SHIFT]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vnclipu.mask.nxv4i16.nxv4i32.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF]], <vscale x 4 x i32> [[OP1]], <vscale x 4 x i16> [[SHIFT]], <vscale x 4 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vnclipu_wv_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, vuint32m2_t op1, vuint16m1_t shift, size_t vl) {
- return __riscv_vnclipu_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnclipu_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vnclipu_wx_u16m1_mu
// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vnclipu.mask.nxv4i16.nxv4i32.i64.i64(<vscale x 4 x i16> [[MASKEDOFF]], <vscale x 4 x i32> [[OP1]], i64 [[SHIFT]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vnclipu.mask.nxv4i16.nxv4i32.i64.i64(<vscale x 4 x i16> [[MASKEDOFF]], <vscale x 4 x i32> [[OP1]], i64 [[SHIFT]], <vscale x 4 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vnclipu_wx_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, vuint32m2_t op1, size_t shift, size_t vl) {
- return __riscv_vnclipu_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnclipu_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i16> @test_vnclipu_wv_u16m2_mu
// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i16> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vnclipu.mask.nxv8i16.nxv8i32.nxv8i16.i64(<vscale x 8 x i16> [[MASKEDOFF]], <vscale x 8 x i32> [[OP1]], <vscale x 8 x i16> [[SHIFT]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vnclipu.mask.nxv8i16.nxv8i32.nxv8i16.i64(<vscale x 8 x i16> [[MASKEDOFF]], <vscale x 8 x i32> [[OP1]], <vscale x 8 x i16> [[SHIFT]], <vscale x 8 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vnclipu_wv_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vuint32m4_t op1, vuint16m2_t shift, size_t vl) {
- return __riscv_vnclipu_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnclipu_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i16> @test_vnclipu_wx_u16m2_mu
// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vnclipu.mask.nxv8i16.nxv8i32.i64.i64(<vscale x 8 x i16> [[MASKEDOFF]], <vscale x 8 x i32> [[OP1]], i64 [[SHIFT]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vnclipu.mask.nxv8i16.nxv8i32.i64.i64(<vscale x 8 x i16> [[MASKEDOFF]], <vscale x 8 x i32> [[OP1]], i64 [[SHIFT]], <vscale x 8 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vnclipu_wx_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vuint32m4_t op1, size_t shift, size_t vl) {
- return __riscv_vnclipu_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnclipu_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i16> @test_vnclipu_wv_u16m4_mu
// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i16> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vnclipu.mask.nxv16i16.nxv16i32.nxv16i16.i64(<vscale x 16 x i16> [[MASKEDOFF]], <vscale x 16 x i32> [[OP1]], <vscale x 16 x i16> [[SHIFT]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vnclipu.mask.nxv16i16.nxv16i32.nxv16i16.i64(<vscale x 16 x i16> [[MASKEDOFF]], <vscale x 16 x i32> [[OP1]], <vscale x 16 x i16> [[SHIFT]], <vscale x 16 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vnclipu_wv_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, vuint32m8_t op1, vuint16m4_t shift, size_t vl) {
- return __riscv_vnclipu_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnclipu_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i16> @test_vnclipu_wx_u16m4_mu
// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vnclipu.mask.nxv16i16.nxv16i32.i64.i64(<vscale x 16 x i16> [[MASKEDOFF]], <vscale x 16 x i32> [[OP1]], i64 [[SHIFT]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vnclipu.mask.nxv16i16.nxv16i32.i64.i64(<vscale x 16 x i16> [[MASKEDOFF]], <vscale x 16 x i32> [[OP1]], i64 [[SHIFT]], <vscale x 16 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vnclipu_wx_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, vuint32m8_t op1, size_t shift, size_t vl) {
- return __riscv_vnclipu_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnclipu_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i32> @test_vnclipu_wv_u32mf2_mu
// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i32> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnclipu.mask.nxv1i32.nxv1i64.nxv1i32.i64(<vscale x 1 x i32> [[MASKEDOFF]], <vscale x 1 x i64> [[OP1]], <vscale x 1 x i32> [[SHIFT]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnclipu.mask.nxv1i32.nxv1i64.nxv1i32.i64(<vscale x 1 x i32> [[MASKEDOFF]], <vscale x 1 x i64> [[OP1]], <vscale x 1 x i32> [[SHIFT]], <vscale x 1 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vnclipu_wv_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vuint64m1_t op1, vuint32mf2_t shift, size_t vl) {
- return __riscv_vnclipu_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnclipu_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i32> @test_vnclipu_wx_u32mf2_mu
// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnclipu.mask.nxv1i32.nxv1i64.i64.i64(<vscale x 1 x i32> [[MASKEDOFF]], <vscale x 1 x i64> [[OP1]], i64 [[SHIFT]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnclipu.mask.nxv1i32.nxv1i64.i64.i64(<vscale x 1 x i32> [[MASKEDOFF]], <vscale x 1 x i64> [[OP1]], i64 [[SHIFT]], <vscale x 1 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vnclipu_wx_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vuint64m1_t op1, size_t shift, size_t vl) {
- return __riscv_vnclipu_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnclipu_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i32> @test_vnclipu_wv_u32m1_mu
// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i32> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vnclipu.mask.nxv2i32.nxv2i64.nxv2i32.i64(<vscale x 2 x i32> [[MASKEDOFF]], <vscale x 2 x i64> [[OP1]], <vscale x 2 x i32> [[SHIFT]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vnclipu.mask.nxv2i32.nxv2i64.nxv2i32.i64(<vscale x 2 x i32> [[MASKEDOFF]], <vscale x 2 x i64> [[OP1]], <vscale x 2 x i32> [[SHIFT]], <vscale x 2 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vnclipu_wv_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vuint64m2_t op1, vuint32m1_t shift, size_t vl) {
- return __riscv_vnclipu_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnclipu_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i32> @test_vnclipu_wx_u32m1_mu
// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vnclipu.mask.nxv2i32.nxv2i64.i64.i64(<vscale x 2 x i32> [[MASKEDOFF]], <vscale x 2 x i64> [[OP1]], i64 [[SHIFT]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vnclipu.mask.nxv2i32.nxv2i64.i64.i64(<vscale x 2 x i32> [[MASKEDOFF]], <vscale x 2 x i64> [[OP1]], i64 [[SHIFT]], <vscale x 2 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vnclipu_wx_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vuint64m2_t op1, size_t shift, size_t vl) {
- return __riscv_vnclipu_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnclipu_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i32> @test_vnclipu_wv_u32m2_mu
// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i32> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vnclipu.mask.nxv4i32.nxv4i64.nxv4i32.i64(<vscale x 4 x i32> [[MASKEDOFF]], <vscale x 4 x i64> [[OP1]], <vscale x 4 x i32> [[SHIFT]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vnclipu.mask.nxv4i32.nxv4i64.nxv4i32.i64(<vscale x 4 x i32> [[MASKEDOFF]], <vscale x 4 x i64> [[OP1]], <vscale x 4 x i32> [[SHIFT]], <vscale x 4 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vnclipu_wv_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vuint64m4_t op1, vuint32m2_t shift, size_t vl) {
- return __riscv_vnclipu_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnclipu_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i32> @test_vnclipu_wx_u32m2_mu
// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vnclipu.mask.nxv4i32.nxv4i64.i64.i64(<vscale x 4 x i32> [[MASKEDOFF]], <vscale x 4 x i64> [[OP1]], i64 [[SHIFT]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vnclipu.mask.nxv4i32.nxv4i64.i64.i64(<vscale x 4 x i32> [[MASKEDOFF]], <vscale x 4 x i64> [[OP1]], i64 [[SHIFT]], <vscale x 4 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vnclipu_wx_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vuint64m4_t op1, size_t shift, size_t vl) {
- return __riscv_vnclipu_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnclipu_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i32> @test_vnclipu_wv_u32m4_mu
// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i32> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vnclipu.mask.nxv8i32.nxv8i64.nxv8i32.i64(<vscale x 8 x i32> [[MASKEDOFF]], <vscale x 8 x i64> [[OP1]], <vscale x 8 x i32> [[SHIFT]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vnclipu.mask.nxv8i32.nxv8i64.nxv8i32.i64(<vscale x 8 x i32> [[MASKEDOFF]], <vscale x 8 x i64> [[OP1]], <vscale x 8 x i32> [[SHIFT]], <vscale x 8 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vnclipu_wv_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vuint64m8_t op1, vuint32m4_t shift, size_t vl) {
- return __riscv_vnclipu_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnclipu_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i32> @test_vnclipu_wx_u32m4_mu
// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vnclipu.mask.nxv8i32.nxv8i64.i64.i64(<vscale x 8 x i32> [[MASKEDOFF]], <vscale x 8 x i64> [[OP1]], i64 [[SHIFT]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vnclipu.mask.nxv8i32.nxv8i64.i64.i64(<vscale x 8 x i32> [[MASKEDOFF]], <vscale x 8 x i64> [[OP1]], i64 [[SHIFT]], <vscale x 8 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vnclipu_wx_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vuint64m8_t op1, size_t shift, size_t vl) {
- return __riscv_vnclipu_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vnclipu_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vsmul_vv_i8mf8_tu
// CHECK-RV64-SAME: (<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsmul.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF]], <vscale x 1 x i8> [[OP1]], <vscale x 1 x i8> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsmul.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF]], <vscale x 1 x i8> [[OP1]], <vscale x 1 x i8> [[OP2]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vsmul_vv_i8mf8_tu(vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
- return __riscv_vsmul_tu(maskedoff, op1, op2, vl);
+ return __riscv_vsmul_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vsmul_vx_i8mf8_tu
// CHECK-RV64-SAME: (<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i8 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsmul.nxv1i8.i8.i64(<vscale x 1 x i8> [[MASKEDOFF]], <vscale x 1 x i8> [[OP1]], i8 [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsmul.nxv1i8.i8.i64(<vscale x 1 x i8> [[MASKEDOFF]], <vscale x 1 x i8> [[OP1]], i8 [[OP2]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vsmul_vx_i8mf8_tu(vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) {
- return __riscv_vsmul_tu(maskedoff, op1, op2, vl);
+ return __riscv_vsmul_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vsmul_vv_i8mf4_tu
// CHECK-RV64-SAME: (<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsmul.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[MASKEDOFF]], <vscale x 2 x i8> [[OP1]], <vscale x 2 x i8> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsmul.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[MASKEDOFF]], <vscale x 2 x i8> [[OP1]], <vscale x 2 x i8> [[OP2]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vsmul_vv_i8mf4_tu(vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
- return __riscv_vsmul_tu(maskedoff, op1, op2, vl);
+ return __riscv_vsmul_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vsmul_vx_i8mf4_tu
// CHECK-RV64-SAME: (<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], i8 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsmul.nxv2i8.i8.i64(<vscale x 2 x i8> [[MASKEDOFF]], <vscale x 2 x i8> [[OP1]], i8 [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsmul.nxv2i8.i8.i64(<vscale x 2 x i8> [[MASKEDOFF]], <vscale x 2 x i8> [[OP1]], i8 [[OP2]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vsmul_vx_i8mf4_tu(vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) {
- return __riscv_vsmul_tu(maskedoff, op1, op2, vl);
+ return __riscv_vsmul_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vsmul_vv_i8mf2_tu
// CHECK-RV64-SAME: (<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsmul.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[MASKEDOFF]], <vscale x 4 x i8> [[OP1]], <vscale x 4 x i8> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsmul.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[MASKEDOFF]], <vscale x 4 x i8> [[OP1]], <vscale x 4 x i8> [[OP2]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vsmul_vv_i8mf2_tu(vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
- return __riscv_vsmul_tu(maskedoff, op1, op2, vl);
+ return __riscv_vsmul_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vsmul_vx_i8mf2_tu
// CHECK-RV64-SAME: (<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], i8 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsmul.nxv4i8.i8.i64(<vscale x 4 x i8> [[MASKEDOFF]], <vscale x 4 x i8> [[OP1]], i8 [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsmul.nxv4i8.i8.i64(<vscale x 4 x i8> [[MASKEDOFF]], <vscale x 4 x i8> [[OP1]], i8 [[OP2]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vsmul_vx_i8mf2_tu(vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) {
- return __riscv_vsmul_tu(maskedoff, op1, op2, vl);
+ return __riscv_vsmul_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vsmul_vv_i8m1_tu
// CHECK-RV64-SAME: (<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vsmul.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF]], <vscale x 8 x i8> [[OP1]], <vscale x 8 x i8> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vsmul.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF]], <vscale x 8 x i8> [[OP1]], <vscale x 8 x i8> [[OP2]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vsmul_vv_i8m1_tu(vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) {
- return __riscv_vsmul_tu(maskedoff, op1, op2, vl);
+ return __riscv_vsmul_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vsmul_vx_i8m1_tu
// CHECK-RV64-SAME: (<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], i8 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vsmul.nxv8i8.i8.i64(<vscale x 8 x i8> [[MASKEDOFF]], <vscale x 8 x i8> [[OP1]], i8 [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vsmul.nxv8i8.i8.i64(<vscale x 8 x i8> [[MASKEDOFF]], <vscale x 8 x i8> [[OP1]], i8 [[OP2]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vsmul_vx_i8m1_tu(vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) {
- return __riscv_vsmul_tu(maskedoff, op1, op2, vl);
+ return __riscv_vsmul_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vsmul_vv_i8m2_tu
// CHECK-RV64-SAME: (<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vsmul.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[MASKEDOFF]], <vscale x 16 x i8> [[OP1]], <vscale x 16 x i8> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vsmul.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[MASKEDOFF]], <vscale x 16 x i8> [[OP1]], <vscale x 16 x i8> [[OP2]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vsmul_vv_i8m2_tu(vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) {
- return __riscv_vsmul_tu(maskedoff, op1, op2, vl);
+ return __riscv_vsmul_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vsmul_vx_i8m2_tu
// CHECK-RV64-SAME: (<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], i8 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vsmul.nxv16i8.i8.i64(<vscale x 16 x i8> [[MASKEDOFF]], <vscale x 16 x i8> [[OP1]], i8 [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vsmul.nxv16i8.i8.i64(<vscale x 16 x i8> [[MASKEDOFF]], <vscale x 16 x i8> [[OP1]], i8 [[OP2]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vsmul_vx_i8m2_tu(vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) {
- return __riscv_vsmul_tu(maskedoff, op1, op2, vl);
+ return __riscv_vsmul_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vsmul_vv_i8m4_tu
// CHECK-RV64-SAME: (<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsmul.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[MASKEDOFF]], <vscale x 32 x i8> [[OP1]], <vscale x 32 x i8> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsmul.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[MASKEDOFF]], <vscale x 32 x i8> [[OP1]], <vscale x 32 x i8> [[OP2]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vsmul_vv_i8m4_tu(vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) {
- return __riscv_vsmul_tu(maskedoff, op1, op2, vl);
+ return __riscv_vsmul_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vsmul_vx_i8m4_tu
// CHECK-RV64-SAME: (<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], i8 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsmul.nxv32i8.i8.i64(<vscale x 32 x i8> [[MASKEDOFF]], <vscale x 32 x i8> [[OP1]], i8 [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsmul.nxv32i8.i8.i64(<vscale x 32 x i8> [[MASKEDOFF]], <vscale x 32 x i8> [[OP1]], i8 [[OP2]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vsmul_vx_i8m4_tu(vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) {
- return __riscv_vsmul_tu(maskedoff, op1, op2, vl);
+ return __riscv_vsmul_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 64 x i8> @test_vsmul_vv_i8m8_tu
// CHECK-RV64-SAME: (<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsmul.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[MASKEDOFF]], <vscale x 64 x i8> [[OP1]], <vscale x 64 x i8> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsmul.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[MASKEDOFF]], <vscale x 64 x i8> [[OP1]], <vscale x 64 x i8> [[OP2]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vsmul_vv_i8m8_tu(vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl) {
- return __riscv_vsmul_tu(maskedoff, op1, op2, vl);
+ return __riscv_vsmul_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 64 x i8> @test_vsmul_vx_i8m8_tu
// CHECK-RV64-SAME: (<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], i8 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsmul.nxv64i8.i8.i64(<vscale x 64 x i8> [[MASKEDOFF]], <vscale x 64 x i8> [[OP1]], i8 [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsmul.nxv64i8.i8.i64(<vscale x 64 x i8> [[MASKEDOFF]], <vscale x 64 x i8> [[OP1]], i8 [[OP2]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vsmul_vx_i8m8_tu(vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl) {
- return __riscv_vsmul_tu(maskedoff, op1, op2, vl);
+ return __riscv_vsmul_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i16> @test_vsmul_vv_i16mf4_tu
// CHECK-RV64-SAME: (<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsmul.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[MASKEDOFF]], <vscale x 1 x i16> [[OP1]], <vscale x 1 x i16> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsmul.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[MASKEDOFF]], <vscale x 1 x i16> [[OP1]], <vscale x 1 x i16> [[OP2]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vsmul_vv_i16mf4_tu(vint16mf4_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
- return __riscv_vsmul_tu(maskedoff, op1, op2, vl);
+ return __riscv_vsmul_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i16> @test_vsmul_vx_i16mf4_tu
// CHECK-RV64-SAME: (<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i16 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsmul.nxv1i16.i16.i64(<vscale x 1 x i16> [[MASKEDOFF]], <vscale x 1 x i16> [[OP1]], i16 [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsmul.nxv1i16.i16.i64(<vscale x 1 x i16> [[MASKEDOFF]], <vscale x 1 x i16> [[OP1]], i16 [[OP2]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vsmul_vx_i16mf4_tu(vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) {
- return __riscv_vsmul_tu(maskedoff, op1, op2, vl);
+ return __riscv_vsmul_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i16> @test_vsmul_vv_i16mf2_tu
// CHECK-RV64-SAME: (<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsmul.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF]], <vscale x 2 x i16> [[OP1]], <vscale x 2 x i16> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsmul.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF]], <vscale x 2 x i16> [[OP1]], <vscale x 2 x i16> [[OP2]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vsmul_vv_i16mf2_tu(vint16mf2_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
- return __riscv_vsmul_tu(maskedoff, op1, op2, vl);
+ return __riscv_vsmul_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i16> @test_vsmul_vx_i16mf2_tu
// CHECK-RV64-SAME: (<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i16 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsmul.nxv2i16.i16.i64(<vscale x 2 x i16> [[MASKEDOFF]], <vscale x 2 x i16> [[OP1]], i16 [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsmul.nxv2i16.i16.i64(<vscale x 2 x i16> [[MASKEDOFF]], <vscale x 2 x i16> [[OP1]], i16 [[OP2]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vsmul_vx_i16mf2_tu(vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) {
- return __riscv_vsmul_tu(maskedoff, op1, op2, vl);
+ return __riscv_vsmul_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vsmul_vv_i16m1_tu
// CHECK-RV64-SAME: (<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsmul.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF]], <vscale x 4 x i16> [[OP1]], <vscale x 4 x i16> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsmul.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF]], <vscale x 4 x i16> [[OP1]], <vscale x 4 x i16> [[OP2]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vsmul_vv_i16m1_tu(vint16m1_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) {
- return __riscv_vsmul_tu(maskedoff, op1, op2, vl);
+ return __riscv_vsmul_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vsmul_vx_i16m1_tu
// CHECK-RV64-SAME: (<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i16 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsmul.nxv4i16.i16.i64(<vscale x 4 x i16> [[MASKEDOFF]], <vscale x 4 x i16> [[OP1]], i16 [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsmul.nxv4i16.i16.i64(<vscale x 4 x i16> [[MASKEDOFF]], <vscale x 4 x i16> [[OP1]], i16 [[OP2]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vsmul_vx_i16m1_tu(vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) {
- return __riscv_vsmul_tu(maskedoff, op1, op2, vl);
+ return __riscv_vsmul_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i16> @test_vsmul_vv_i16m2_tu
// CHECK-RV64-SAME: (<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsmul.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[MASKEDOFF]], <vscale x 8 x i16> [[OP1]], <vscale x 8 x i16> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsmul.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[MASKEDOFF]], <vscale x 8 x i16> [[OP1]], <vscale x 8 x i16> [[OP2]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vsmul_vv_i16m2_tu(vint16m2_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) {
- return __riscv_vsmul_tu(maskedoff, op1, op2, vl);
+ return __riscv_vsmul_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i16> @test_vsmul_vx_i16m2_tu
// CHECK-RV64-SAME: (<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i16 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsmul.nxv8i16.i16.i64(<vscale x 8 x i16> [[MASKEDOFF]], <vscale x 8 x i16> [[OP1]], i16 [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsmul.nxv8i16.i16.i64(<vscale x 8 x i16> [[MASKEDOFF]], <vscale x 8 x i16> [[OP1]], i16 [[OP2]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vsmul_vx_i16m2_tu(vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) {
- return __riscv_vsmul_tu(maskedoff, op1, op2, vl);
+ return __riscv_vsmul_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i16> @test_vsmul_vv_i16m4_tu
// CHECK-RV64-SAME: (<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsmul.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[MASKEDOFF]], <vscale x 16 x i16> [[OP1]], <vscale x 16 x i16> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsmul.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[MASKEDOFF]], <vscale x 16 x i16> [[OP1]], <vscale x 16 x i16> [[OP2]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vsmul_vv_i16m4_tu(vint16m4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) {
- return __riscv_vsmul_tu(maskedoff, op1, op2, vl);
+ return __riscv_vsmul_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i16> @test_vsmul_vx_i16m4_tu
// CHECK-RV64-SAME: (<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i16 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsmul.nxv16i16.i16.i64(<vscale x 16 x i16> [[MASKEDOFF]], <vscale x 16 x i16> [[OP1]], i16 [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsmul.nxv16i16.i16.i64(<vscale x 16 x i16> [[MASKEDOFF]], <vscale x 16 x i16> [[OP1]], i16 [[OP2]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vsmul_vx_i16m4_tu(vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) {
- return __riscv_vsmul_tu(maskedoff, op1, op2, vl);
+ return __riscv_vsmul_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i16> @test_vsmul_vv_i16m8_tu
// CHECK-RV64-SAME: (<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsmul.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[MASKEDOFF]], <vscale x 32 x i16> [[OP1]], <vscale x 32 x i16> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsmul.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[MASKEDOFF]], <vscale x 32 x i16> [[OP1]], <vscale x 32 x i16> [[OP2]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vsmul_vv_i16m8_tu(vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl) {
- return __riscv_vsmul_tu(maskedoff, op1, op2, vl);
+ return __riscv_vsmul_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i16> @test_vsmul_vx_i16m8_tu
// CHECK-RV64-SAME: (<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i16 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsmul.nxv32i16.i16.i64(<vscale x 32 x i16> [[MASKEDOFF]], <vscale x 32 x i16> [[OP1]], i16 [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsmul.nxv32i16.i16.i64(<vscale x 32 x i16> [[MASKEDOFF]], <vscale x 32 x i16> [[OP1]], i16 [[OP2]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vsmul_vx_i16m8_tu(vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) {
- return __riscv_vsmul_tu(maskedoff, op1, op2, vl);
+ return __riscv_vsmul_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i32> @test_vsmul_vv_i32mf2_tu
// CHECK-RV64-SAME: (<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsmul.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[MASKEDOFF]], <vscale x 1 x i32> [[OP1]], <vscale x 1 x i32> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsmul.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[MASKEDOFF]], <vscale x 1 x i32> [[OP1]], <vscale x 1 x i32> [[OP2]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vsmul_vv_i32mf2_tu(vint32mf2_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
- return __riscv_vsmul_tu(maskedoff, op1, op2, vl);
+ return __riscv_vsmul_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i32> @test_vsmul_vx_i32mf2_tu
// CHECK-RV64-SAME: (<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsmul.nxv1i32.i32.i64(<vscale x 1 x i32> [[MASKEDOFF]], <vscale x 1 x i32> [[OP1]], i32 [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsmul.nxv1i32.i32.i64(<vscale x 1 x i32> [[MASKEDOFF]], <vscale x 1 x i32> [[OP1]], i32 [[OP2]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vsmul_vx_i32mf2_tu(vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) {
- return __riscv_vsmul_tu(maskedoff, op1, op2, vl);
+ return __riscv_vsmul_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i32> @test_vsmul_vv_i32m1_tu
// CHECK-RV64-SAME: (<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsmul.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[MASKEDOFF]], <vscale x 2 x i32> [[OP1]], <vscale x 2 x i32> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsmul.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[MASKEDOFF]], <vscale x 2 x i32> [[OP1]], <vscale x 2 x i32> [[OP2]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vsmul_vv_i32m1_tu(vint32m1_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) {
- return __riscv_vsmul_tu(maskedoff, op1, op2, vl);
+ return __riscv_vsmul_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i32> @test_vsmul_vx_i32m1_tu
// CHECK-RV64-SAME: (<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i32 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsmul.nxv2i32.i32.i64(<vscale x 2 x i32> [[MASKEDOFF]], <vscale x 2 x i32> [[OP1]], i32 [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsmul.nxv2i32.i32.i64(<vscale x 2 x i32> [[MASKEDOFF]], <vscale x 2 x i32> [[OP1]], i32 [[OP2]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vsmul_vx_i32m1_tu(vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) {
- return __riscv_vsmul_tu(maskedoff, op1, op2, vl);
+ return __riscv_vsmul_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i32> @test_vsmul_vv_i32m2_tu
// CHECK-RV64-SAME: (<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsmul.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[MASKEDOFF]], <vscale x 4 x i32> [[OP1]], <vscale x 4 x i32> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsmul.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[MASKEDOFF]], <vscale x 4 x i32> [[OP1]], <vscale x 4 x i32> [[OP2]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vsmul_vv_i32m2_tu(vint32m2_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) {
- return __riscv_vsmul_tu(maskedoff, op1, op2, vl);
+ return __riscv_vsmul_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i32> @test_vsmul_vx_i32m2_tu
// CHECK-RV64-SAME: (<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i32 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsmul.nxv4i32.i32.i64(<vscale x 4 x i32> [[MASKEDOFF]], <vscale x 4 x i32> [[OP1]], i32 [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsmul.nxv4i32.i32.i64(<vscale x 4 x i32> [[MASKEDOFF]], <vscale x 4 x i32> [[OP1]], i32 [[OP2]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vsmul_vx_i32m2_tu(vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) {
- return __riscv_vsmul_tu(maskedoff, op1, op2, vl);
+ return __riscv_vsmul_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i32> @test_vsmul_vv_i32m4_tu
// CHECK-RV64-SAME: (<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsmul.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[MASKEDOFF]], <vscale x 8 x i32> [[OP1]], <vscale x 8 x i32> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsmul.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[MASKEDOFF]], <vscale x 8 x i32> [[OP1]], <vscale x 8 x i32> [[OP2]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vsmul_vv_i32m4_tu(vint32m4_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) {
- return __riscv_vsmul_tu(maskedoff, op1, op2, vl);
+ return __riscv_vsmul_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i32> @test_vsmul_vx_i32m4_tu
// CHECK-RV64-SAME: (<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i32 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsmul.nxv8i32.i32.i64(<vscale x 8 x i32> [[MASKEDOFF]], <vscale x 8 x i32> [[OP1]], i32 [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsmul.nxv8i32.i32.i64(<vscale x 8 x i32> [[MASKEDOFF]], <vscale x 8 x i32> [[OP1]], i32 [[OP2]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vsmul_vx_i32m4_tu(vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) {
- return __riscv_vsmul_tu(maskedoff, op1, op2, vl);
+ return __riscv_vsmul_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i32> @test_vsmul_vv_i32m8_tu
// CHECK-RV64-SAME: (<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsmul.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[MASKEDOFF]], <vscale x 16 x i32> [[OP1]], <vscale x 16 x i32> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsmul.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[MASKEDOFF]], <vscale x 16 x i32> [[OP1]], <vscale x 16 x i32> [[OP2]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vsmul_vv_i32m8_tu(vint32m8_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl) {
- return __riscv_vsmul_tu(maskedoff, op1, op2, vl);
+ return __riscv_vsmul_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i32> @test_vsmul_vx_i32m8_tu
// CHECK-RV64-SAME: (<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i32 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsmul.nxv16i32.i32.i64(<vscale x 16 x i32> [[MASKEDOFF]], <vscale x 16 x i32> [[OP1]], i32 [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsmul.nxv16i32.i32.i64(<vscale x 16 x i32> [[MASKEDOFF]], <vscale x 16 x i32> [[OP1]], i32 [[OP2]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vsmul_vx_i32m8_tu(vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) {
- return __riscv_vsmul_tu(maskedoff, op1, op2, vl);
+ return __riscv_vsmul_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i64> @test_vsmul_vv_i64m1_tu
// CHECK-RV64-SAME: (<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsmul.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[MASKEDOFF]], <vscale x 1 x i64> [[OP1]], <vscale x 1 x i64> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsmul.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[MASKEDOFF]], <vscale x 1 x i64> [[OP1]], <vscale x 1 x i64> [[OP2]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vsmul_vv_i64m1_tu(vint64m1_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl) {
- return __riscv_vsmul_tu(maskedoff, op1, op2, vl);
+ return __riscv_vsmul_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i64> @test_vsmul_vx_i64m1_tu
// CHECK-RV64-SAME: (<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i64 noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsmul.nxv1i64.i64.i64(<vscale x 1 x i64> [[MASKEDOFF]], <vscale x 1 x i64> [[OP1]], i64 [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsmul.nxv1i64.i64.i64(<vscale x 1 x i64> [[MASKEDOFF]], <vscale x 1 x i64> [[OP1]], i64 [[OP2]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vsmul_vx_i64m1_tu(vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) {
- return __riscv_vsmul_tu(maskedoff, op1, op2, vl);
+ return __riscv_vsmul_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i64> @test_vsmul_vv_i64m2_tu
// CHECK-RV64-SAME: (<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsmul.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[MASKEDOFF]], <vscale x 2 x i64> [[OP1]], <vscale x 2 x i64> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsmul.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[MASKEDOFF]], <vscale x 2 x i64> [[OP1]], <vscale x 2 x i64> [[OP2]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vsmul_vv_i64m2_tu(vint64m2_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl) {
- return __riscv_vsmul_tu(maskedoff, op1, op2, vl);
+ return __riscv_vsmul_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i64> @test_vsmul_vx_i64m2_tu
// CHECK-RV64-SAME: (<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i64 noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsmul.nxv2i64.i64.i64(<vscale x 2 x i64> [[MASKEDOFF]], <vscale x 2 x i64> [[OP1]], i64 [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsmul.nxv2i64.i64.i64(<vscale x 2 x i64> [[MASKEDOFF]], <vscale x 2 x i64> [[OP1]], i64 [[OP2]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vsmul_vx_i64m2_tu(vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) {
- return __riscv_vsmul_tu(maskedoff, op1, op2, vl);
+ return __riscv_vsmul_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i64> @test_vsmul_vv_i64m4_tu
// CHECK-RV64-SAME: (<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsmul.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[MASKEDOFF]], <vscale x 4 x i64> [[OP1]], <vscale x 4 x i64> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsmul.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[MASKEDOFF]], <vscale x 4 x i64> [[OP1]], <vscale x 4 x i64> [[OP2]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vsmul_vv_i64m4_tu(vint64m4_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl) {
- return __riscv_vsmul_tu(maskedoff, op1, op2, vl);
+ return __riscv_vsmul_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i64> @test_vsmul_vx_i64m4_tu
// CHECK-RV64-SAME: (<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i64 noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsmul.nxv4i64.i64.i64(<vscale x 4 x i64> [[MASKEDOFF]], <vscale x 4 x i64> [[OP1]], i64 [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsmul.nxv4i64.i64.i64(<vscale x 4 x i64> [[MASKEDOFF]], <vscale x 4 x i64> [[OP1]], i64 [[OP2]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vsmul_vx_i64m4_tu(vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) {
- return __riscv_vsmul_tu(maskedoff, op1, op2, vl);
+ return __riscv_vsmul_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i64> @test_vsmul_vv_i64m8_tu
// CHECK-RV64-SAME: (<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsmul.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[MASKEDOFF]], <vscale x 8 x i64> [[OP1]], <vscale x 8 x i64> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsmul.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[MASKEDOFF]], <vscale x 8 x i64> [[OP1]], <vscale x 8 x i64> [[OP2]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vsmul_vv_i64m8_tu(vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl) {
- return __riscv_vsmul_tu(maskedoff, op1, op2, vl);
+ return __riscv_vsmul_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i64> @test_vsmul_vx_i64m8_tu
// CHECK-RV64-SAME: (<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsmul.nxv8i64.i64.i64(<vscale x 8 x i64> [[MASKEDOFF]], <vscale x 8 x i64> [[OP1]], i64 [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsmul.nxv8i64.i64.i64(<vscale x 8 x i64> [[MASKEDOFF]], <vscale x 8 x i64> [[OP1]], i64 [[OP2]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vsmul_vx_i64m8_tu(vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) {
- return __riscv_vsmul_tu(maskedoff, op1, op2, vl);
+ return __riscv_vsmul_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vsmul_vv_i8mf8_tum
// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsmul.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF]], <vscale x 1 x i8> [[OP1]], <vscale x 1 x i8> [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsmul.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF]], <vscale x 1 x i8> [[OP1]], <vscale x 1 x i8> [[OP2]], <vscale x 1 x i1> [[MASK]], i64 0, i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vsmul_vv_i8mf8_tum(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
- return __riscv_vsmul_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsmul_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vsmul_vx_i8mf8_tum
// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i8 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsmul.mask.nxv1i8.i8.i64(<vscale x 1 x i8> [[MASKEDOFF]], <vscale x 1 x i8> [[OP1]], i8 [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsmul.mask.nxv1i8.i8.i64(<vscale x 1 x i8> [[MASKEDOFF]], <vscale x 1 x i8> [[OP1]], i8 [[OP2]], <vscale x 1 x i1> [[MASK]], i64 0, i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vsmul_vx_i8mf8_tum(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) {
- return __riscv_vsmul_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsmul_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vsmul_vv_i8mf4_tum
// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsmul.mask.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[MASKEDOFF]], <vscale x 2 x i8> [[OP1]], <vscale x 2 x i8> [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsmul.mask.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[MASKEDOFF]], <vscale x 2 x i8> [[OP1]], <vscale x 2 x i8> [[OP2]], <vscale x 2 x i1> [[MASK]], i64 0, i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vsmul_vv_i8mf4_tum(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
- return __riscv_vsmul_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsmul_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vsmul_vx_i8mf4_tum
// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], i8 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsmul.mask.nxv2i8.i8.i64(<vscale x 2 x i8> [[MASKEDOFF]], <vscale x 2 x i8> [[OP1]], i8 [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsmul.mask.nxv2i8.i8.i64(<vscale x 2 x i8> [[MASKEDOFF]], <vscale x 2 x i8> [[OP1]], i8 [[OP2]], <vscale x 2 x i1> [[MASK]], i64 0, i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vsmul_vx_i8mf4_tum(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) {
- return __riscv_vsmul_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsmul_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vsmul_vv_i8mf2_tum
// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsmul.mask.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[MASKEDOFF]], <vscale x 4 x i8> [[OP1]], <vscale x 4 x i8> [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsmul.mask.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[MASKEDOFF]], <vscale x 4 x i8> [[OP1]], <vscale x 4 x i8> [[OP2]], <vscale x 4 x i1> [[MASK]], i64 0, i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vsmul_vv_i8mf2_tum(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
- return __riscv_vsmul_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsmul_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vsmul_vx_i8mf2_tum
// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], i8 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsmul.mask.nxv4i8.i8.i64(<vscale x 4 x i8> [[MASKEDOFF]], <vscale x 4 x i8> [[OP1]], i8 [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsmul.mask.nxv4i8.i8.i64(<vscale x 4 x i8> [[MASKEDOFF]], <vscale x 4 x i8> [[OP1]], i8 [[OP2]], <vscale x 4 x i1> [[MASK]], i64 0, i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vsmul_vx_i8mf2_tum(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) {
- return __riscv_vsmul_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsmul_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vsmul_vv_i8m1_tum
// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vsmul.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF]], <vscale x 8 x i8> [[OP1]], <vscale x 8 x i8> [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vsmul.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF]], <vscale x 8 x i8> [[OP1]], <vscale x 8 x i8> [[OP2]], <vscale x 8 x i1> [[MASK]], i64 0, i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vsmul_vv_i8m1_tum(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) {
- return __riscv_vsmul_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsmul_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vsmul_vx_i8m1_tum
// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], i8 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vsmul.mask.nxv8i8.i8.i64(<vscale x 8 x i8> [[MASKEDOFF]], <vscale x 8 x i8> [[OP1]], i8 [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vsmul.mask.nxv8i8.i8.i64(<vscale x 8 x i8> [[MASKEDOFF]], <vscale x 8 x i8> [[OP1]], i8 [[OP2]], <vscale x 8 x i1> [[MASK]], i64 0, i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vsmul_vx_i8m1_tum(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) {
- return __riscv_vsmul_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsmul_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vsmul_vv_i8m2_tum
// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vsmul.mask.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[MASKEDOFF]], <vscale x 16 x i8> [[OP1]], <vscale x 16 x i8> [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vsmul.mask.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[MASKEDOFF]], <vscale x 16 x i8> [[OP1]], <vscale x 16 x i8> [[OP2]], <vscale x 16 x i1> [[MASK]], i64 0, i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vsmul_vv_i8m2_tum(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) {
- return __riscv_vsmul_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsmul_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vsmul_vx_i8m2_tum
// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], i8 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vsmul.mask.nxv16i8.i8.i64(<vscale x 16 x i8> [[MASKEDOFF]], <vscale x 16 x i8> [[OP1]], i8 [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vsmul.mask.nxv16i8.i8.i64(<vscale x 16 x i8> [[MASKEDOFF]], <vscale x 16 x i8> [[OP1]], i8 [[OP2]], <vscale x 16 x i1> [[MASK]], i64 0, i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vsmul_vx_i8m2_tum(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) {
- return __riscv_vsmul_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsmul_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vsmul_vv_i8m4_tum
// CHECK-RV64-SAME: (<vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsmul.mask.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[MASKEDOFF]], <vscale x 32 x i8> [[OP1]], <vscale x 32 x i8> [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsmul.mask.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[MASKEDOFF]], <vscale x 32 x i8> [[OP1]], <vscale x 32 x i8> [[OP2]], <vscale x 32 x i1> [[MASK]], i64 0, i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vsmul_vv_i8m4_tum(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) {
- return __riscv_vsmul_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsmul_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vsmul_vx_i8m4_tum
// CHECK-RV64-SAME: (<vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], i8 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsmul.mask.nxv32i8.i8.i64(<vscale x 32 x i8> [[MASKEDOFF]], <vscale x 32 x i8> [[OP1]], i8 [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsmul.mask.nxv32i8.i8.i64(<vscale x 32 x i8> [[MASKEDOFF]], <vscale x 32 x i8> [[OP1]], i8 [[OP2]], <vscale x 32 x i1> [[MASK]], i64 0, i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vsmul_vx_i8m4_tum(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) {
- return __riscv_vsmul_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsmul_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 64 x i8> @test_vsmul_vv_i8m8_tum
// CHECK-RV64-SAME: (<vscale x 64 x i1> [[MASK:%.*]], <vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsmul.mask.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[MASKEDOFF]], <vscale x 64 x i8> [[OP1]], <vscale x 64 x i8> [[OP2]], <vscale x 64 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsmul.mask.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[MASKEDOFF]], <vscale x 64 x i8> [[OP1]], <vscale x 64 x i8> [[OP2]], <vscale x 64 x i1> [[MASK]], i64 0, i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vsmul_vv_i8m8_tum(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl) {
- return __riscv_vsmul_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsmul_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 64 x i8> @test_vsmul_vx_i8m8_tum
// CHECK-RV64-SAME: (<vscale x 64 x i1> [[MASK:%.*]], <vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], i8 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsmul.mask.nxv64i8.i8.i64(<vscale x 64 x i8> [[MASKEDOFF]], <vscale x 64 x i8> [[OP1]], i8 [[OP2]], <vscale x 64 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsmul.mask.nxv64i8.i8.i64(<vscale x 64 x i8> [[MASKEDOFF]], <vscale x 64 x i8> [[OP1]], i8 [[OP2]], <vscale x 64 x i1> [[MASK]], i64 0, i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vsmul_vx_i8m8_tum(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl) {
- return __riscv_vsmul_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsmul_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i16> @test_vsmul_vv_i16mf4_tum
// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsmul.mask.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[MASKEDOFF]], <vscale x 1 x i16> [[OP1]], <vscale x 1 x i16> [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsmul.mask.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[MASKEDOFF]], <vscale x 1 x i16> [[OP1]], <vscale x 1 x i16> [[OP2]], <vscale x 1 x i1> [[MASK]], i64 0, i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vsmul_vv_i16mf4_tum(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
- return __riscv_vsmul_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsmul_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i16> @test_vsmul_vx_i16mf4_tum
// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i16 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsmul.mask.nxv1i16.i16.i64(<vscale x 1 x i16> [[MASKEDOFF]], <vscale x 1 x i16> [[OP1]], i16 [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsmul.mask.nxv1i16.i16.i64(<vscale x 1 x i16> [[MASKEDOFF]], <vscale x 1 x i16> [[OP1]], i16 [[OP2]], <vscale x 1 x i1> [[MASK]], i64 0, i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vsmul_vx_i16mf4_tum(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) {
- return __riscv_vsmul_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsmul_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i16> @test_vsmul_vv_i16mf2_tum
// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsmul.mask.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF]], <vscale x 2 x i16> [[OP1]], <vscale x 2 x i16> [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsmul.mask.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF]], <vscale x 2 x i16> [[OP1]], <vscale x 2 x i16> [[OP2]], <vscale x 2 x i1> [[MASK]], i64 0, i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vsmul_vv_i16mf2_tum(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
- return __riscv_vsmul_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsmul_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i16> @test_vsmul_vx_i16mf2_tum
// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i16 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsmul.mask.nxv2i16.i16.i64(<vscale x 2 x i16> [[MASKEDOFF]], <vscale x 2 x i16> [[OP1]], i16 [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsmul.mask.nxv2i16.i16.i64(<vscale x 2 x i16> [[MASKEDOFF]], <vscale x 2 x i16> [[OP1]], i16 [[OP2]], <vscale x 2 x i1> [[MASK]], i64 0, i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vsmul_vx_i16mf2_tum(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) {
- return __riscv_vsmul_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsmul_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vsmul_vv_i16m1_tum
// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsmul.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF]], <vscale x 4 x i16> [[OP1]], <vscale x 4 x i16> [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsmul.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF]], <vscale x 4 x i16> [[OP1]], <vscale x 4 x i16> [[OP2]], <vscale x 4 x i1> [[MASK]], i64 0, i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vsmul_vv_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) {
- return __riscv_vsmul_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsmul_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vsmul_vx_i16m1_tum
// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i16 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsmul.mask.nxv4i16.i16.i64(<vscale x 4 x i16> [[MASKEDOFF]], <vscale x 4 x i16> [[OP1]], i16 [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsmul.mask.nxv4i16.i16.i64(<vscale x 4 x i16> [[MASKEDOFF]], <vscale x 4 x i16> [[OP1]], i16 [[OP2]], <vscale x 4 x i1> [[MASK]], i64 0, i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vsmul_vx_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) {
- return __riscv_vsmul_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsmul_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i16> @test_vsmul_vv_i16m2_tum
// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsmul.mask.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[MASKEDOFF]], <vscale x 8 x i16> [[OP1]], <vscale x 8 x i16> [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsmul.mask.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[MASKEDOFF]], <vscale x 8 x i16> [[OP1]], <vscale x 8 x i16> [[OP2]], <vscale x 8 x i1> [[MASK]], i64 0, i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vsmul_vv_i16m2_tum(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) {
- return __riscv_vsmul_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsmul_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i16> @test_vsmul_vx_i16m2_tum
// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i16 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsmul.mask.nxv8i16.i16.i64(<vscale x 8 x i16> [[MASKEDOFF]], <vscale x 8 x i16> [[OP1]], i16 [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsmul.mask.nxv8i16.i16.i64(<vscale x 8 x i16> [[MASKEDOFF]], <vscale x 8 x i16> [[OP1]], i16 [[OP2]], <vscale x 8 x i1> [[MASK]], i64 0, i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vsmul_vx_i16m2_tum(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) {
- return __riscv_vsmul_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsmul_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i16> @test_vsmul_vv_i16m4_tum
// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsmul.mask.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[MASKEDOFF]], <vscale x 16 x i16> [[OP1]], <vscale x 16 x i16> [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsmul.mask.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[MASKEDOFF]], <vscale x 16 x i16> [[OP1]], <vscale x 16 x i16> [[OP2]], <vscale x 16 x i1> [[MASK]], i64 0, i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vsmul_vv_i16m4_tum(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) {
- return __riscv_vsmul_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsmul_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i16> @test_vsmul_vx_i16m4_tum
// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i16 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsmul.mask.nxv16i16.i16.i64(<vscale x 16 x i16> [[MASKEDOFF]], <vscale x 16 x i16> [[OP1]], i16 [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsmul.mask.nxv16i16.i16.i64(<vscale x 16 x i16> [[MASKEDOFF]], <vscale x 16 x i16> [[OP1]], i16 [[OP2]], <vscale x 16 x i1> [[MASK]], i64 0, i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vsmul_vx_i16m4_tum(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) {
- return __riscv_vsmul_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsmul_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i16> @test_vsmul_vv_i16m8_tum
// CHECK-RV64-SAME: (<vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsmul.mask.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[MASKEDOFF]], <vscale x 32 x i16> [[OP1]], <vscale x 32 x i16> [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsmul.mask.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[MASKEDOFF]], <vscale x 32 x i16> [[OP1]], <vscale x 32 x i16> [[OP2]], <vscale x 32 x i1> [[MASK]], i64 0, i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vsmul_vv_i16m8_tum(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl) {
- return __riscv_vsmul_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsmul_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i16> @test_vsmul_vx_i16m8_tum
// CHECK-RV64-SAME: (<vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i16 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsmul.mask.nxv32i16.i16.i64(<vscale x 32 x i16> [[MASKEDOFF]], <vscale x 32 x i16> [[OP1]], i16 [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsmul.mask.nxv32i16.i16.i64(<vscale x 32 x i16> [[MASKEDOFF]], <vscale x 32 x i16> [[OP1]], i16 [[OP2]], <vscale x 32 x i1> [[MASK]], i64 0, i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vsmul_vx_i16m8_tum(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) {
- return __riscv_vsmul_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsmul_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i32> @test_vsmul_vv_i32mf2_tum
// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsmul.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[MASKEDOFF]], <vscale x 1 x i32> [[OP1]], <vscale x 1 x i32> [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsmul.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[MASKEDOFF]], <vscale x 1 x i32> [[OP1]], <vscale x 1 x i32> [[OP2]], <vscale x 1 x i1> [[MASK]], i64 0, i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vsmul_vv_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
- return __riscv_vsmul_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsmul_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i32> @test_vsmul_vx_i32mf2_tum
// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsmul.mask.nxv1i32.i32.i64(<vscale x 1 x i32> [[MASKEDOFF]], <vscale x 1 x i32> [[OP1]], i32 [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsmul.mask.nxv1i32.i32.i64(<vscale x 1 x i32> [[MASKEDOFF]], <vscale x 1 x i32> [[OP1]], i32 [[OP2]], <vscale x 1 x i1> [[MASK]], i64 0, i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vsmul_vx_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) {
- return __riscv_vsmul_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsmul_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i32> @test_vsmul_vv_i32m1_tum
// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsmul.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[MASKEDOFF]], <vscale x 2 x i32> [[OP1]], <vscale x 2 x i32> [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsmul.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[MASKEDOFF]], <vscale x 2 x i32> [[OP1]], <vscale x 2 x i32> [[OP2]], <vscale x 2 x i1> [[MASK]], i64 0, i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vsmul_vv_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) {
- return __riscv_vsmul_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsmul_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i32> @test_vsmul_vx_i32m1_tum
// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i32 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsmul.mask.nxv2i32.i32.i64(<vscale x 2 x i32> [[MASKEDOFF]], <vscale x 2 x i32> [[OP1]], i32 [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsmul.mask.nxv2i32.i32.i64(<vscale x 2 x i32> [[MASKEDOFF]], <vscale x 2 x i32> [[OP1]], i32 [[OP2]], <vscale x 2 x i1> [[MASK]], i64 0, i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vsmul_vx_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) {
- return __riscv_vsmul_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsmul_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i32> @test_vsmul_vv_i32m2_tum
// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsmul.mask.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[MASKEDOFF]], <vscale x 4 x i32> [[OP1]], <vscale x 4 x i32> [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsmul.mask.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[MASKEDOFF]], <vscale x 4 x i32> [[OP1]], <vscale x 4 x i32> [[OP2]], <vscale x 4 x i1> [[MASK]], i64 0, i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vsmul_vv_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) {
- return __riscv_vsmul_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsmul_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i32> @test_vsmul_vx_i32m2_tum
// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i32 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsmul.mask.nxv4i32.i32.i64(<vscale x 4 x i32> [[MASKEDOFF]], <vscale x 4 x i32> [[OP1]], i32 [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsmul.mask.nxv4i32.i32.i64(<vscale x 4 x i32> [[MASKEDOFF]], <vscale x 4 x i32> [[OP1]], i32 [[OP2]], <vscale x 4 x i1> [[MASK]], i64 0, i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vsmul_vx_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) {
- return __riscv_vsmul_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsmul_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i32> @test_vsmul_vv_i32m4_tum
// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsmul.mask.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[MASKEDOFF]], <vscale x 8 x i32> [[OP1]], <vscale x 8 x i32> [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsmul.mask.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[MASKEDOFF]], <vscale x 8 x i32> [[OP1]], <vscale x 8 x i32> [[OP2]], <vscale x 8 x i1> [[MASK]], i64 0, i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vsmul_vv_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) {
- return __riscv_vsmul_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsmul_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i32> @test_vsmul_vx_i32m4_tum
// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i32 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsmul.mask.nxv8i32.i32.i64(<vscale x 8 x i32> [[MASKEDOFF]], <vscale x 8 x i32> [[OP1]], i32 [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsmul.mask.nxv8i32.i32.i64(<vscale x 8 x i32> [[MASKEDOFF]], <vscale x 8 x i32> [[OP1]], i32 [[OP2]], <vscale x 8 x i1> [[MASK]], i64 0, i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vsmul_vx_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) {
- return __riscv_vsmul_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsmul_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i32> @test_vsmul_vv_i32m8_tum
// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsmul.mask.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[MASKEDOFF]], <vscale x 16 x i32> [[OP1]], <vscale x 16 x i32> [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsmul.mask.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[MASKEDOFF]], <vscale x 16 x i32> [[OP1]], <vscale x 16 x i32> [[OP2]], <vscale x 16 x i1> [[MASK]], i64 0, i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vsmul_vv_i32m8_tum(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl) {
- return __riscv_vsmul_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsmul_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i32> @test_vsmul_vx_i32m8_tum
// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i32 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsmul.mask.nxv16i32.i32.i64(<vscale x 16 x i32> [[MASKEDOFF]], <vscale x 16 x i32> [[OP1]], i32 [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsmul.mask.nxv16i32.i32.i64(<vscale x 16 x i32> [[MASKEDOFF]], <vscale x 16 x i32> [[OP1]], i32 [[OP2]], <vscale x 16 x i1> [[MASK]], i64 0, i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vsmul_vx_i32m8_tum(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) {
- return __riscv_vsmul_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsmul_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i64> @test_vsmul_vv_i64m1_tum
// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsmul.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[MASKEDOFF]], <vscale x 1 x i64> [[OP1]], <vscale x 1 x i64> [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsmul.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[MASKEDOFF]], <vscale x 1 x i64> [[OP1]], <vscale x 1 x i64> [[OP2]], <vscale x 1 x i1> [[MASK]], i64 0, i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vsmul_vv_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl) {
- return __riscv_vsmul_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsmul_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i64> @test_vsmul_vx_i64m1_tum
// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i64 noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsmul.mask.nxv1i64.i64.i64(<vscale x 1 x i64> [[MASKEDOFF]], <vscale x 1 x i64> [[OP1]], i64 [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsmul.mask.nxv1i64.i64.i64(<vscale x 1 x i64> [[MASKEDOFF]], <vscale x 1 x i64> [[OP1]], i64 [[OP2]], <vscale x 1 x i1> [[MASK]], i64 0, i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vsmul_vx_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) {
- return __riscv_vsmul_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsmul_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i64> @test_vsmul_vv_i64m2_tum
// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsmul.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[MASKEDOFF]], <vscale x 2 x i64> [[OP1]], <vscale x 2 x i64> [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsmul.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[MASKEDOFF]], <vscale x 2 x i64> [[OP1]], <vscale x 2 x i64> [[OP2]], <vscale x 2 x i1> [[MASK]], i64 0, i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vsmul_vv_i64m2_tum(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl) {
- return __riscv_vsmul_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsmul_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i64> @test_vsmul_vx_i64m2_tum
// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i64 noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsmul.mask.nxv2i64.i64.i64(<vscale x 2 x i64> [[MASKEDOFF]], <vscale x 2 x i64> [[OP1]], i64 [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsmul.mask.nxv2i64.i64.i64(<vscale x 2 x i64> [[MASKEDOFF]], <vscale x 2 x i64> [[OP1]], i64 [[OP2]], <vscale x 2 x i1> [[MASK]], i64 0, i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vsmul_vx_i64m2_tum(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) {
- return __riscv_vsmul_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsmul_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i64> @test_vsmul_vv_i64m4_tum
// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsmul.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[MASKEDOFF]], <vscale x 4 x i64> [[OP1]], <vscale x 4 x i64> [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsmul.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[MASKEDOFF]], <vscale x 4 x i64> [[OP1]], <vscale x 4 x i64> [[OP2]], <vscale x 4 x i1> [[MASK]], i64 0, i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vsmul_vv_i64m4_tum(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl) {
- return __riscv_vsmul_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsmul_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i64> @test_vsmul_vx_i64m4_tum
// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i64 noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsmul.mask.nxv4i64.i64.i64(<vscale x 4 x i64> [[MASKEDOFF]], <vscale x 4 x i64> [[OP1]], i64 [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsmul.mask.nxv4i64.i64.i64(<vscale x 4 x i64> [[MASKEDOFF]], <vscale x 4 x i64> [[OP1]], i64 [[OP2]], <vscale x 4 x i1> [[MASK]], i64 0, i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vsmul_vx_i64m4_tum(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) {
- return __riscv_vsmul_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsmul_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i64> @test_vsmul_vv_i64m8_tum
// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsmul.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[MASKEDOFF]], <vscale x 8 x i64> [[OP1]], <vscale x 8 x i64> [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsmul.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[MASKEDOFF]], <vscale x 8 x i64> [[OP1]], <vscale x 8 x i64> [[OP2]], <vscale x 8 x i1> [[MASK]], i64 0, i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vsmul_vv_i64m8_tum(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl) {
- return __riscv_vsmul_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsmul_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i64> @test_vsmul_vx_i64m8_tum
// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsmul.mask.nxv8i64.i64.i64(<vscale x 8 x i64> [[MASKEDOFF]], <vscale x 8 x i64> [[OP1]], i64 [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsmul.mask.nxv8i64.i64.i64(<vscale x 8 x i64> [[MASKEDOFF]], <vscale x 8 x i64> [[OP1]], i64 [[OP2]], <vscale x 8 x i1> [[MASK]], i64 0, i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vsmul_vx_i64m8_tum(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) {
- return __riscv_vsmul_tum(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsmul_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vsmul_vv_i8mf8_tumu
// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsmul.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF]], <vscale x 1 x i8> [[OP1]], <vscale x 1 x i8> [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsmul.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF]], <vscale x 1 x i8> [[OP1]], <vscale x 1 x i8> [[OP2]], <vscale x 1 x i1> [[MASK]], i64 0, i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vsmul_vv_i8mf8_tumu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
- return __riscv_vsmul_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsmul_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vsmul_vx_i8mf8_tumu
// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i8 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsmul.mask.nxv1i8.i8.i64(<vscale x 1 x i8> [[MASKEDOFF]], <vscale x 1 x i8> [[OP1]], i8 [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsmul.mask.nxv1i8.i8.i64(<vscale x 1 x i8> [[MASKEDOFF]], <vscale x 1 x i8> [[OP1]], i8 [[OP2]], <vscale x 1 x i1> [[MASK]], i64 0, i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vsmul_vx_i8mf8_tumu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) {
- return __riscv_vsmul_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsmul_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vsmul_vv_i8mf4_tumu
// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsmul.mask.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[MASKEDOFF]], <vscale x 2 x i8> [[OP1]], <vscale x 2 x i8> [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsmul.mask.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[MASKEDOFF]], <vscale x 2 x i8> [[OP1]], <vscale x 2 x i8> [[OP2]], <vscale x 2 x i1> [[MASK]], i64 0, i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vsmul_vv_i8mf4_tumu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
- return __riscv_vsmul_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsmul_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vsmul_vx_i8mf4_tumu
// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], i8 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsmul.mask.nxv2i8.i8.i64(<vscale x 2 x i8> [[MASKEDOFF]], <vscale x 2 x i8> [[OP1]], i8 [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsmul.mask.nxv2i8.i8.i64(<vscale x 2 x i8> [[MASKEDOFF]], <vscale x 2 x i8> [[OP1]], i8 [[OP2]], <vscale x 2 x i1> [[MASK]], i64 0, i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vsmul_vx_i8mf4_tumu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) {
- return __riscv_vsmul_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsmul_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vsmul_vv_i8mf2_tumu
// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsmul.mask.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[MASKEDOFF]], <vscale x 4 x i8> [[OP1]], <vscale x 4 x i8> [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsmul.mask.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[MASKEDOFF]], <vscale x 4 x i8> [[OP1]], <vscale x 4 x i8> [[OP2]], <vscale x 4 x i1> [[MASK]], i64 0, i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vsmul_vv_i8mf2_tumu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
- return __riscv_vsmul_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsmul_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vsmul_vx_i8mf2_tumu
// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], i8 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsmul.mask.nxv4i8.i8.i64(<vscale x 4 x i8> [[MASKEDOFF]], <vscale x 4 x i8> [[OP1]], i8 [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsmul.mask.nxv4i8.i8.i64(<vscale x 4 x i8> [[MASKEDOFF]], <vscale x 4 x i8> [[OP1]], i8 [[OP2]], <vscale x 4 x i1> [[MASK]], i64 0, i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vsmul_vx_i8mf2_tumu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) {
- return __riscv_vsmul_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsmul_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vsmul_vv_i8m1_tumu
// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vsmul.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF]], <vscale x 8 x i8> [[OP1]], <vscale x 8 x i8> [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vsmul.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF]], <vscale x 8 x i8> [[OP1]], <vscale x 8 x i8> [[OP2]], <vscale x 8 x i1> [[MASK]], i64 0, i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vsmul_vv_i8m1_tumu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) {
- return __riscv_vsmul_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsmul_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vsmul_vx_i8m1_tumu
// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], i8 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vsmul.mask.nxv8i8.i8.i64(<vscale x 8 x i8> [[MASKEDOFF]], <vscale x 8 x i8> [[OP1]], i8 [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vsmul.mask.nxv8i8.i8.i64(<vscale x 8 x i8> [[MASKEDOFF]], <vscale x 8 x i8> [[OP1]], i8 [[OP2]], <vscale x 8 x i1> [[MASK]], i64 0, i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vsmul_vx_i8m1_tumu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) {
- return __riscv_vsmul_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsmul_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vsmul_vv_i8m2_tumu
// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vsmul.mask.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[MASKEDOFF]], <vscale x 16 x i8> [[OP1]], <vscale x 16 x i8> [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vsmul.mask.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[MASKEDOFF]], <vscale x 16 x i8> [[OP1]], <vscale x 16 x i8> [[OP2]], <vscale x 16 x i1> [[MASK]], i64 0, i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vsmul_vv_i8m2_tumu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) {
- return __riscv_vsmul_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsmul_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vsmul_vx_i8m2_tumu
// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], i8 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vsmul.mask.nxv16i8.i8.i64(<vscale x 16 x i8> [[MASKEDOFF]], <vscale x 16 x i8> [[OP1]], i8 [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vsmul.mask.nxv16i8.i8.i64(<vscale x 16 x i8> [[MASKEDOFF]], <vscale x 16 x i8> [[OP1]], i8 [[OP2]], <vscale x 16 x i1> [[MASK]], i64 0, i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vsmul_vx_i8m2_tumu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) {
- return __riscv_vsmul_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsmul_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vsmul_vv_i8m4_tumu
// CHECK-RV64-SAME: (<vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsmul.mask.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[MASKEDOFF]], <vscale x 32 x i8> [[OP1]], <vscale x 32 x i8> [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsmul.mask.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[MASKEDOFF]], <vscale x 32 x i8> [[OP1]], <vscale x 32 x i8> [[OP2]], <vscale x 32 x i1> [[MASK]], i64 0, i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vsmul_vv_i8m4_tumu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) {
- return __riscv_vsmul_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsmul_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vsmul_vx_i8m4_tumu
// CHECK-RV64-SAME: (<vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], i8 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsmul.mask.nxv32i8.i8.i64(<vscale x 32 x i8> [[MASKEDOFF]], <vscale x 32 x i8> [[OP1]], i8 [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsmul.mask.nxv32i8.i8.i64(<vscale x 32 x i8> [[MASKEDOFF]], <vscale x 32 x i8> [[OP1]], i8 [[OP2]], <vscale x 32 x i1> [[MASK]], i64 0, i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vsmul_vx_i8m4_tumu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) {
- return __riscv_vsmul_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsmul_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 64 x i8> @test_vsmul_vv_i8m8_tumu
// CHECK-RV64-SAME: (<vscale x 64 x i1> [[MASK:%.*]], <vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsmul.mask.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[MASKEDOFF]], <vscale x 64 x i8> [[OP1]], <vscale x 64 x i8> [[OP2]], <vscale x 64 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsmul.mask.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[MASKEDOFF]], <vscale x 64 x i8> [[OP1]], <vscale x 64 x i8> [[OP2]], <vscale x 64 x i1> [[MASK]], i64 0, i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vsmul_vv_i8m8_tumu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl) {
- return __riscv_vsmul_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsmul_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 64 x i8> @test_vsmul_vx_i8m8_tumu
// CHECK-RV64-SAME: (<vscale x 64 x i1> [[MASK:%.*]], <vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], i8 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsmul.mask.nxv64i8.i8.i64(<vscale x 64 x i8> [[MASKEDOFF]], <vscale x 64 x i8> [[OP1]], i8 [[OP2]], <vscale x 64 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsmul.mask.nxv64i8.i8.i64(<vscale x 64 x i8> [[MASKEDOFF]], <vscale x 64 x i8> [[OP1]], i8 [[OP2]], <vscale x 64 x i1> [[MASK]], i64 0, i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vsmul_vx_i8m8_tumu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl) {
- return __riscv_vsmul_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsmul_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i16> @test_vsmul_vv_i16mf4_tumu
// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsmul.mask.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[MASKEDOFF]], <vscale x 1 x i16> [[OP1]], <vscale x 1 x i16> [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsmul.mask.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[MASKEDOFF]], <vscale x 1 x i16> [[OP1]], <vscale x 1 x i16> [[OP2]], <vscale x 1 x i1> [[MASK]], i64 0, i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vsmul_vv_i16mf4_tumu(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
- return __riscv_vsmul_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsmul_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i16> @test_vsmul_vx_i16mf4_tumu
// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i16 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsmul.mask.nxv1i16.i16.i64(<vscale x 1 x i16> [[MASKEDOFF]], <vscale x 1 x i16> [[OP1]], i16 [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsmul.mask.nxv1i16.i16.i64(<vscale x 1 x i16> [[MASKEDOFF]], <vscale x 1 x i16> [[OP1]], i16 [[OP2]], <vscale x 1 x i1> [[MASK]], i64 0, i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vsmul_vx_i16mf4_tumu(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) {
- return __riscv_vsmul_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsmul_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i16> @test_vsmul_vv_i16mf2_tumu
// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsmul.mask.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF]], <vscale x 2 x i16> [[OP1]], <vscale x 2 x i16> [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsmul.mask.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF]], <vscale x 2 x i16> [[OP1]], <vscale x 2 x i16> [[OP2]], <vscale x 2 x i1> [[MASK]], i64 0, i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vsmul_vv_i16mf2_tumu(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
- return __riscv_vsmul_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsmul_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i16> @test_vsmul_vx_i16mf2_tumu
// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i16 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsmul.mask.nxv2i16.i16.i64(<vscale x 2 x i16> [[MASKEDOFF]], <vscale x 2 x i16> [[OP1]], i16 [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsmul.mask.nxv2i16.i16.i64(<vscale x 2 x i16> [[MASKEDOFF]], <vscale x 2 x i16> [[OP1]], i16 [[OP2]], <vscale x 2 x i1> [[MASK]], i64 0, i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vsmul_vx_i16mf2_tumu(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) {
- return __riscv_vsmul_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsmul_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vsmul_vv_i16m1_tumu
// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsmul.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF]], <vscale x 4 x i16> [[OP1]], <vscale x 4 x i16> [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsmul.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF]], <vscale x 4 x i16> [[OP1]], <vscale x 4 x i16> [[OP2]], <vscale x 4 x i1> [[MASK]], i64 0, i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vsmul_vv_i16m1_tumu(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) {
- return __riscv_vsmul_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsmul_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vsmul_vx_i16m1_tumu
// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i16 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsmul.mask.nxv4i16.i16.i64(<vscale x 4 x i16> [[MASKEDOFF]], <vscale x 4 x i16> [[OP1]], i16 [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsmul.mask.nxv4i16.i16.i64(<vscale x 4 x i16> [[MASKEDOFF]], <vscale x 4 x i16> [[OP1]], i16 [[OP2]], <vscale x 4 x i1> [[MASK]], i64 0, i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vsmul_vx_i16m1_tumu(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) {
- return __riscv_vsmul_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsmul_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i16> @test_vsmul_vv_i16m2_tumu
// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsmul.mask.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[MASKEDOFF]], <vscale x 8 x i16> [[OP1]], <vscale x 8 x i16> [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsmul.mask.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[MASKEDOFF]], <vscale x 8 x i16> [[OP1]], <vscale x 8 x i16> [[OP2]], <vscale x 8 x i1> [[MASK]], i64 0, i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vsmul_vv_i16m2_tumu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) {
- return __riscv_vsmul_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsmul_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i16> @test_vsmul_vx_i16m2_tumu
// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i16 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsmul.mask.nxv8i16.i16.i64(<vscale x 8 x i16> [[MASKEDOFF]], <vscale x 8 x i16> [[OP1]], i16 [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsmul.mask.nxv8i16.i16.i64(<vscale x 8 x i16> [[MASKEDOFF]], <vscale x 8 x i16> [[OP1]], i16 [[OP2]], <vscale x 8 x i1> [[MASK]], i64 0, i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vsmul_vx_i16m2_tumu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) {
- return __riscv_vsmul_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsmul_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i16> @test_vsmul_vv_i16m4_tumu
// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsmul.mask.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[MASKEDOFF]], <vscale x 16 x i16> [[OP1]], <vscale x 16 x i16> [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsmul.mask.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[MASKEDOFF]], <vscale x 16 x i16> [[OP1]], <vscale x 16 x i16> [[OP2]], <vscale x 16 x i1> [[MASK]], i64 0, i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vsmul_vv_i16m4_tumu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) {
- return __riscv_vsmul_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsmul_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i16> @test_vsmul_vx_i16m4_tumu
// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i16 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsmul.mask.nxv16i16.i16.i64(<vscale x 16 x i16> [[MASKEDOFF]], <vscale x 16 x i16> [[OP1]], i16 [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsmul.mask.nxv16i16.i16.i64(<vscale x 16 x i16> [[MASKEDOFF]], <vscale x 16 x i16> [[OP1]], i16 [[OP2]], <vscale x 16 x i1> [[MASK]], i64 0, i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vsmul_vx_i16m4_tumu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) {
- return __riscv_vsmul_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsmul_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i16> @test_vsmul_vv_i16m8_tumu
// CHECK-RV64-SAME: (<vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsmul.mask.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[MASKEDOFF]], <vscale x 32 x i16> [[OP1]], <vscale x 32 x i16> [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsmul.mask.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[MASKEDOFF]], <vscale x 32 x i16> [[OP1]], <vscale x 32 x i16> [[OP2]], <vscale x 32 x i1> [[MASK]], i64 0, i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vsmul_vv_i16m8_tumu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl) {
- return __riscv_vsmul_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsmul_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i16> @test_vsmul_vx_i16m8_tumu
// CHECK-RV64-SAME: (<vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i16 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsmul.mask.nxv32i16.i16.i64(<vscale x 32 x i16> [[MASKEDOFF]], <vscale x 32 x i16> [[OP1]], i16 [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsmul.mask.nxv32i16.i16.i64(<vscale x 32 x i16> [[MASKEDOFF]], <vscale x 32 x i16> [[OP1]], i16 [[OP2]], <vscale x 32 x i1> [[MASK]], i64 0, i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vsmul_vx_i16m8_tumu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) {
- return __riscv_vsmul_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsmul_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i32> @test_vsmul_vv_i32mf2_tumu
// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsmul.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[MASKEDOFF]], <vscale x 1 x i32> [[OP1]], <vscale x 1 x i32> [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsmul.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[MASKEDOFF]], <vscale x 1 x i32> [[OP1]], <vscale x 1 x i32> [[OP2]], <vscale x 1 x i1> [[MASK]], i64 0, i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vsmul_vv_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
- return __riscv_vsmul_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsmul_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i32> @test_vsmul_vx_i32mf2_tumu
// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsmul.mask.nxv1i32.i32.i64(<vscale x 1 x i32> [[MASKEDOFF]], <vscale x 1 x i32> [[OP1]], i32 [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsmul.mask.nxv1i32.i32.i64(<vscale x 1 x i32> [[MASKEDOFF]], <vscale x 1 x i32> [[OP1]], i32 [[OP2]], <vscale x 1 x i1> [[MASK]], i64 0, i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vsmul_vx_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) {
- return __riscv_vsmul_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsmul_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i32> @test_vsmul_vv_i32m1_tumu
// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsmul.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[MASKEDOFF]], <vscale x 2 x i32> [[OP1]], <vscale x 2 x i32> [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsmul.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[MASKEDOFF]], <vscale x 2 x i32> [[OP1]], <vscale x 2 x i32> [[OP2]], <vscale x 2 x i1> [[MASK]], i64 0, i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vsmul_vv_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) {
- return __riscv_vsmul_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsmul_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i32> @test_vsmul_vx_i32m1_tumu
// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i32 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsmul.mask.nxv2i32.i32.i64(<vscale x 2 x i32> [[MASKEDOFF]], <vscale x 2 x i32> [[OP1]], i32 [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsmul.mask.nxv2i32.i32.i64(<vscale x 2 x i32> [[MASKEDOFF]], <vscale x 2 x i32> [[OP1]], i32 [[OP2]], <vscale x 2 x i1> [[MASK]], i64 0, i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vsmul_vx_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) {
- return __riscv_vsmul_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsmul_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i32> @test_vsmul_vv_i32m2_tumu
// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsmul.mask.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[MASKEDOFF]], <vscale x 4 x i32> [[OP1]], <vscale x 4 x i32> [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsmul.mask.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[MASKEDOFF]], <vscale x 4 x i32> [[OP1]], <vscale x 4 x i32> [[OP2]], <vscale x 4 x i1> [[MASK]], i64 0, i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vsmul_vv_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) {
- return __riscv_vsmul_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsmul_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i32> @test_vsmul_vx_i32m2_tumu
// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i32 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsmul.mask.nxv4i32.i32.i64(<vscale x 4 x i32> [[MASKEDOFF]], <vscale x 4 x i32> [[OP1]], i32 [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsmul.mask.nxv4i32.i32.i64(<vscale x 4 x i32> [[MASKEDOFF]], <vscale x 4 x i32> [[OP1]], i32 [[OP2]], <vscale x 4 x i1> [[MASK]], i64 0, i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vsmul_vx_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) {
- return __riscv_vsmul_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsmul_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i32> @test_vsmul_vv_i32m4_tumu
// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsmul.mask.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[MASKEDOFF]], <vscale x 8 x i32> [[OP1]], <vscale x 8 x i32> [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsmul.mask.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[MASKEDOFF]], <vscale x 8 x i32> [[OP1]], <vscale x 8 x i32> [[OP2]], <vscale x 8 x i1> [[MASK]], i64 0, i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vsmul_vv_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) {
- return __riscv_vsmul_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsmul_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i32> @test_vsmul_vx_i32m4_tumu
// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i32 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsmul.mask.nxv8i32.i32.i64(<vscale x 8 x i32> [[MASKEDOFF]], <vscale x 8 x i32> [[OP1]], i32 [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsmul.mask.nxv8i32.i32.i64(<vscale x 8 x i32> [[MASKEDOFF]], <vscale x 8 x i32> [[OP1]], i32 [[OP2]], <vscale x 8 x i1> [[MASK]], i64 0, i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vsmul_vx_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) {
- return __riscv_vsmul_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsmul_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i32> @test_vsmul_vv_i32m8_tumu
// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsmul.mask.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[MASKEDOFF]], <vscale x 16 x i32> [[OP1]], <vscale x 16 x i32> [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsmul.mask.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[MASKEDOFF]], <vscale x 16 x i32> [[OP1]], <vscale x 16 x i32> [[OP2]], <vscale x 16 x i1> [[MASK]], i64 0, i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vsmul_vv_i32m8_tumu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl) {
- return __riscv_vsmul_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsmul_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i32> @test_vsmul_vx_i32m8_tumu
// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i32 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsmul.mask.nxv16i32.i32.i64(<vscale x 16 x i32> [[MASKEDOFF]], <vscale x 16 x i32> [[OP1]], i32 [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsmul.mask.nxv16i32.i32.i64(<vscale x 16 x i32> [[MASKEDOFF]], <vscale x 16 x i32> [[OP1]], i32 [[OP2]], <vscale x 16 x i1> [[MASK]], i64 0, i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vsmul_vx_i32m8_tumu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) {
- return __riscv_vsmul_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsmul_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i64> @test_vsmul_vv_i64m1_tumu
// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsmul.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[MASKEDOFF]], <vscale x 1 x i64> [[OP1]], <vscale x 1 x i64> [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsmul.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[MASKEDOFF]], <vscale x 1 x i64> [[OP1]], <vscale x 1 x i64> [[OP2]], <vscale x 1 x i1> [[MASK]], i64 0, i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vsmul_vv_i64m1_tumu(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl) {
- return __riscv_vsmul_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsmul_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i64> @test_vsmul_vx_i64m1_tumu
// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i64 noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsmul.mask.nxv1i64.i64.i64(<vscale x 1 x i64> [[MASKEDOFF]], <vscale x 1 x i64> [[OP1]], i64 [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsmul.mask.nxv1i64.i64.i64(<vscale x 1 x i64> [[MASKEDOFF]], <vscale x 1 x i64> [[OP1]], i64 [[OP2]], <vscale x 1 x i1> [[MASK]], i64 0, i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vsmul_vx_i64m1_tumu(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) {
- return __riscv_vsmul_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsmul_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i64> @test_vsmul_vv_i64m2_tumu
// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsmul.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[MASKEDOFF]], <vscale x 2 x i64> [[OP1]], <vscale x 2 x i64> [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsmul.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[MASKEDOFF]], <vscale x 2 x i64> [[OP1]], <vscale x 2 x i64> [[OP2]], <vscale x 2 x i1> [[MASK]], i64 0, i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vsmul_vv_i64m2_tumu(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl) {
- return __riscv_vsmul_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsmul_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i64> @test_vsmul_vx_i64m2_tumu
// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i64 noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsmul.mask.nxv2i64.i64.i64(<vscale x 2 x i64> [[MASKEDOFF]], <vscale x 2 x i64> [[OP1]], i64 [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsmul.mask.nxv2i64.i64.i64(<vscale x 2 x i64> [[MASKEDOFF]], <vscale x 2 x i64> [[OP1]], i64 [[OP2]], <vscale x 2 x i1> [[MASK]], i64 0, i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vsmul_vx_i64m2_tumu(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) {
- return __riscv_vsmul_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsmul_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i64> @test_vsmul_vv_i64m4_tumu
// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsmul.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[MASKEDOFF]], <vscale x 4 x i64> [[OP1]], <vscale x 4 x i64> [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsmul.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[MASKEDOFF]], <vscale x 4 x i64> [[OP1]], <vscale x 4 x i64> [[OP2]], <vscale x 4 x i1> [[MASK]], i64 0, i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vsmul_vv_i64m4_tumu(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl) {
- return __riscv_vsmul_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsmul_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i64> @test_vsmul_vx_i64m4_tumu
// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i64 noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsmul.mask.nxv4i64.i64.i64(<vscale x 4 x i64> [[MASKEDOFF]], <vscale x 4 x i64> [[OP1]], i64 [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsmul.mask.nxv4i64.i64.i64(<vscale x 4 x i64> [[MASKEDOFF]], <vscale x 4 x i64> [[OP1]], i64 [[OP2]], <vscale x 4 x i1> [[MASK]], i64 0, i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vsmul_vx_i64m4_tumu(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) {
- return __riscv_vsmul_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsmul_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i64> @test_vsmul_vv_i64m8_tumu
// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsmul.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[MASKEDOFF]], <vscale x 8 x i64> [[OP1]], <vscale x 8 x i64> [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsmul.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[MASKEDOFF]], <vscale x 8 x i64> [[OP1]], <vscale x 8 x i64> [[OP2]], <vscale x 8 x i1> [[MASK]], i64 0, i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vsmul_vv_i64m8_tumu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl) {
- return __riscv_vsmul_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsmul_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i64> @test_vsmul_vx_i64m8_tumu
// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsmul.mask.nxv8i64.i64.i64(<vscale x 8 x i64> [[MASKEDOFF]], <vscale x 8 x i64> [[OP1]], i64 [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsmul.mask.nxv8i64.i64.i64(<vscale x 8 x i64> [[MASKEDOFF]], <vscale x 8 x i64> [[OP1]], i64 [[OP2]], <vscale x 8 x i1> [[MASK]], i64 0, i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vsmul_vx_i64m8_tumu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) {
- return __riscv_vsmul_tumu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsmul_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vsmul_vv_i8mf8_mu
// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsmul.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF]], <vscale x 1 x i8> [[OP1]], <vscale x 1 x i8> [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsmul.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF]], <vscale x 1 x i8> [[OP1]], <vscale x 1 x i8> [[OP2]], <vscale x 1 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vsmul_vv_i8mf8_mu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
- return __riscv_vsmul_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsmul_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vsmul_vx_i8mf8_mu
// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i8 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsmul.mask.nxv1i8.i8.i64(<vscale x 1 x i8> [[MASKEDOFF]], <vscale x 1 x i8> [[OP1]], i8 [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsmul.mask.nxv1i8.i8.i64(<vscale x 1 x i8> [[MASKEDOFF]], <vscale x 1 x i8> [[OP1]], i8 [[OP2]], <vscale x 1 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vsmul_vx_i8mf8_mu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) {
- return __riscv_vsmul_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsmul_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vsmul_vv_i8mf4_mu
// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsmul.mask.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[MASKEDOFF]], <vscale x 2 x i8> [[OP1]], <vscale x 2 x i8> [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsmul.mask.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[MASKEDOFF]], <vscale x 2 x i8> [[OP1]], <vscale x 2 x i8> [[OP2]], <vscale x 2 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vsmul_vv_i8mf4_mu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
- return __riscv_vsmul_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsmul_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vsmul_vx_i8mf4_mu
// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], i8 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsmul.mask.nxv2i8.i8.i64(<vscale x 2 x i8> [[MASKEDOFF]], <vscale x 2 x i8> [[OP1]], i8 [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsmul.mask.nxv2i8.i8.i64(<vscale x 2 x i8> [[MASKEDOFF]], <vscale x 2 x i8> [[OP1]], i8 [[OP2]], <vscale x 2 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vsmul_vx_i8mf4_mu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) {
- return __riscv_vsmul_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsmul_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vsmul_vv_i8mf2_mu
// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsmul.mask.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[MASKEDOFF]], <vscale x 4 x i8> [[OP1]], <vscale x 4 x i8> [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsmul.mask.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[MASKEDOFF]], <vscale x 4 x i8> [[OP1]], <vscale x 4 x i8> [[OP2]], <vscale x 4 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vsmul_vv_i8mf2_mu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
- return __riscv_vsmul_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsmul_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vsmul_vx_i8mf2_mu
// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], i8 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsmul.mask.nxv4i8.i8.i64(<vscale x 4 x i8> [[MASKEDOFF]], <vscale x 4 x i8> [[OP1]], i8 [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsmul.mask.nxv4i8.i8.i64(<vscale x 4 x i8> [[MASKEDOFF]], <vscale x 4 x i8> [[OP1]], i8 [[OP2]], <vscale x 4 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vsmul_vx_i8mf2_mu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) {
- return __riscv_vsmul_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsmul_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vsmul_vv_i8m1_mu
// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vsmul.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF]], <vscale x 8 x i8> [[OP1]], <vscale x 8 x i8> [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vsmul.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF]], <vscale x 8 x i8> [[OP1]], <vscale x 8 x i8> [[OP2]], <vscale x 8 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vsmul_vv_i8m1_mu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) {
- return __riscv_vsmul_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsmul_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vsmul_vx_i8m1_mu
// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], i8 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vsmul.mask.nxv8i8.i8.i64(<vscale x 8 x i8> [[MASKEDOFF]], <vscale x 8 x i8> [[OP1]], i8 [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vsmul.mask.nxv8i8.i8.i64(<vscale x 8 x i8> [[MASKEDOFF]], <vscale x 8 x i8> [[OP1]], i8 [[OP2]], <vscale x 8 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vsmul_vx_i8m1_mu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) {
- return __riscv_vsmul_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsmul_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vsmul_vv_i8m2_mu
// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vsmul.mask.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[MASKEDOFF]], <vscale x 16 x i8> [[OP1]], <vscale x 16 x i8> [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vsmul.mask.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[MASKEDOFF]], <vscale x 16 x i8> [[OP1]], <vscale x 16 x i8> [[OP2]], <vscale x 16 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vsmul_vv_i8m2_mu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) {
- return __riscv_vsmul_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsmul_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vsmul_vx_i8m2_mu
// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], i8 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vsmul.mask.nxv16i8.i8.i64(<vscale x 16 x i8> [[MASKEDOFF]], <vscale x 16 x i8> [[OP1]], i8 [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vsmul.mask.nxv16i8.i8.i64(<vscale x 16 x i8> [[MASKEDOFF]], <vscale x 16 x i8> [[OP1]], i8 [[OP2]], <vscale x 16 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vsmul_vx_i8m2_mu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) {
- return __riscv_vsmul_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsmul_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vsmul_vv_i8m4_mu
// CHECK-RV64-SAME: (<vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsmul.mask.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[MASKEDOFF]], <vscale x 32 x i8> [[OP1]], <vscale x 32 x i8> [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsmul.mask.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[MASKEDOFF]], <vscale x 32 x i8> [[OP1]], <vscale x 32 x i8> [[OP2]], <vscale x 32 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vsmul_vv_i8m4_mu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) {
- return __riscv_vsmul_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsmul_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vsmul_vx_i8m4_mu
// CHECK-RV64-SAME: (<vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], i8 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsmul.mask.nxv32i8.i8.i64(<vscale x 32 x i8> [[MASKEDOFF]], <vscale x 32 x i8> [[OP1]], i8 [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsmul.mask.nxv32i8.i8.i64(<vscale x 32 x i8> [[MASKEDOFF]], <vscale x 32 x i8> [[OP1]], i8 [[OP2]], <vscale x 32 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vsmul_vx_i8m4_mu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) {
- return __riscv_vsmul_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsmul_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 64 x i8> @test_vsmul_vv_i8m8_mu
// CHECK-RV64-SAME: (<vscale x 64 x i1> [[MASK:%.*]], <vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsmul.mask.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[MASKEDOFF]], <vscale x 64 x i8> [[OP1]], <vscale x 64 x i8> [[OP2]], <vscale x 64 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsmul.mask.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[MASKEDOFF]], <vscale x 64 x i8> [[OP1]], <vscale x 64 x i8> [[OP2]], <vscale x 64 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vsmul_vv_i8m8_mu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl) {
- return __riscv_vsmul_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsmul_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 64 x i8> @test_vsmul_vx_i8m8_mu
// CHECK-RV64-SAME: (<vscale x 64 x i1> [[MASK:%.*]], <vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], i8 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsmul.mask.nxv64i8.i8.i64(<vscale x 64 x i8> [[MASKEDOFF]], <vscale x 64 x i8> [[OP1]], i8 [[OP2]], <vscale x 64 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsmul.mask.nxv64i8.i8.i64(<vscale x 64 x i8> [[MASKEDOFF]], <vscale x 64 x i8> [[OP1]], i8 [[OP2]], <vscale x 64 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vsmul_vx_i8m8_mu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl) {
- return __riscv_vsmul_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsmul_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i16> @test_vsmul_vv_i16mf4_mu
// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsmul.mask.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[MASKEDOFF]], <vscale x 1 x i16> [[OP1]], <vscale x 1 x i16> [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsmul.mask.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[MASKEDOFF]], <vscale x 1 x i16> [[OP1]], <vscale x 1 x i16> [[OP2]], <vscale x 1 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vsmul_vv_i16mf4_mu(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
- return __riscv_vsmul_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsmul_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i16> @test_vsmul_vx_i16mf4_mu
// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i16 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsmul.mask.nxv1i16.i16.i64(<vscale x 1 x i16> [[MASKEDOFF]], <vscale x 1 x i16> [[OP1]], i16 [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsmul.mask.nxv1i16.i16.i64(<vscale x 1 x i16> [[MASKEDOFF]], <vscale x 1 x i16> [[OP1]], i16 [[OP2]], <vscale x 1 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vsmul_vx_i16mf4_mu(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) {
- return __riscv_vsmul_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsmul_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i16> @test_vsmul_vv_i16mf2_mu
// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsmul.mask.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF]], <vscale x 2 x i16> [[OP1]], <vscale x 2 x i16> [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsmul.mask.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF]], <vscale x 2 x i16> [[OP1]], <vscale x 2 x i16> [[OP2]], <vscale x 2 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vsmul_vv_i16mf2_mu(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
- return __riscv_vsmul_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsmul_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i16> @test_vsmul_vx_i16mf2_mu
// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i16 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsmul.mask.nxv2i16.i16.i64(<vscale x 2 x i16> [[MASKEDOFF]], <vscale x 2 x i16> [[OP1]], i16 [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsmul.mask.nxv2i16.i16.i64(<vscale x 2 x i16> [[MASKEDOFF]], <vscale x 2 x i16> [[OP1]], i16 [[OP2]], <vscale x 2 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vsmul_vx_i16mf2_mu(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) {
- return __riscv_vsmul_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsmul_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vsmul_vv_i16m1_mu
// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsmul.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF]], <vscale x 4 x i16> [[OP1]], <vscale x 4 x i16> [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsmul.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF]], <vscale x 4 x i16> [[OP1]], <vscale x 4 x i16> [[OP2]], <vscale x 4 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vsmul_vv_i16m1_mu(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) {
- return __riscv_vsmul_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsmul_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vsmul_vx_i16m1_mu
// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i16 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsmul.mask.nxv4i16.i16.i64(<vscale x 4 x i16> [[MASKEDOFF]], <vscale x 4 x i16> [[OP1]], i16 [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsmul.mask.nxv4i16.i16.i64(<vscale x 4 x i16> [[MASKEDOFF]], <vscale x 4 x i16> [[OP1]], i16 [[OP2]], <vscale x 4 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vsmul_vx_i16m1_mu(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) {
- return __riscv_vsmul_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsmul_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i16> @test_vsmul_vv_i16m2_mu
// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsmul.mask.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[MASKEDOFF]], <vscale x 8 x i16> [[OP1]], <vscale x 8 x i16> [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsmul.mask.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[MASKEDOFF]], <vscale x 8 x i16> [[OP1]], <vscale x 8 x i16> [[OP2]], <vscale x 8 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vsmul_vv_i16m2_mu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) {
- return __riscv_vsmul_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsmul_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i16> @test_vsmul_vx_i16m2_mu
// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i16 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsmul.mask.nxv8i16.i16.i64(<vscale x 8 x i16> [[MASKEDOFF]], <vscale x 8 x i16> [[OP1]], i16 [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsmul.mask.nxv8i16.i16.i64(<vscale x 8 x i16> [[MASKEDOFF]], <vscale x 8 x i16> [[OP1]], i16 [[OP2]], <vscale x 8 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vsmul_vx_i16m2_mu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) {
- return __riscv_vsmul_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsmul_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i16> @test_vsmul_vv_i16m4_mu
// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsmul.mask.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[MASKEDOFF]], <vscale x 16 x i16> [[OP1]], <vscale x 16 x i16> [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsmul.mask.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[MASKEDOFF]], <vscale x 16 x i16> [[OP1]], <vscale x 16 x i16> [[OP2]], <vscale x 16 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vsmul_vv_i16m4_mu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) {
- return __riscv_vsmul_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsmul_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i16> @test_vsmul_vx_i16m4_mu
// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i16 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsmul.mask.nxv16i16.i16.i64(<vscale x 16 x i16> [[MASKEDOFF]], <vscale x 16 x i16> [[OP1]], i16 [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsmul.mask.nxv16i16.i16.i64(<vscale x 16 x i16> [[MASKEDOFF]], <vscale x 16 x i16> [[OP1]], i16 [[OP2]], <vscale x 16 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vsmul_vx_i16m4_mu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) {
- return __riscv_vsmul_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsmul_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i16> @test_vsmul_vv_i16m8_mu
// CHECK-RV64-SAME: (<vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsmul.mask.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[MASKEDOFF]], <vscale x 32 x i16> [[OP1]], <vscale x 32 x i16> [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsmul.mask.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[MASKEDOFF]], <vscale x 32 x i16> [[OP1]], <vscale x 32 x i16> [[OP2]], <vscale x 32 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vsmul_vv_i16m8_mu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl) {
- return __riscv_vsmul_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsmul_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i16> @test_vsmul_vx_i16m8_mu
// CHECK-RV64-SAME: (<vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i16 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsmul.mask.nxv32i16.i16.i64(<vscale x 32 x i16> [[MASKEDOFF]], <vscale x 32 x i16> [[OP1]], i16 [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsmul.mask.nxv32i16.i16.i64(<vscale x 32 x i16> [[MASKEDOFF]], <vscale x 32 x i16> [[OP1]], i16 [[OP2]], <vscale x 32 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vsmul_vx_i16m8_mu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) {
- return __riscv_vsmul_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsmul_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i32> @test_vsmul_vv_i32mf2_mu
// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsmul.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[MASKEDOFF]], <vscale x 1 x i32> [[OP1]], <vscale x 1 x i32> [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsmul.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[MASKEDOFF]], <vscale x 1 x i32> [[OP1]], <vscale x 1 x i32> [[OP2]], <vscale x 1 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vsmul_vv_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
- return __riscv_vsmul_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsmul_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i32> @test_vsmul_vx_i32mf2_mu
// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsmul.mask.nxv1i32.i32.i64(<vscale x 1 x i32> [[MASKEDOFF]], <vscale x 1 x i32> [[OP1]], i32 [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsmul.mask.nxv1i32.i32.i64(<vscale x 1 x i32> [[MASKEDOFF]], <vscale x 1 x i32> [[OP1]], i32 [[OP2]], <vscale x 1 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vsmul_vx_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) {
- return __riscv_vsmul_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsmul_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i32> @test_vsmul_vv_i32m1_mu
// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsmul.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[MASKEDOFF]], <vscale x 2 x i32> [[OP1]], <vscale x 2 x i32> [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsmul.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[MASKEDOFF]], <vscale x 2 x i32> [[OP1]], <vscale x 2 x i32> [[OP2]], <vscale x 2 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vsmul_vv_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) {
- return __riscv_vsmul_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsmul_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i32> @test_vsmul_vx_i32m1_mu
// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i32 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsmul.mask.nxv2i32.i32.i64(<vscale x 2 x i32> [[MASKEDOFF]], <vscale x 2 x i32> [[OP1]], i32 [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsmul.mask.nxv2i32.i32.i64(<vscale x 2 x i32> [[MASKEDOFF]], <vscale x 2 x i32> [[OP1]], i32 [[OP2]], <vscale x 2 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vsmul_vx_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) {
- return __riscv_vsmul_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsmul_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i32> @test_vsmul_vv_i32m2_mu
// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsmul.mask.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[MASKEDOFF]], <vscale x 4 x i32> [[OP1]], <vscale x 4 x i32> [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsmul.mask.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[MASKEDOFF]], <vscale x 4 x i32> [[OP1]], <vscale x 4 x i32> [[OP2]], <vscale x 4 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vsmul_vv_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) {
- return __riscv_vsmul_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsmul_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i32> @test_vsmul_vx_i32m2_mu
// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i32 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsmul.mask.nxv4i32.i32.i64(<vscale x 4 x i32> [[MASKEDOFF]], <vscale x 4 x i32> [[OP1]], i32 [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsmul.mask.nxv4i32.i32.i64(<vscale x 4 x i32> [[MASKEDOFF]], <vscale x 4 x i32> [[OP1]], i32 [[OP2]], <vscale x 4 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vsmul_vx_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) {
- return __riscv_vsmul_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsmul_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i32> @test_vsmul_vv_i32m4_mu
// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsmul.mask.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[MASKEDOFF]], <vscale x 8 x i32> [[OP1]], <vscale x 8 x i32> [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsmul.mask.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[MASKEDOFF]], <vscale x 8 x i32> [[OP1]], <vscale x 8 x i32> [[OP2]], <vscale x 8 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vsmul_vv_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) {
- return __riscv_vsmul_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsmul_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i32> @test_vsmul_vx_i32m4_mu
// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i32 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsmul.mask.nxv8i32.i32.i64(<vscale x 8 x i32> [[MASKEDOFF]], <vscale x 8 x i32> [[OP1]], i32 [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsmul.mask.nxv8i32.i32.i64(<vscale x 8 x i32> [[MASKEDOFF]], <vscale x 8 x i32> [[OP1]], i32 [[OP2]], <vscale x 8 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vsmul_vx_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) {
- return __riscv_vsmul_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsmul_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i32> @test_vsmul_vv_i32m8_mu
// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsmul.mask.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[MASKEDOFF]], <vscale x 16 x i32> [[OP1]], <vscale x 16 x i32> [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsmul.mask.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[MASKEDOFF]], <vscale x 16 x i32> [[OP1]], <vscale x 16 x i32> [[OP2]], <vscale x 16 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vsmul_vv_i32m8_mu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl) {
- return __riscv_vsmul_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsmul_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i32> @test_vsmul_vx_i32m8_mu
// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i32 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsmul.mask.nxv16i32.i32.i64(<vscale x 16 x i32> [[MASKEDOFF]], <vscale x 16 x i32> [[OP1]], i32 [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsmul.mask.nxv16i32.i32.i64(<vscale x 16 x i32> [[MASKEDOFF]], <vscale x 16 x i32> [[OP1]], i32 [[OP2]], <vscale x 16 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vsmul_vx_i32m8_mu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) {
- return __riscv_vsmul_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsmul_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i64> @test_vsmul_vv_i64m1_mu
// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsmul.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[MASKEDOFF]], <vscale x 1 x i64> [[OP1]], <vscale x 1 x i64> [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsmul.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[MASKEDOFF]], <vscale x 1 x i64> [[OP1]], <vscale x 1 x i64> [[OP2]], <vscale x 1 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vsmul_vv_i64m1_mu(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl) {
- return __riscv_vsmul_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsmul_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i64> @test_vsmul_vx_i64m1_mu
// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i64 noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsmul.mask.nxv1i64.i64.i64(<vscale x 1 x i64> [[MASKEDOFF]], <vscale x 1 x i64> [[OP1]], i64 [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsmul.mask.nxv1i64.i64.i64(<vscale x 1 x i64> [[MASKEDOFF]], <vscale x 1 x i64> [[OP1]], i64 [[OP2]], <vscale x 1 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vsmul_vx_i64m1_mu(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) {
- return __riscv_vsmul_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsmul_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i64> @test_vsmul_vv_i64m2_mu
// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsmul.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[MASKEDOFF]], <vscale x 2 x i64> [[OP1]], <vscale x 2 x i64> [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsmul.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[MASKEDOFF]], <vscale x 2 x i64> [[OP1]], <vscale x 2 x i64> [[OP2]], <vscale x 2 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vsmul_vv_i64m2_mu(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl) {
- return __riscv_vsmul_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsmul_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i64> @test_vsmul_vx_i64m2_mu
// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i64 noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsmul.mask.nxv2i64.i64.i64(<vscale x 2 x i64> [[MASKEDOFF]], <vscale x 2 x i64> [[OP1]], i64 [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsmul.mask.nxv2i64.i64.i64(<vscale x 2 x i64> [[MASKEDOFF]], <vscale x 2 x i64> [[OP1]], i64 [[OP2]], <vscale x 2 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vsmul_vx_i64m2_mu(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) {
- return __riscv_vsmul_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsmul_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i64> @test_vsmul_vv_i64m4_mu
// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsmul.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[MASKEDOFF]], <vscale x 4 x i64> [[OP1]], <vscale x 4 x i64> [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsmul.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[MASKEDOFF]], <vscale x 4 x i64> [[OP1]], <vscale x 4 x i64> [[OP2]], <vscale x 4 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vsmul_vv_i64m4_mu(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl) {
- return __riscv_vsmul_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsmul_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i64> @test_vsmul_vx_i64m4_mu
// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i64 noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsmul.mask.nxv4i64.i64.i64(<vscale x 4 x i64> [[MASKEDOFF]], <vscale x 4 x i64> [[OP1]], i64 [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsmul.mask.nxv4i64.i64.i64(<vscale x 4 x i64> [[MASKEDOFF]], <vscale x 4 x i64> [[OP1]], i64 [[OP2]], <vscale x 4 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vsmul_vx_i64m4_mu(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) {
- return __riscv_vsmul_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsmul_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i64> @test_vsmul_vv_i64m8_mu
// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsmul.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[MASKEDOFF]], <vscale x 8 x i64> [[OP1]], <vscale x 8 x i64> [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsmul.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[MASKEDOFF]], <vscale x 8 x i64> [[OP1]], <vscale x 8 x i64> [[OP2]], <vscale x 8 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vsmul_vv_i64m8_mu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl) {
- return __riscv_vsmul_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsmul_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i64> @test_vsmul_vx_i64m8_mu
// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsmul.mask.nxv8i64.i64.i64(<vscale x 8 x i64> [[MASKEDOFF]], <vscale x 8 x i64> [[OP1]], i64 [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsmul.mask.nxv8i64.i64.i64(<vscale x 8 x i64> [[MASKEDOFF]], <vscale x 8 x i64> [[OP1]], i64 [[OP2]], <vscale x 8 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vsmul_vx_i64m8_mu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) {
- return __riscv_vsmul_mu(mask, maskedoff, op1, op2, vl);
+ return __riscv_vsmul_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vssra_vv_i8mf8_tu
// CHECK-RV64-SAME: (<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vssra.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF]], <vscale x 1 x i8> [[OP1]], <vscale x 1 x i8> [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vssra.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF]], <vscale x 1 x i8> [[OP1]], <vscale x 1 x i8> [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vssra_vv_i8mf8_tu(vint8mf8_t maskedoff, vint8mf8_t op1, vuint8mf8_t shift, size_t vl) {
- return __riscv_vssra_tu(maskedoff, op1, shift, vl);
+ return __riscv_vssra_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vssra_vx_i8mf8_tu
// CHECK-RV64-SAME: (<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vssra.nxv1i8.i64.i64(<vscale x 1 x i8> [[MASKEDOFF]], <vscale x 1 x i8> [[OP1]], i64 [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vssra.nxv1i8.i64.i64(<vscale x 1 x i8> [[MASKEDOFF]], <vscale x 1 x i8> [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vssra_vx_i8mf8_tu(vint8mf8_t maskedoff, vint8mf8_t op1, size_t shift, size_t vl) {
- return __riscv_vssra_tu(maskedoff, op1, shift, vl);
+ return __riscv_vssra_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vssra_vv_i8mf4_tu
// CHECK-RV64-SAME: (<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vssra.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[MASKEDOFF]], <vscale x 2 x i8> [[OP1]], <vscale x 2 x i8> [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vssra.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[MASKEDOFF]], <vscale x 2 x i8> [[OP1]], <vscale x 2 x i8> [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vssra_vv_i8mf4_tu(vint8mf4_t maskedoff, vint8mf4_t op1, vuint8mf4_t shift, size_t vl) {
- return __riscv_vssra_tu(maskedoff, op1, shift, vl);
+ return __riscv_vssra_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vssra_vx_i8mf4_tu
// CHECK-RV64-SAME: (<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vssra.nxv2i8.i64.i64(<vscale x 2 x i8> [[MASKEDOFF]], <vscale x 2 x i8> [[OP1]], i64 [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vssra.nxv2i8.i64.i64(<vscale x 2 x i8> [[MASKEDOFF]], <vscale x 2 x i8> [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vssra_vx_i8mf4_tu(vint8mf4_t maskedoff, vint8mf4_t op1, size_t shift, size_t vl) {
- return __riscv_vssra_tu(maskedoff, op1, shift, vl);
+ return __riscv_vssra_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vssra_vv_i8mf2_tu
// CHECK-RV64-SAME: (<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vssra.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[MASKEDOFF]], <vscale x 4 x i8> [[OP1]], <vscale x 4 x i8> [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vssra.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[MASKEDOFF]], <vscale x 4 x i8> [[OP1]], <vscale x 4 x i8> [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vssra_vv_i8mf2_tu(vint8mf2_t maskedoff, vint8mf2_t op1, vuint8mf2_t shift, size_t vl) {
- return __riscv_vssra_tu(maskedoff, op1, shift, vl);
+ return __riscv_vssra_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vssra_vx_i8mf2_tu
// CHECK-RV64-SAME: (<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vssra.nxv4i8.i64.i64(<vscale x 4 x i8> [[MASKEDOFF]], <vscale x 4 x i8> [[OP1]], i64 [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vssra.nxv4i8.i64.i64(<vscale x 4 x i8> [[MASKEDOFF]], <vscale x 4 x i8> [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vssra_vx_i8mf2_tu(vint8mf2_t maskedoff, vint8mf2_t op1, size_t shift, size_t vl) {
- return __riscv_vssra_tu(maskedoff, op1, shift, vl);
+ return __riscv_vssra_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vssra_vv_i8m1_tu
// CHECK-RV64-SAME: (<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vssra.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF]], <vscale x 8 x i8> [[OP1]], <vscale x 8 x i8> [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vssra.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF]], <vscale x 8 x i8> [[OP1]], <vscale x 8 x i8> [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vssra_vv_i8m1_tu(vint8m1_t maskedoff, vint8m1_t op1, vuint8m1_t shift, size_t vl) {
- return __riscv_vssra_tu(maskedoff, op1, shift, vl);
+ return __riscv_vssra_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vssra_vx_i8m1_tu
// CHECK-RV64-SAME: (<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vssra.nxv8i8.i64.i64(<vscale x 8 x i8> [[MASKEDOFF]], <vscale x 8 x i8> [[OP1]], i64 [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vssra.nxv8i8.i64.i64(<vscale x 8 x i8> [[MASKEDOFF]], <vscale x 8 x i8> [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vssra_vx_i8m1_tu(vint8m1_t maskedoff, vint8m1_t op1, size_t shift, size_t vl) {
- return __riscv_vssra_tu(maskedoff, op1, shift, vl);
+ return __riscv_vssra_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vssra_vv_i8m2_tu
// CHECK-RV64-SAME: (<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vssra.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[MASKEDOFF]], <vscale x 16 x i8> [[OP1]], <vscale x 16 x i8> [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vssra.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[MASKEDOFF]], <vscale x 16 x i8> [[OP1]], <vscale x 16 x i8> [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vssra_vv_i8m2_tu(vint8m2_t maskedoff, vint8m2_t op1, vuint8m2_t shift, size_t vl) {
- return __riscv_vssra_tu(maskedoff, op1, shift, vl);
+ return __riscv_vssra_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vssra_vx_i8m2_tu
// CHECK-RV64-SAME: (<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vssra.nxv16i8.i64.i64(<vscale x 16 x i8> [[MASKEDOFF]], <vscale x 16 x i8> [[OP1]], i64 [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vssra.nxv16i8.i64.i64(<vscale x 16 x i8> [[MASKEDOFF]], <vscale x 16 x i8> [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vssra_vx_i8m2_tu(vint8m2_t maskedoff, vint8m2_t op1, size_t shift, size_t vl) {
- return __riscv_vssra_tu(maskedoff, op1, shift, vl);
+ return __riscv_vssra_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vssra_vv_i8m4_tu
// CHECK-RV64-SAME: (<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vssra.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[MASKEDOFF]], <vscale x 32 x i8> [[OP1]], <vscale x 32 x i8> [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vssra.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[MASKEDOFF]], <vscale x 32 x i8> [[OP1]], <vscale x 32 x i8> [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vssra_vv_i8m4_tu(vint8m4_t maskedoff, vint8m4_t op1, vuint8m4_t shift, size_t vl) {
- return __riscv_vssra_tu(maskedoff, op1, shift, vl);
+ return __riscv_vssra_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vssra_vx_i8m4_tu
// CHECK-RV64-SAME: (<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vssra.nxv32i8.i64.i64(<vscale x 32 x i8> [[MASKEDOFF]], <vscale x 32 x i8> [[OP1]], i64 [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vssra.nxv32i8.i64.i64(<vscale x 32 x i8> [[MASKEDOFF]], <vscale x 32 x i8> [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vssra_vx_i8m4_tu(vint8m4_t maskedoff, vint8m4_t op1, size_t shift, size_t vl) {
- return __riscv_vssra_tu(maskedoff, op1, shift, vl);
+ return __riscv_vssra_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 64 x i8> @test_vssra_vv_i8m8_tu
// CHECK-RV64-SAME: (<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vssra.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[MASKEDOFF]], <vscale x 64 x i8> [[OP1]], <vscale x 64 x i8> [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vssra.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[MASKEDOFF]], <vscale x 64 x i8> [[OP1]], <vscale x 64 x i8> [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vssra_vv_i8m8_tu(vint8m8_t maskedoff, vint8m8_t op1, vuint8m8_t shift, size_t vl) {
- return __riscv_vssra_tu(maskedoff, op1, shift, vl);
+ return __riscv_vssra_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 64 x i8> @test_vssra_vx_i8m8_tu
// CHECK-RV64-SAME: (<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vssra.nxv64i8.i64.i64(<vscale x 64 x i8> [[MASKEDOFF]], <vscale x 64 x i8> [[OP1]], i64 [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vssra.nxv64i8.i64.i64(<vscale x 64 x i8> [[MASKEDOFF]], <vscale x 64 x i8> [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vssra_vx_i8m8_tu(vint8m8_t maskedoff, vint8m8_t op1, size_t shift, size_t vl) {
- return __riscv_vssra_tu(maskedoff, op1, shift, vl);
+ return __riscv_vssra_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i16> @test_vssra_vv_i16mf4_tu
// CHECK-RV64-SAME: (<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vssra.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[MASKEDOFF]], <vscale x 1 x i16> [[OP1]], <vscale x 1 x i16> [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vssra.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[MASKEDOFF]], <vscale x 1 x i16> [[OP1]], <vscale x 1 x i16> [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vssra_vv_i16mf4_tu(vint16mf4_t maskedoff, vint16mf4_t op1, vuint16mf4_t shift, size_t vl) {
- return __riscv_vssra_tu(maskedoff, op1, shift, vl);
+ return __riscv_vssra_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i16> @test_vssra_vx_i16mf4_tu
// CHECK-RV64-SAME: (<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vssra.nxv1i16.i64.i64(<vscale x 1 x i16> [[MASKEDOFF]], <vscale x 1 x i16> [[OP1]], i64 [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vssra.nxv1i16.i64.i64(<vscale x 1 x i16> [[MASKEDOFF]], <vscale x 1 x i16> [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vssra_vx_i16mf4_tu(vint16mf4_t maskedoff, vint16mf4_t op1, size_t shift, size_t vl) {
- return __riscv_vssra_tu(maskedoff, op1, shift, vl);
+ return __riscv_vssra_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i16> @test_vssra_vv_i16mf2_tu
// CHECK-RV64-SAME: (<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vssra.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF]], <vscale x 2 x i16> [[OP1]], <vscale x 2 x i16> [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vssra.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF]], <vscale x 2 x i16> [[OP1]], <vscale x 2 x i16> [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vssra_vv_i16mf2_tu(vint16mf2_t maskedoff, vint16mf2_t op1, vuint16mf2_t shift, size_t vl) {
- return __riscv_vssra_tu(maskedoff, op1, shift, vl);
+ return __riscv_vssra_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i16> @test_vssra_vx_i16mf2_tu
// CHECK-RV64-SAME: (<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vssra.nxv2i16.i64.i64(<vscale x 2 x i16> [[MASKEDOFF]], <vscale x 2 x i16> [[OP1]], i64 [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vssra.nxv2i16.i64.i64(<vscale x 2 x i16> [[MASKEDOFF]], <vscale x 2 x i16> [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vssra_vx_i16mf2_tu(vint16mf2_t maskedoff, vint16mf2_t op1, size_t shift, size_t vl) {
- return __riscv_vssra_tu(maskedoff, op1, shift, vl);
+ return __riscv_vssra_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vssra_vv_i16m1_tu
// CHECK-RV64-SAME: (<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vssra.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF]], <vscale x 4 x i16> [[OP1]], <vscale x 4 x i16> [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vssra.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF]], <vscale x 4 x i16> [[OP1]], <vscale x 4 x i16> [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vssra_vv_i16m1_tu(vint16m1_t maskedoff, vint16m1_t op1, vuint16m1_t shift, size_t vl) {
- return __riscv_vssra_tu(maskedoff, op1, shift, vl);
+ return __riscv_vssra_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vssra_vx_i16m1_tu
// CHECK-RV64-SAME: (<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vssra.nxv4i16.i64.i64(<vscale x 4 x i16> [[MASKEDOFF]], <vscale x 4 x i16> [[OP1]], i64 [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vssra.nxv4i16.i64.i64(<vscale x 4 x i16> [[MASKEDOFF]], <vscale x 4 x i16> [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vssra_vx_i16m1_tu(vint16m1_t maskedoff, vint16m1_t op1, size_t shift, size_t vl) {
- return __riscv_vssra_tu(maskedoff, op1, shift, vl);
+ return __riscv_vssra_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i16> @test_vssra_vv_i16m2_tu
// CHECK-RV64-SAME: (<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vssra.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[MASKEDOFF]], <vscale x 8 x i16> [[OP1]], <vscale x 8 x i16> [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vssra.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[MASKEDOFF]], <vscale x 8 x i16> [[OP1]], <vscale x 8 x i16> [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vssra_vv_i16m2_tu(vint16m2_t maskedoff, vint16m2_t op1, vuint16m2_t shift, size_t vl) {
- return __riscv_vssra_tu(maskedoff, op1, shift, vl);
+ return __riscv_vssra_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i16> @test_vssra_vx_i16m2_tu
// CHECK-RV64-SAME: (<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vssra.nxv8i16.i64.i64(<vscale x 8 x i16> [[MASKEDOFF]], <vscale x 8 x i16> [[OP1]], i64 [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vssra.nxv8i16.i64.i64(<vscale x 8 x i16> [[MASKEDOFF]], <vscale x 8 x i16> [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vssra_vx_i16m2_tu(vint16m2_t maskedoff, vint16m2_t op1, size_t shift, size_t vl) {
- return __riscv_vssra_tu(maskedoff, op1, shift, vl);
+ return __riscv_vssra_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i16> @test_vssra_vv_i16m4_tu
// CHECK-RV64-SAME: (<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vssra.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[MASKEDOFF]], <vscale x 16 x i16> [[OP1]], <vscale x 16 x i16> [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vssra.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[MASKEDOFF]], <vscale x 16 x i16> [[OP1]], <vscale x 16 x i16> [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vssra_vv_i16m4_tu(vint16m4_t maskedoff, vint16m4_t op1, vuint16m4_t shift, size_t vl) {
- return __riscv_vssra_tu(maskedoff, op1, shift, vl);
+ return __riscv_vssra_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i16> @test_vssra_vx_i16m4_tu
// CHECK-RV64-SAME: (<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vssra.nxv16i16.i64.i64(<vscale x 16 x i16> [[MASKEDOFF]], <vscale x 16 x i16> [[OP1]], i64 [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vssra.nxv16i16.i64.i64(<vscale x 16 x i16> [[MASKEDOFF]], <vscale x 16 x i16> [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vssra_vx_i16m4_tu(vint16m4_t maskedoff, vint16m4_t op1, size_t shift, size_t vl) {
- return __riscv_vssra_tu(maskedoff, op1, shift, vl);
+ return __riscv_vssra_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i16> @test_vssra_vv_i16m8_tu
// CHECK-RV64-SAME: (<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vssra.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[MASKEDOFF]], <vscale x 32 x i16> [[OP1]], <vscale x 32 x i16> [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vssra.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[MASKEDOFF]], <vscale x 32 x i16> [[OP1]], <vscale x 32 x i16> [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vssra_vv_i16m8_tu(vint16m8_t maskedoff, vint16m8_t op1, vuint16m8_t shift, size_t vl) {
- return __riscv_vssra_tu(maskedoff, op1, shift, vl);
+ return __riscv_vssra_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i16> @test_vssra_vx_i16m8_tu
// CHECK-RV64-SAME: (<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vssra.nxv32i16.i64.i64(<vscale x 32 x i16> [[MASKEDOFF]], <vscale x 32 x i16> [[OP1]], i64 [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vssra.nxv32i16.i64.i64(<vscale x 32 x i16> [[MASKEDOFF]], <vscale x 32 x i16> [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vssra_vx_i16m8_tu(vint16m8_t maskedoff, vint16m8_t op1, size_t shift, size_t vl) {
- return __riscv_vssra_tu(maskedoff, op1, shift, vl);
+ return __riscv_vssra_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i32> @test_vssra_vv_i32mf2_tu
// CHECK-RV64-SAME: (<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vssra.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[MASKEDOFF]], <vscale x 1 x i32> [[OP1]], <vscale x 1 x i32> [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vssra.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[MASKEDOFF]], <vscale x 1 x i32> [[OP1]], <vscale x 1 x i32> [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vssra_vv_i32mf2_tu(vint32mf2_t maskedoff, vint32mf2_t op1, vuint32mf2_t shift, size_t vl) {
- return __riscv_vssra_tu(maskedoff, op1, shift, vl);
+ return __riscv_vssra_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i32> @test_vssra_vx_i32mf2_tu
// CHECK-RV64-SAME: (<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vssra.nxv1i32.i64.i64(<vscale x 1 x i32> [[MASKEDOFF]], <vscale x 1 x i32> [[OP1]], i64 [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vssra.nxv1i32.i64.i64(<vscale x 1 x i32> [[MASKEDOFF]], <vscale x 1 x i32> [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vssra_vx_i32mf2_tu(vint32mf2_t maskedoff, vint32mf2_t op1, size_t shift, size_t vl) {
- return __riscv_vssra_tu(maskedoff, op1, shift, vl);
+ return __riscv_vssra_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i32> @test_vssra_vv_i32m1_tu
// CHECK-RV64-SAME: (<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vssra.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[MASKEDOFF]], <vscale x 2 x i32> [[OP1]], <vscale x 2 x i32> [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vssra.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[MASKEDOFF]], <vscale x 2 x i32> [[OP1]], <vscale x 2 x i32> [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vssra_vv_i32m1_tu(vint32m1_t maskedoff, vint32m1_t op1, vuint32m1_t shift, size_t vl) {
- return __riscv_vssra_tu(maskedoff, op1, shift, vl);
+ return __riscv_vssra_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i32> @test_vssra_vx_i32m1_tu
// CHECK-RV64-SAME: (<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vssra.nxv2i32.i64.i64(<vscale x 2 x i32> [[MASKEDOFF]], <vscale x 2 x i32> [[OP1]], i64 [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vssra.nxv2i32.i64.i64(<vscale x 2 x i32> [[MASKEDOFF]], <vscale x 2 x i32> [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vssra_vx_i32m1_tu(vint32m1_t maskedoff, vint32m1_t op1, size_t shift, size_t vl) {
- return __riscv_vssra_tu(maskedoff, op1, shift, vl);
+ return __riscv_vssra_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i32> @test_vssra_vv_i32m2_tu
// CHECK-RV64-SAME: (<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vssra.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[MASKEDOFF]], <vscale x 4 x i32> [[OP1]], <vscale x 4 x i32> [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vssra.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[MASKEDOFF]], <vscale x 4 x i32> [[OP1]], <vscale x 4 x i32> [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vssra_vv_i32m2_tu(vint32m2_t maskedoff, vint32m2_t op1, vuint32m2_t shift, size_t vl) {
- return __riscv_vssra_tu(maskedoff, op1, shift, vl);
+ return __riscv_vssra_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i32> @test_vssra_vx_i32m2_tu
// CHECK-RV64-SAME: (<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vssra.nxv4i32.i64.i64(<vscale x 4 x i32> [[MASKEDOFF]], <vscale x 4 x i32> [[OP1]], i64 [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vssra.nxv4i32.i64.i64(<vscale x 4 x i32> [[MASKEDOFF]], <vscale x 4 x i32> [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vssra_vx_i32m2_tu(vint32m2_t maskedoff, vint32m2_t op1, size_t shift, size_t vl) {
- return __riscv_vssra_tu(maskedoff, op1, shift, vl);
+ return __riscv_vssra_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i32> @test_vssra_vv_i32m4_tu
// CHECK-RV64-SAME: (<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vssra.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[MASKEDOFF]], <vscale x 8 x i32> [[OP1]], <vscale x 8 x i32> [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vssra.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[MASKEDOFF]], <vscale x 8 x i32> [[OP1]], <vscale x 8 x i32> [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vssra_vv_i32m4_tu(vint32m4_t maskedoff, vint32m4_t op1, vuint32m4_t shift, size_t vl) {
- return __riscv_vssra_tu(maskedoff, op1, shift, vl);
+ return __riscv_vssra_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i32> @test_vssra_vx_i32m4_tu
// CHECK-RV64-SAME: (<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vssra.nxv8i32.i64.i64(<vscale x 8 x i32> [[MASKEDOFF]], <vscale x 8 x i32> [[OP1]], i64 [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vssra.nxv8i32.i64.i64(<vscale x 8 x i32> [[MASKEDOFF]], <vscale x 8 x i32> [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vssra_vx_i32m4_tu(vint32m4_t maskedoff, vint32m4_t op1, size_t shift, size_t vl) {
- return __riscv_vssra_tu(maskedoff, op1, shift, vl);
+ return __riscv_vssra_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i32> @test_vssra_vv_i32m8_tu
// CHECK-RV64-SAME: (<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vssra.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[MASKEDOFF]], <vscale x 16 x i32> [[OP1]], <vscale x 16 x i32> [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vssra.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[MASKEDOFF]], <vscale x 16 x i32> [[OP1]], <vscale x 16 x i32> [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vssra_vv_i32m8_tu(vint32m8_t maskedoff, vint32m8_t op1, vuint32m8_t shift, size_t vl) {
- return __riscv_vssra_tu(maskedoff, op1, shift, vl);
+ return __riscv_vssra_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i32> @test_vssra_vx_i32m8_tu
// CHECK-RV64-SAME: (<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vssra.nxv16i32.i64.i64(<vscale x 16 x i32> [[MASKEDOFF]], <vscale x 16 x i32> [[OP1]], i64 [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vssra.nxv16i32.i64.i64(<vscale x 16 x i32> [[MASKEDOFF]], <vscale x 16 x i32> [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vssra_vx_i32m8_tu(vint32m8_t maskedoff, vint32m8_t op1, size_t shift, size_t vl) {
- return __riscv_vssra_tu(maskedoff, op1, shift, vl);
+ return __riscv_vssra_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i64> @test_vssra_vv_i64m1_tu
// CHECK-RV64-SAME: (<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vssra.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[MASKEDOFF]], <vscale x 1 x i64> [[OP1]], <vscale x 1 x i64> [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vssra.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[MASKEDOFF]], <vscale x 1 x i64> [[OP1]], <vscale x 1 x i64> [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vssra_vv_i64m1_tu(vint64m1_t maskedoff, vint64m1_t op1, vuint64m1_t shift, size_t vl) {
- return __riscv_vssra_tu(maskedoff, op1, shift, vl);
+ return __riscv_vssra_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i64> @test_vssra_vx_i64m1_tu
// CHECK-RV64-SAME: (<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vssra.nxv1i64.i64.i64(<vscale x 1 x i64> [[MASKEDOFF]], <vscale x 1 x i64> [[OP1]], i64 [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vssra.nxv1i64.i64.i64(<vscale x 1 x i64> [[MASKEDOFF]], <vscale x 1 x i64> [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vssra_vx_i64m1_tu(vint64m1_t maskedoff, vint64m1_t op1, size_t shift, size_t vl) {
- return __riscv_vssra_tu(maskedoff, op1, shift, vl);
+ return __riscv_vssra_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i64> @test_vssra_vv_i64m2_tu
// CHECK-RV64-SAME: (<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vssra.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[MASKEDOFF]], <vscale x 2 x i64> [[OP1]], <vscale x 2 x i64> [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vssra.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[MASKEDOFF]], <vscale x 2 x i64> [[OP1]], <vscale x 2 x i64> [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vssra_vv_i64m2_tu(vint64m2_t maskedoff, vint64m2_t op1, vuint64m2_t shift, size_t vl) {
- return __riscv_vssra_tu(maskedoff, op1, shift, vl);
+ return __riscv_vssra_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i64> @test_vssra_vx_i64m2_tu
// CHECK-RV64-SAME: (<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vssra.nxv2i64.i64.i64(<vscale x 2 x i64> [[MASKEDOFF]], <vscale x 2 x i64> [[OP1]], i64 [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vssra.nxv2i64.i64.i64(<vscale x 2 x i64> [[MASKEDOFF]], <vscale x 2 x i64> [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vssra_vx_i64m2_tu(vint64m2_t maskedoff, vint64m2_t op1, size_t shift, size_t vl) {
- return __riscv_vssra_tu(maskedoff, op1, shift, vl);
+ return __riscv_vssra_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i64> @test_vssra_vv_i64m4_tu
// CHECK-RV64-SAME: (<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vssra.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[MASKEDOFF]], <vscale x 4 x i64> [[OP1]], <vscale x 4 x i64> [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vssra.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[MASKEDOFF]], <vscale x 4 x i64> [[OP1]], <vscale x 4 x i64> [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vssra_vv_i64m4_tu(vint64m4_t maskedoff, vint64m4_t op1, vuint64m4_t shift, size_t vl) {
- return __riscv_vssra_tu(maskedoff, op1, shift, vl);
+ return __riscv_vssra_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i64> @test_vssra_vx_i64m4_tu
// CHECK-RV64-SAME: (<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vssra.nxv4i64.i64.i64(<vscale x 4 x i64> [[MASKEDOFF]], <vscale x 4 x i64> [[OP1]], i64 [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vssra.nxv4i64.i64.i64(<vscale x 4 x i64> [[MASKEDOFF]], <vscale x 4 x i64> [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vssra_vx_i64m4_tu(vint64m4_t maskedoff, vint64m4_t op1, size_t shift, size_t vl) {
- return __riscv_vssra_tu(maskedoff, op1, shift, vl);
+ return __riscv_vssra_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i64> @test_vssra_vv_i64m8_tu
// CHECK-RV64-SAME: (<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vssra.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[MASKEDOFF]], <vscale x 8 x i64> [[OP1]], <vscale x 8 x i64> [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vssra.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[MASKEDOFF]], <vscale x 8 x i64> [[OP1]], <vscale x 8 x i64> [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vssra_vv_i64m8_tu(vint64m8_t maskedoff, vint64m8_t op1, vuint64m8_t shift, size_t vl) {
- return __riscv_vssra_tu(maskedoff, op1, shift, vl);
+ return __riscv_vssra_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i64> @test_vssra_vx_i64m8_tu
// CHECK-RV64-SAME: (<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vssra.nxv8i64.i64.i64(<vscale x 8 x i64> [[MASKEDOFF]], <vscale x 8 x i64> [[OP1]], i64 [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vssra.nxv8i64.i64.i64(<vscale x 8 x i64> [[MASKEDOFF]], <vscale x 8 x i64> [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vssra_vx_i64m8_tu(vint64m8_t maskedoff, vint64m8_t op1, size_t shift, size_t vl) {
- return __riscv_vssra_tu(maskedoff, op1, shift, vl);
+ return __riscv_vssra_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vssra_vv_i8mf8_tum
// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vssra.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF]], <vscale x 1 x i8> [[OP1]], <vscale x 1 x i8> [[SHIFT]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vssra.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF]], <vscale x 1 x i8> [[OP1]], <vscale x 1 x i8> [[SHIFT]], <vscale x 1 x i1> [[MASK]], i64 0, i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vssra_vv_i8mf8_tum(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vuint8mf8_t shift, size_t vl) {
- return __riscv_vssra_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssra_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vssra_vx_i8mf8_tum
// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vssra.mask.nxv1i8.i64.i64(<vscale x 1 x i8> [[MASKEDOFF]], <vscale x 1 x i8> [[OP1]], i64 [[SHIFT]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vssra.mask.nxv1i8.i64.i64(<vscale x 1 x i8> [[MASKEDOFF]], <vscale x 1 x i8> [[OP1]], i64 [[SHIFT]], <vscale x 1 x i1> [[MASK]], i64 0, i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vssra_vx_i8mf8_tum(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, size_t shift, size_t vl) {
- return __riscv_vssra_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssra_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vssra_vv_i8mf4_tum
// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vssra.mask.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[MASKEDOFF]], <vscale x 2 x i8> [[OP1]], <vscale x 2 x i8> [[SHIFT]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vssra.mask.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[MASKEDOFF]], <vscale x 2 x i8> [[OP1]], <vscale x 2 x i8> [[SHIFT]], <vscale x 2 x i1> [[MASK]], i64 0, i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vssra_vv_i8mf4_tum(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vuint8mf4_t shift, size_t vl) {
- return __riscv_vssra_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssra_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vssra_vx_i8mf4_tum
// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vssra.mask.nxv2i8.i64.i64(<vscale x 2 x i8> [[MASKEDOFF]], <vscale x 2 x i8> [[OP1]], i64 [[SHIFT]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vssra.mask.nxv2i8.i64.i64(<vscale x 2 x i8> [[MASKEDOFF]], <vscale x 2 x i8> [[OP1]], i64 [[SHIFT]], <vscale x 2 x i1> [[MASK]], i64 0, i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vssra_vx_i8mf4_tum(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, size_t shift, size_t vl) {
- return __riscv_vssra_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssra_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vssra_vv_i8mf2_tum
// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vssra.mask.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[MASKEDOFF]], <vscale x 4 x i8> [[OP1]], <vscale x 4 x i8> [[SHIFT]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vssra.mask.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[MASKEDOFF]], <vscale x 4 x i8> [[OP1]], <vscale x 4 x i8> [[SHIFT]], <vscale x 4 x i1> [[MASK]], i64 0, i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vssra_vv_i8mf2_tum(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vuint8mf2_t shift, size_t vl) {
- return __riscv_vssra_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssra_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vssra_vx_i8mf2_tum
// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vssra.mask.nxv4i8.i64.i64(<vscale x 4 x i8> [[MASKEDOFF]], <vscale x 4 x i8> [[OP1]], i64 [[SHIFT]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vssra.mask.nxv4i8.i64.i64(<vscale x 4 x i8> [[MASKEDOFF]], <vscale x 4 x i8> [[OP1]], i64 [[SHIFT]], <vscale x 4 x i1> [[MASK]], i64 0, i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vssra_vx_i8mf2_tum(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, size_t shift, size_t vl) {
- return __riscv_vssra_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssra_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vssra_vv_i8m1_tum
// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vssra.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF]], <vscale x 8 x i8> [[OP1]], <vscale x 8 x i8> [[SHIFT]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vssra.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF]], <vscale x 8 x i8> [[OP1]], <vscale x 8 x i8> [[SHIFT]], <vscale x 8 x i1> [[MASK]], i64 0, i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vssra_vv_i8m1_tum(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vuint8m1_t shift, size_t vl) {
- return __riscv_vssra_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssra_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vssra_vx_i8m1_tum
// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vssra.mask.nxv8i8.i64.i64(<vscale x 8 x i8> [[MASKEDOFF]], <vscale x 8 x i8> [[OP1]], i64 [[SHIFT]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vssra.mask.nxv8i8.i64.i64(<vscale x 8 x i8> [[MASKEDOFF]], <vscale x 8 x i8> [[OP1]], i64 [[SHIFT]], <vscale x 8 x i1> [[MASK]], i64 0, i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vssra_vx_i8m1_tum(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, size_t shift, size_t vl) {
- return __riscv_vssra_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssra_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vssra_vv_i8m2_tum
// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vssra.mask.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[MASKEDOFF]], <vscale x 16 x i8> [[OP1]], <vscale x 16 x i8> [[SHIFT]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vssra.mask.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[MASKEDOFF]], <vscale x 16 x i8> [[OP1]], <vscale x 16 x i8> [[SHIFT]], <vscale x 16 x i1> [[MASK]], i64 0, i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vssra_vv_i8m2_tum(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vuint8m2_t shift, size_t vl) {
- return __riscv_vssra_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssra_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vssra_vx_i8m2_tum
// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vssra.mask.nxv16i8.i64.i64(<vscale x 16 x i8> [[MASKEDOFF]], <vscale x 16 x i8> [[OP1]], i64 [[SHIFT]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vssra.mask.nxv16i8.i64.i64(<vscale x 16 x i8> [[MASKEDOFF]], <vscale x 16 x i8> [[OP1]], i64 [[SHIFT]], <vscale x 16 x i1> [[MASK]], i64 0, i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vssra_vx_i8m2_tum(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, size_t shift, size_t vl) {
- return __riscv_vssra_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssra_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vssra_vv_i8m4_tum
// CHECK-RV64-SAME: (<vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vssra.mask.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[MASKEDOFF]], <vscale x 32 x i8> [[OP1]], <vscale x 32 x i8> [[SHIFT]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vssra.mask.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[MASKEDOFF]], <vscale x 32 x i8> [[OP1]], <vscale x 32 x i8> [[SHIFT]], <vscale x 32 x i1> [[MASK]], i64 0, i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vssra_vv_i8m4_tum(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vuint8m4_t shift, size_t vl) {
- return __riscv_vssra_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssra_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vssra_vx_i8m4_tum
// CHECK-RV64-SAME: (<vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vssra.mask.nxv32i8.i64.i64(<vscale x 32 x i8> [[MASKEDOFF]], <vscale x 32 x i8> [[OP1]], i64 [[SHIFT]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vssra.mask.nxv32i8.i64.i64(<vscale x 32 x i8> [[MASKEDOFF]], <vscale x 32 x i8> [[OP1]], i64 [[SHIFT]], <vscale x 32 x i1> [[MASK]], i64 0, i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vssra_vx_i8m4_tum(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, size_t shift, size_t vl) {
- return __riscv_vssra_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssra_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 64 x i8> @test_vssra_vv_i8m8_tum
// CHECK-RV64-SAME: (<vscale x 64 x i1> [[MASK:%.*]], <vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vssra.mask.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[MASKEDOFF]], <vscale x 64 x i8> [[OP1]], <vscale x 64 x i8> [[SHIFT]], <vscale x 64 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vssra.mask.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[MASKEDOFF]], <vscale x 64 x i8> [[OP1]], <vscale x 64 x i8> [[SHIFT]], <vscale x 64 x i1> [[MASK]], i64 0, i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vssra_vv_i8m8_tum(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vuint8m8_t shift, size_t vl) {
- return __riscv_vssra_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssra_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 64 x i8> @test_vssra_vx_i8m8_tum
// CHECK-RV64-SAME: (<vscale x 64 x i1> [[MASK:%.*]], <vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vssra.mask.nxv64i8.i64.i64(<vscale x 64 x i8> [[MASKEDOFF]], <vscale x 64 x i8> [[OP1]], i64 [[SHIFT]], <vscale x 64 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vssra.mask.nxv64i8.i64.i64(<vscale x 64 x i8> [[MASKEDOFF]], <vscale x 64 x i8> [[OP1]], i64 [[SHIFT]], <vscale x 64 x i1> [[MASK]], i64 0, i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vssra_vx_i8m8_tum(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, size_t shift, size_t vl) {
- return __riscv_vssra_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssra_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i16> @test_vssra_vv_i16mf4_tum
// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vssra.mask.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[MASKEDOFF]], <vscale x 1 x i16> [[OP1]], <vscale x 1 x i16> [[SHIFT]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vssra.mask.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[MASKEDOFF]], <vscale x 1 x i16> [[OP1]], <vscale x 1 x i16> [[SHIFT]], <vscale x 1 x i1> [[MASK]], i64 0, i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vssra_vv_i16mf4_tum(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vuint16mf4_t shift, size_t vl) {
- return __riscv_vssra_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssra_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i16> @test_vssra_vx_i16mf4_tum
// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vssra.mask.nxv1i16.i64.i64(<vscale x 1 x i16> [[MASKEDOFF]], <vscale x 1 x i16> [[OP1]], i64 [[SHIFT]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vssra.mask.nxv1i16.i64.i64(<vscale x 1 x i16> [[MASKEDOFF]], <vscale x 1 x i16> [[OP1]], i64 [[SHIFT]], <vscale x 1 x i1> [[MASK]], i64 0, i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vssra_vx_i16mf4_tum(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, size_t shift, size_t vl) {
- return __riscv_vssra_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssra_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i16> @test_vssra_vv_i16mf2_tum
// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vssra.mask.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF]], <vscale x 2 x i16> [[OP1]], <vscale x 2 x i16> [[SHIFT]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vssra.mask.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF]], <vscale x 2 x i16> [[OP1]], <vscale x 2 x i16> [[SHIFT]], <vscale x 2 x i1> [[MASK]], i64 0, i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vssra_vv_i16mf2_tum(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vuint16mf2_t shift, size_t vl) {
- return __riscv_vssra_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssra_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i16> @test_vssra_vx_i16mf2_tum
// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vssra.mask.nxv2i16.i64.i64(<vscale x 2 x i16> [[MASKEDOFF]], <vscale x 2 x i16> [[OP1]], i64 [[SHIFT]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vssra.mask.nxv2i16.i64.i64(<vscale x 2 x i16> [[MASKEDOFF]], <vscale x 2 x i16> [[OP1]], i64 [[SHIFT]], <vscale x 2 x i1> [[MASK]], i64 0, i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vssra_vx_i16mf2_tum(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, size_t shift, size_t vl) {
- return __riscv_vssra_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssra_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vssra_vv_i16m1_tum
// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vssra.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF]], <vscale x 4 x i16> [[OP1]], <vscale x 4 x i16> [[SHIFT]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vssra.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF]], <vscale x 4 x i16> [[OP1]], <vscale x 4 x i16> [[SHIFT]], <vscale x 4 x i1> [[MASK]], i64 0, i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vssra_vv_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vuint16m1_t shift, size_t vl) {
- return __riscv_vssra_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssra_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vssra_vx_i16m1_tum
// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vssra.mask.nxv4i16.i64.i64(<vscale x 4 x i16> [[MASKEDOFF]], <vscale x 4 x i16> [[OP1]], i64 [[SHIFT]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vssra.mask.nxv4i16.i64.i64(<vscale x 4 x i16> [[MASKEDOFF]], <vscale x 4 x i16> [[OP1]], i64 [[SHIFT]], <vscale x 4 x i1> [[MASK]], i64 0, i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vssra_vx_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, size_t shift, size_t vl) {
- return __riscv_vssra_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssra_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i16> @test_vssra_vv_i16m2_tum
// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vssra.mask.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[MASKEDOFF]], <vscale x 8 x i16> [[OP1]], <vscale x 8 x i16> [[SHIFT]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vssra.mask.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[MASKEDOFF]], <vscale x 8 x i16> [[OP1]], <vscale x 8 x i16> [[SHIFT]], <vscale x 8 x i1> [[MASK]], i64 0, i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vssra_vv_i16m2_tum(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vuint16m2_t shift, size_t vl) {
- return __riscv_vssra_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssra_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i16> @test_vssra_vx_i16m2_tum
// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vssra.mask.nxv8i16.i64.i64(<vscale x 8 x i16> [[MASKEDOFF]], <vscale x 8 x i16> [[OP1]], i64 [[SHIFT]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vssra.mask.nxv8i16.i64.i64(<vscale x 8 x i16> [[MASKEDOFF]], <vscale x 8 x i16> [[OP1]], i64 [[SHIFT]], <vscale x 8 x i1> [[MASK]], i64 0, i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vssra_vx_i16m2_tum(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, size_t shift, size_t vl) {
- return __riscv_vssra_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssra_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i16> @test_vssra_vv_i16m4_tum
// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vssra.mask.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[MASKEDOFF]], <vscale x 16 x i16> [[OP1]], <vscale x 16 x i16> [[SHIFT]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vssra.mask.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[MASKEDOFF]], <vscale x 16 x i16> [[OP1]], <vscale x 16 x i16> [[SHIFT]], <vscale x 16 x i1> [[MASK]], i64 0, i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vssra_vv_i16m4_tum(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vuint16m4_t shift, size_t vl) {
- return __riscv_vssra_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssra_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i16> @test_vssra_vx_i16m4_tum
// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vssra.mask.nxv16i16.i64.i64(<vscale x 16 x i16> [[MASKEDOFF]], <vscale x 16 x i16> [[OP1]], i64 [[SHIFT]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vssra.mask.nxv16i16.i64.i64(<vscale x 16 x i16> [[MASKEDOFF]], <vscale x 16 x i16> [[OP1]], i64 [[SHIFT]], <vscale x 16 x i1> [[MASK]], i64 0, i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vssra_vx_i16m4_tum(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, size_t shift, size_t vl) {
- return __riscv_vssra_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssra_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i16> @test_vssra_vv_i16m8_tum
// CHECK-RV64-SAME: (<vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vssra.mask.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[MASKEDOFF]], <vscale x 32 x i16> [[OP1]], <vscale x 32 x i16> [[SHIFT]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vssra.mask.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[MASKEDOFF]], <vscale x 32 x i16> [[OP1]], <vscale x 32 x i16> [[SHIFT]], <vscale x 32 x i1> [[MASK]], i64 0, i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vssra_vv_i16m8_tum(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vuint16m8_t shift, size_t vl) {
- return __riscv_vssra_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssra_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i16> @test_vssra_vx_i16m8_tum
// CHECK-RV64-SAME: (<vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vssra.mask.nxv32i16.i64.i64(<vscale x 32 x i16> [[MASKEDOFF]], <vscale x 32 x i16> [[OP1]], i64 [[SHIFT]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vssra.mask.nxv32i16.i64.i64(<vscale x 32 x i16> [[MASKEDOFF]], <vscale x 32 x i16> [[OP1]], i64 [[SHIFT]], <vscale x 32 x i1> [[MASK]], i64 0, i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vssra_vx_i16m8_tum(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, size_t shift, size_t vl) {
- return __riscv_vssra_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssra_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i32> @test_vssra_vv_i32mf2_tum
// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vssra.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[MASKEDOFF]], <vscale x 1 x i32> [[OP1]], <vscale x 1 x i32> [[SHIFT]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vssra.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[MASKEDOFF]], <vscale x 1 x i32> [[OP1]], <vscale x 1 x i32> [[SHIFT]], <vscale x 1 x i1> [[MASK]], i64 0, i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vssra_vv_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vuint32mf2_t shift, size_t vl) {
- return __riscv_vssra_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssra_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i32> @test_vssra_vx_i32mf2_tum
// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vssra.mask.nxv1i32.i64.i64(<vscale x 1 x i32> [[MASKEDOFF]], <vscale x 1 x i32> [[OP1]], i64 [[SHIFT]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vssra.mask.nxv1i32.i64.i64(<vscale x 1 x i32> [[MASKEDOFF]], <vscale x 1 x i32> [[OP1]], i64 [[SHIFT]], <vscale x 1 x i1> [[MASK]], i64 0, i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vssra_vx_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, size_t shift, size_t vl) {
- return __riscv_vssra_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssra_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i32> @test_vssra_vv_i32m1_tum
// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vssra.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[MASKEDOFF]], <vscale x 2 x i32> [[OP1]], <vscale x 2 x i32> [[SHIFT]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vssra.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[MASKEDOFF]], <vscale x 2 x i32> [[OP1]], <vscale x 2 x i32> [[SHIFT]], <vscale x 2 x i1> [[MASK]], i64 0, i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vssra_vv_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vuint32m1_t shift, size_t vl) {
- return __riscv_vssra_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssra_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i32> @test_vssra_vx_i32m1_tum
// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vssra.mask.nxv2i32.i64.i64(<vscale x 2 x i32> [[MASKEDOFF]], <vscale x 2 x i32> [[OP1]], i64 [[SHIFT]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vssra.mask.nxv2i32.i64.i64(<vscale x 2 x i32> [[MASKEDOFF]], <vscale x 2 x i32> [[OP1]], i64 [[SHIFT]], <vscale x 2 x i1> [[MASK]], i64 0, i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vssra_vx_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, size_t shift, size_t vl) {
- return __riscv_vssra_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssra_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i32> @test_vssra_vv_i32m2_tum
// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vssra.mask.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[MASKEDOFF]], <vscale x 4 x i32> [[OP1]], <vscale x 4 x i32> [[SHIFT]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vssra.mask.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[MASKEDOFF]], <vscale x 4 x i32> [[OP1]], <vscale x 4 x i32> [[SHIFT]], <vscale x 4 x i1> [[MASK]], i64 0, i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vssra_vv_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vuint32m2_t shift, size_t vl) {
- return __riscv_vssra_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssra_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i32> @test_vssra_vx_i32m2_tum
// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vssra.mask.nxv4i32.i64.i64(<vscale x 4 x i32> [[MASKEDOFF]], <vscale x 4 x i32> [[OP1]], i64 [[SHIFT]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vssra.mask.nxv4i32.i64.i64(<vscale x 4 x i32> [[MASKEDOFF]], <vscale x 4 x i32> [[OP1]], i64 [[SHIFT]], <vscale x 4 x i1> [[MASK]], i64 0, i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vssra_vx_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, size_t shift, size_t vl) {
- return __riscv_vssra_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssra_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i32> @test_vssra_vv_i32m4_tum
// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vssra.mask.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[MASKEDOFF]], <vscale x 8 x i32> [[OP1]], <vscale x 8 x i32> [[SHIFT]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vssra.mask.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[MASKEDOFF]], <vscale x 8 x i32> [[OP1]], <vscale x 8 x i32> [[SHIFT]], <vscale x 8 x i1> [[MASK]], i64 0, i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vssra_vv_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vuint32m4_t shift, size_t vl) {
- return __riscv_vssra_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssra_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i32> @test_vssra_vx_i32m4_tum
// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vssra.mask.nxv8i32.i64.i64(<vscale x 8 x i32> [[MASKEDOFF]], <vscale x 8 x i32> [[OP1]], i64 [[SHIFT]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vssra.mask.nxv8i32.i64.i64(<vscale x 8 x i32> [[MASKEDOFF]], <vscale x 8 x i32> [[OP1]], i64 [[SHIFT]], <vscale x 8 x i1> [[MASK]], i64 0, i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vssra_vx_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, size_t shift, size_t vl) {
- return __riscv_vssra_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssra_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i32> @test_vssra_vv_i32m8_tum
// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vssra.mask.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[MASKEDOFF]], <vscale x 16 x i32> [[OP1]], <vscale x 16 x i32> [[SHIFT]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vssra.mask.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[MASKEDOFF]], <vscale x 16 x i32> [[OP1]], <vscale x 16 x i32> [[SHIFT]], <vscale x 16 x i1> [[MASK]], i64 0, i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vssra_vv_i32m8_tum(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vuint32m8_t shift, size_t vl) {
- return __riscv_vssra_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssra_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i32> @test_vssra_vx_i32m8_tum
// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vssra.mask.nxv16i32.i64.i64(<vscale x 16 x i32> [[MASKEDOFF]], <vscale x 16 x i32> [[OP1]], i64 [[SHIFT]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vssra.mask.nxv16i32.i64.i64(<vscale x 16 x i32> [[MASKEDOFF]], <vscale x 16 x i32> [[OP1]], i64 [[SHIFT]], <vscale x 16 x i1> [[MASK]], i64 0, i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vssra_vx_i32m8_tum(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, size_t shift, size_t vl) {
- return __riscv_vssra_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssra_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i64> @test_vssra_vv_i64m1_tum
// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vssra.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[MASKEDOFF]], <vscale x 1 x i64> [[OP1]], <vscale x 1 x i64> [[SHIFT]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vssra.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[MASKEDOFF]], <vscale x 1 x i64> [[OP1]], <vscale x 1 x i64> [[SHIFT]], <vscale x 1 x i1> [[MASK]], i64 0, i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vssra_vv_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vuint64m1_t shift, size_t vl) {
- return __riscv_vssra_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssra_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i64> @test_vssra_vx_i64m1_tum
// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vssra.mask.nxv1i64.i64.i64(<vscale x 1 x i64> [[MASKEDOFF]], <vscale x 1 x i64> [[OP1]], i64 [[SHIFT]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vssra.mask.nxv1i64.i64.i64(<vscale x 1 x i64> [[MASKEDOFF]], <vscale x 1 x i64> [[OP1]], i64 [[SHIFT]], <vscale x 1 x i1> [[MASK]], i64 0, i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vssra_vx_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, size_t shift, size_t vl) {
- return __riscv_vssra_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssra_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i64> @test_vssra_vv_i64m2_tum
// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vssra.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[MASKEDOFF]], <vscale x 2 x i64> [[OP1]], <vscale x 2 x i64> [[SHIFT]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vssra.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[MASKEDOFF]], <vscale x 2 x i64> [[OP1]], <vscale x 2 x i64> [[SHIFT]], <vscale x 2 x i1> [[MASK]], i64 0, i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vssra_vv_i64m2_tum(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vuint64m2_t shift, size_t vl) {
- return __riscv_vssra_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssra_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i64> @test_vssra_vx_i64m2_tum
// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vssra.mask.nxv2i64.i64.i64(<vscale x 2 x i64> [[MASKEDOFF]], <vscale x 2 x i64> [[OP1]], i64 [[SHIFT]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vssra.mask.nxv2i64.i64.i64(<vscale x 2 x i64> [[MASKEDOFF]], <vscale x 2 x i64> [[OP1]], i64 [[SHIFT]], <vscale x 2 x i1> [[MASK]], i64 0, i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vssra_vx_i64m2_tum(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, size_t shift, size_t vl) {
- return __riscv_vssra_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssra_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i64> @test_vssra_vv_i64m4_tum
// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vssra.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[MASKEDOFF]], <vscale x 4 x i64> [[OP1]], <vscale x 4 x i64> [[SHIFT]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vssra.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[MASKEDOFF]], <vscale x 4 x i64> [[OP1]], <vscale x 4 x i64> [[SHIFT]], <vscale x 4 x i1> [[MASK]], i64 0, i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vssra_vv_i64m4_tum(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vuint64m4_t shift, size_t vl) {
- return __riscv_vssra_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssra_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i64> @test_vssra_vx_i64m4_tum
// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vssra.mask.nxv4i64.i64.i64(<vscale x 4 x i64> [[MASKEDOFF]], <vscale x 4 x i64> [[OP1]], i64 [[SHIFT]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vssra.mask.nxv4i64.i64.i64(<vscale x 4 x i64> [[MASKEDOFF]], <vscale x 4 x i64> [[OP1]], i64 [[SHIFT]], <vscale x 4 x i1> [[MASK]], i64 0, i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vssra_vx_i64m4_tum(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, size_t shift, size_t vl) {
- return __riscv_vssra_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssra_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i64> @test_vssra_vv_i64m8_tum
// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vssra.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[MASKEDOFF]], <vscale x 8 x i64> [[OP1]], <vscale x 8 x i64> [[SHIFT]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vssra.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[MASKEDOFF]], <vscale x 8 x i64> [[OP1]], <vscale x 8 x i64> [[SHIFT]], <vscale x 8 x i1> [[MASK]], i64 0, i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vssra_vv_i64m8_tum(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vuint64m8_t shift, size_t vl) {
- return __riscv_vssra_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssra_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i64> @test_vssra_vx_i64m8_tum
// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vssra.mask.nxv8i64.i64.i64(<vscale x 8 x i64> [[MASKEDOFF]], <vscale x 8 x i64> [[OP1]], i64 [[SHIFT]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vssra.mask.nxv8i64.i64.i64(<vscale x 8 x i64> [[MASKEDOFF]], <vscale x 8 x i64> [[OP1]], i64 [[SHIFT]], <vscale x 8 x i1> [[MASK]], i64 0, i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vssra_vx_i64m8_tum(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, size_t shift, size_t vl) {
- return __riscv_vssra_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssra_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vssra_vv_i8mf8_tumu
// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vssra.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF]], <vscale x 1 x i8> [[OP1]], <vscale x 1 x i8> [[SHIFT]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vssra.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF]], <vscale x 1 x i8> [[OP1]], <vscale x 1 x i8> [[SHIFT]], <vscale x 1 x i1> [[MASK]], i64 0, i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vssra_vv_i8mf8_tumu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vuint8mf8_t shift, size_t vl) {
- return __riscv_vssra_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssra_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vssra_vx_i8mf8_tumu
// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vssra.mask.nxv1i8.i64.i64(<vscale x 1 x i8> [[MASKEDOFF]], <vscale x 1 x i8> [[OP1]], i64 [[SHIFT]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vssra.mask.nxv1i8.i64.i64(<vscale x 1 x i8> [[MASKEDOFF]], <vscale x 1 x i8> [[OP1]], i64 [[SHIFT]], <vscale x 1 x i1> [[MASK]], i64 0, i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vssra_vx_i8mf8_tumu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, size_t shift, size_t vl) {
- return __riscv_vssra_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssra_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vssra_vv_i8mf4_tumu
// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vssra.mask.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[MASKEDOFF]], <vscale x 2 x i8> [[OP1]], <vscale x 2 x i8> [[SHIFT]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vssra.mask.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[MASKEDOFF]], <vscale x 2 x i8> [[OP1]], <vscale x 2 x i8> [[SHIFT]], <vscale x 2 x i1> [[MASK]], i64 0, i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vssra_vv_i8mf4_tumu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vuint8mf4_t shift, size_t vl) {
- return __riscv_vssra_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssra_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vssra_vx_i8mf4_tumu
// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vssra.mask.nxv2i8.i64.i64(<vscale x 2 x i8> [[MASKEDOFF]], <vscale x 2 x i8> [[OP1]], i64 [[SHIFT]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vssra.mask.nxv2i8.i64.i64(<vscale x 2 x i8> [[MASKEDOFF]], <vscale x 2 x i8> [[OP1]], i64 [[SHIFT]], <vscale x 2 x i1> [[MASK]], i64 0, i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vssra_vx_i8mf4_tumu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, size_t shift, size_t vl) {
- return __riscv_vssra_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssra_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vssra_vv_i8mf2_tumu
// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vssra.mask.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[MASKEDOFF]], <vscale x 4 x i8> [[OP1]], <vscale x 4 x i8> [[SHIFT]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vssra.mask.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[MASKEDOFF]], <vscale x 4 x i8> [[OP1]], <vscale x 4 x i8> [[SHIFT]], <vscale x 4 x i1> [[MASK]], i64 0, i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vssra_vv_i8mf2_tumu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vuint8mf2_t shift, size_t vl) {
- return __riscv_vssra_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssra_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vssra_vx_i8mf2_tumu
// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vssra.mask.nxv4i8.i64.i64(<vscale x 4 x i8> [[MASKEDOFF]], <vscale x 4 x i8> [[OP1]], i64 [[SHIFT]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vssra.mask.nxv4i8.i64.i64(<vscale x 4 x i8> [[MASKEDOFF]], <vscale x 4 x i8> [[OP1]], i64 [[SHIFT]], <vscale x 4 x i1> [[MASK]], i64 0, i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vssra_vx_i8mf2_tumu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, size_t shift, size_t vl) {
- return __riscv_vssra_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssra_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vssra_vv_i8m1_tumu
// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vssra.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF]], <vscale x 8 x i8> [[OP1]], <vscale x 8 x i8> [[SHIFT]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vssra.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF]], <vscale x 8 x i8> [[OP1]], <vscale x 8 x i8> [[SHIFT]], <vscale x 8 x i1> [[MASK]], i64 0, i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vssra_vv_i8m1_tumu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vuint8m1_t shift, size_t vl) {
- return __riscv_vssra_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssra_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vssra_vx_i8m1_tumu
// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vssra.mask.nxv8i8.i64.i64(<vscale x 8 x i8> [[MASKEDOFF]], <vscale x 8 x i8> [[OP1]], i64 [[SHIFT]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vssra.mask.nxv8i8.i64.i64(<vscale x 8 x i8> [[MASKEDOFF]], <vscale x 8 x i8> [[OP1]], i64 [[SHIFT]], <vscale x 8 x i1> [[MASK]], i64 0, i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vssra_vx_i8m1_tumu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, size_t shift, size_t vl) {
- return __riscv_vssra_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssra_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vssra_vv_i8m2_tumu
// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vssra.mask.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[MASKEDOFF]], <vscale x 16 x i8> [[OP1]], <vscale x 16 x i8> [[SHIFT]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vssra.mask.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[MASKEDOFF]], <vscale x 16 x i8> [[OP1]], <vscale x 16 x i8> [[SHIFT]], <vscale x 16 x i1> [[MASK]], i64 0, i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vssra_vv_i8m2_tumu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vuint8m2_t shift, size_t vl) {
- return __riscv_vssra_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssra_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vssra_vx_i8m2_tumu
// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vssra.mask.nxv16i8.i64.i64(<vscale x 16 x i8> [[MASKEDOFF]], <vscale x 16 x i8> [[OP1]], i64 [[SHIFT]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vssra.mask.nxv16i8.i64.i64(<vscale x 16 x i8> [[MASKEDOFF]], <vscale x 16 x i8> [[OP1]], i64 [[SHIFT]], <vscale x 16 x i1> [[MASK]], i64 0, i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vssra_vx_i8m2_tumu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, size_t shift, size_t vl) {
- return __riscv_vssra_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssra_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vssra_vv_i8m4_tumu
// CHECK-RV64-SAME: (<vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vssra.mask.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[MASKEDOFF]], <vscale x 32 x i8> [[OP1]], <vscale x 32 x i8> [[SHIFT]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vssra.mask.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[MASKEDOFF]], <vscale x 32 x i8> [[OP1]], <vscale x 32 x i8> [[SHIFT]], <vscale x 32 x i1> [[MASK]], i64 0, i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vssra_vv_i8m4_tumu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vuint8m4_t shift, size_t vl) {
- return __riscv_vssra_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssra_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vssra_vx_i8m4_tumu
// CHECK-RV64-SAME: (<vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vssra.mask.nxv32i8.i64.i64(<vscale x 32 x i8> [[MASKEDOFF]], <vscale x 32 x i8> [[OP1]], i64 [[SHIFT]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vssra.mask.nxv32i8.i64.i64(<vscale x 32 x i8> [[MASKEDOFF]], <vscale x 32 x i8> [[OP1]], i64 [[SHIFT]], <vscale x 32 x i1> [[MASK]], i64 0, i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vssra_vx_i8m4_tumu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, size_t shift, size_t vl) {
- return __riscv_vssra_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssra_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 64 x i8> @test_vssra_vv_i8m8_tumu
// CHECK-RV64-SAME: (<vscale x 64 x i1> [[MASK:%.*]], <vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vssra.mask.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[MASKEDOFF]], <vscale x 64 x i8> [[OP1]], <vscale x 64 x i8> [[SHIFT]], <vscale x 64 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vssra.mask.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[MASKEDOFF]], <vscale x 64 x i8> [[OP1]], <vscale x 64 x i8> [[SHIFT]], <vscale x 64 x i1> [[MASK]], i64 0, i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vssra_vv_i8m8_tumu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vuint8m8_t shift, size_t vl) {
- return __riscv_vssra_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssra_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 64 x i8> @test_vssra_vx_i8m8_tumu
// CHECK-RV64-SAME: (<vscale x 64 x i1> [[MASK:%.*]], <vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vssra.mask.nxv64i8.i64.i64(<vscale x 64 x i8> [[MASKEDOFF]], <vscale x 64 x i8> [[OP1]], i64 [[SHIFT]], <vscale x 64 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vssra.mask.nxv64i8.i64.i64(<vscale x 64 x i8> [[MASKEDOFF]], <vscale x 64 x i8> [[OP1]], i64 [[SHIFT]], <vscale x 64 x i1> [[MASK]], i64 0, i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vssra_vx_i8m8_tumu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, size_t shift, size_t vl) {
- return __riscv_vssra_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssra_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i16> @test_vssra_vv_i16mf4_tumu
// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vssra.mask.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[MASKEDOFF]], <vscale x 1 x i16> [[OP1]], <vscale x 1 x i16> [[SHIFT]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vssra.mask.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[MASKEDOFF]], <vscale x 1 x i16> [[OP1]], <vscale x 1 x i16> [[SHIFT]], <vscale x 1 x i1> [[MASK]], i64 0, i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vssra_vv_i16mf4_tumu(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vuint16mf4_t shift, size_t vl) {
- return __riscv_vssra_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssra_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i16> @test_vssra_vx_i16mf4_tumu
// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vssra.mask.nxv1i16.i64.i64(<vscale x 1 x i16> [[MASKEDOFF]], <vscale x 1 x i16> [[OP1]], i64 [[SHIFT]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vssra.mask.nxv1i16.i64.i64(<vscale x 1 x i16> [[MASKEDOFF]], <vscale x 1 x i16> [[OP1]], i64 [[SHIFT]], <vscale x 1 x i1> [[MASK]], i64 0, i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vssra_vx_i16mf4_tumu(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, size_t shift, size_t vl) {
- return __riscv_vssra_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssra_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i16> @test_vssra_vv_i16mf2_tumu
// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vssra.mask.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF]], <vscale x 2 x i16> [[OP1]], <vscale x 2 x i16> [[SHIFT]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vssra.mask.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF]], <vscale x 2 x i16> [[OP1]], <vscale x 2 x i16> [[SHIFT]], <vscale x 2 x i1> [[MASK]], i64 0, i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vssra_vv_i16mf2_tumu(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vuint16mf2_t shift, size_t vl) {
- return __riscv_vssra_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssra_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i16> @test_vssra_vx_i16mf2_tumu
// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vssra.mask.nxv2i16.i64.i64(<vscale x 2 x i16> [[MASKEDOFF]], <vscale x 2 x i16> [[OP1]], i64 [[SHIFT]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vssra.mask.nxv2i16.i64.i64(<vscale x 2 x i16> [[MASKEDOFF]], <vscale x 2 x i16> [[OP1]], i64 [[SHIFT]], <vscale x 2 x i1> [[MASK]], i64 0, i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vssra_vx_i16mf2_tumu(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, size_t shift, size_t vl) {
- return __riscv_vssra_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssra_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vssra_vv_i16m1_tumu
// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vssra.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF]], <vscale x 4 x i16> [[OP1]], <vscale x 4 x i16> [[SHIFT]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vssra.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF]], <vscale x 4 x i16> [[OP1]], <vscale x 4 x i16> [[SHIFT]], <vscale x 4 x i1> [[MASK]], i64 0, i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vssra_vv_i16m1_tumu(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vuint16m1_t shift, size_t vl) {
- return __riscv_vssra_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssra_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vssra_vx_i16m1_tumu
// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vssra.mask.nxv4i16.i64.i64(<vscale x 4 x i16> [[MASKEDOFF]], <vscale x 4 x i16> [[OP1]], i64 [[SHIFT]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vssra.mask.nxv4i16.i64.i64(<vscale x 4 x i16> [[MASKEDOFF]], <vscale x 4 x i16> [[OP1]], i64 [[SHIFT]], <vscale x 4 x i1> [[MASK]], i64 0, i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vssra_vx_i16m1_tumu(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, size_t shift, size_t vl) {
- return __riscv_vssra_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssra_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i16> @test_vssra_vv_i16m2_tumu
// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vssra.mask.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[MASKEDOFF]], <vscale x 8 x i16> [[OP1]], <vscale x 8 x i16> [[SHIFT]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vssra.mask.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[MASKEDOFF]], <vscale x 8 x i16> [[OP1]], <vscale x 8 x i16> [[SHIFT]], <vscale x 8 x i1> [[MASK]], i64 0, i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vssra_vv_i16m2_tumu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vuint16m2_t shift, size_t vl) {
- return __riscv_vssra_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssra_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i16> @test_vssra_vx_i16m2_tumu
// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vssra.mask.nxv8i16.i64.i64(<vscale x 8 x i16> [[MASKEDOFF]], <vscale x 8 x i16> [[OP1]], i64 [[SHIFT]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vssra.mask.nxv8i16.i64.i64(<vscale x 8 x i16> [[MASKEDOFF]], <vscale x 8 x i16> [[OP1]], i64 [[SHIFT]], <vscale x 8 x i1> [[MASK]], i64 0, i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vssra_vx_i16m2_tumu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, size_t shift, size_t vl) {
- return __riscv_vssra_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssra_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i16> @test_vssra_vv_i16m4_tumu
// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vssra.mask.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[MASKEDOFF]], <vscale x 16 x i16> [[OP1]], <vscale x 16 x i16> [[SHIFT]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vssra.mask.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[MASKEDOFF]], <vscale x 16 x i16> [[OP1]], <vscale x 16 x i16> [[SHIFT]], <vscale x 16 x i1> [[MASK]], i64 0, i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vssra_vv_i16m4_tumu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vuint16m4_t shift, size_t vl) {
- return __riscv_vssra_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssra_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i16> @test_vssra_vx_i16m4_tumu
// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vssra.mask.nxv16i16.i64.i64(<vscale x 16 x i16> [[MASKEDOFF]], <vscale x 16 x i16> [[OP1]], i64 [[SHIFT]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vssra.mask.nxv16i16.i64.i64(<vscale x 16 x i16> [[MASKEDOFF]], <vscale x 16 x i16> [[OP1]], i64 [[SHIFT]], <vscale x 16 x i1> [[MASK]], i64 0, i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vssra_vx_i16m4_tumu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, size_t shift, size_t vl) {
- return __riscv_vssra_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssra_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i16> @test_vssra_vv_i16m8_tumu
// CHECK-RV64-SAME: (<vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vssra.mask.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[MASKEDOFF]], <vscale x 32 x i16> [[OP1]], <vscale x 32 x i16> [[SHIFT]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vssra.mask.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[MASKEDOFF]], <vscale x 32 x i16> [[OP1]], <vscale x 32 x i16> [[SHIFT]], <vscale x 32 x i1> [[MASK]], i64 0, i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vssra_vv_i16m8_tumu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vuint16m8_t shift, size_t vl) {
- return __riscv_vssra_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssra_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i16> @test_vssra_vx_i16m8_tumu
// CHECK-RV64-SAME: (<vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vssra.mask.nxv32i16.i64.i64(<vscale x 32 x i16> [[MASKEDOFF]], <vscale x 32 x i16> [[OP1]], i64 [[SHIFT]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vssra.mask.nxv32i16.i64.i64(<vscale x 32 x i16> [[MASKEDOFF]], <vscale x 32 x i16> [[OP1]], i64 [[SHIFT]], <vscale x 32 x i1> [[MASK]], i64 0, i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vssra_vx_i16m8_tumu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, size_t shift, size_t vl) {
- return __riscv_vssra_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssra_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i32> @test_vssra_vv_i32mf2_tumu
// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vssra.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[MASKEDOFF]], <vscale x 1 x i32> [[OP1]], <vscale x 1 x i32> [[SHIFT]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vssra.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[MASKEDOFF]], <vscale x 1 x i32> [[OP1]], <vscale x 1 x i32> [[SHIFT]], <vscale x 1 x i1> [[MASK]], i64 0, i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vssra_vv_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vuint32mf2_t shift, size_t vl) {
- return __riscv_vssra_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssra_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i32> @test_vssra_vx_i32mf2_tumu
// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vssra.mask.nxv1i32.i64.i64(<vscale x 1 x i32> [[MASKEDOFF]], <vscale x 1 x i32> [[OP1]], i64 [[SHIFT]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vssra.mask.nxv1i32.i64.i64(<vscale x 1 x i32> [[MASKEDOFF]], <vscale x 1 x i32> [[OP1]], i64 [[SHIFT]], <vscale x 1 x i1> [[MASK]], i64 0, i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vssra_vx_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, size_t shift, size_t vl) {
- return __riscv_vssra_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssra_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i32> @test_vssra_vv_i32m1_tumu
// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vssra.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[MASKEDOFF]], <vscale x 2 x i32> [[OP1]], <vscale x 2 x i32> [[SHIFT]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vssra.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[MASKEDOFF]], <vscale x 2 x i32> [[OP1]], <vscale x 2 x i32> [[SHIFT]], <vscale x 2 x i1> [[MASK]], i64 0, i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vssra_vv_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vuint32m1_t shift, size_t vl) {
- return __riscv_vssra_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssra_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i32> @test_vssra_vx_i32m1_tumu
// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vssra.mask.nxv2i32.i64.i64(<vscale x 2 x i32> [[MASKEDOFF]], <vscale x 2 x i32> [[OP1]], i64 [[SHIFT]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vssra.mask.nxv2i32.i64.i64(<vscale x 2 x i32> [[MASKEDOFF]], <vscale x 2 x i32> [[OP1]], i64 [[SHIFT]], <vscale x 2 x i1> [[MASK]], i64 0, i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vssra_vx_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, size_t shift, size_t vl) {
- return __riscv_vssra_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssra_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i32> @test_vssra_vv_i32m2_tumu
// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vssra.mask.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[MASKEDOFF]], <vscale x 4 x i32> [[OP1]], <vscale x 4 x i32> [[SHIFT]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vssra.mask.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[MASKEDOFF]], <vscale x 4 x i32> [[OP1]], <vscale x 4 x i32> [[SHIFT]], <vscale x 4 x i1> [[MASK]], i64 0, i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vssra_vv_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vuint32m2_t shift, size_t vl) {
- return __riscv_vssra_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssra_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i32> @test_vssra_vx_i32m2_tumu
// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vssra.mask.nxv4i32.i64.i64(<vscale x 4 x i32> [[MASKEDOFF]], <vscale x 4 x i32> [[OP1]], i64 [[SHIFT]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vssra.mask.nxv4i32.i64.i64(<vscale x 4 x i32> [[MASKEDOFF]], <vscale x 4 x i32> [[OP1]], i64 [[SHIFT]], <vscale x 4 x i1> [[MASK]], i64 0, i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vssra_vx_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, size_t shift, size_t vl) {
- return __riscv_vssra_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssra_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i32> @test_vssra_vv_i32m4_tumu
// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vssra.mask.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[MASKEDOFF]], <vscale x 8 x i32> [[OP1]], <vscale x 8 x i32> [[SHIFT]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vssra.mask.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[MASKEDOFF]], <vscale x 8 x i32> [[OP1]], <vscale x 8 x i32> [[SHIFT]], <vscale x 8 x i1> [[MASK]], i64 0, i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vssra_vv_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vuint32m4_t shift, size_t vl) {
- return __riscv_vssra_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssra_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i32> @test_vssra_vx_i32m4_tumu
// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vssra.mask.nxv8i32.i64.i64(<vscale x 8 x i32> [[MASKEDOFF]], <vscale x 8 x i32> [[OP1]], i64 [[SHIFT]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vssra.mask.nxv8i32.i64.i64(<vscale x 8 x i32> [[MASKEDOFF]], <vscale x 8 x i32> [[OP1]], i64 [[SHIFT]], <vscale x 8 x i1> [[MASK]], i64 0, i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vssra_vx_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, size_t shift, size_t vl) {
- return __riscv_vssra_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssra_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i32> @test_vssra_vv_i32m8_tumu
// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vssra.mask.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[MASKEDOFF]], <vscale x 16 x i32> [[OP1]], <vscale x 16 x i32> [[SHIFT]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vssra.mask.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[MASKEDOFF]], <vscale x 16 x i32> [[OP1]], <vscale x 16 x i32> [[SHIFT]], <vscale x 16 x i1> [[MASK]], i64 0, i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vssra_vv_i32m8_tumu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vuint32m8_t shift, size_t vl) {
- return __riscv_vssra_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssra_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i32> @test_vssra_vx_i32m8_tumu
// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vssra.mask.nxv16i32.i64.i64(<vscale x 16 x i32> [[MASKEDOFF]], <vscale x 16 x i32> [[OP1]], i64 [[SHIFT]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vssra.mask.nxv16i32.i64.i64(<vscale x 16 x i32> [[MASKEDOFF]], <vscale x 16 x i32> [[OP1]], i64 [[SHIFT]], <vscale x 16 x i1> [[MASK]], i64 0, i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vssra_vx_i32m8_tumu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, size_t shift, size_t vl) {
- return __riscv_vssra_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssra_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i64> @test_vssra_vv_i64m1_tumu
// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vssra.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[MASKEDOFF]], <vscale x 1 x i64> [[OP1]], <vscale x 1 x i64> [[SHIFT]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vssra.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[MASKEDOFF]], <vscale x 1 x i64> [[OP1]], <vscale x 1 x i64> [[SHIFT]], <vscale x 1 x i1> [[MASK]], i64 0, i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vssra_vv_i64m1_tumu(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vuint64m1_t shift, size_t vl) {
- return __riscv_vssra_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssra_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i64> @test_vssra_vx_i64m1_tumu
// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vssra.mask.nxv1i64.i64.i64(<vscale x 1 x i64> [[MASKEDOFF]], <vscale x 1 x i64> [[OP1]], i64 [[SHIFT]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vssra.mask.nxv1i64.i64.i64(<vscale x 1 x i64> [[MASKEDOFF]], <vscale x 1 x i64> [[OP1]], i64 [[SHIFT]], <vscale x 1 x i1> [[MASK]], i64 0, i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vssra_vx_i64m1_tumu(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, size_t shift, size_t vl) {
- return __riscv_vssra_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssra_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i64> @test_vssra_vv_i64m2_tumu
// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vssra.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[MASKEDOFF]], <vscale x 2 x i64> [[OP1]], <vscale x 2 x i64> [[SHIFT]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vssra.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[MASKEDOFF]], <vscale x 2 x i64> [[OP1]], <vscale x 2 x i64> [[SHIFT]], <vscale x 2 x i1> [[MASK]], i64 0, i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vssra_vv_i64m2_tumu(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vuint64m2_t shift, size_t vl) {
- return __riscv_vssra_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssra_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i64> @test_vssra_vx_i64m2_tumu
// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vssra.mask.nxv2i64.i64.i64(<vscale x 2 x i64> [[MASKEDOFF]], <vscale x 2 x i64> [[OP1]], i64 [[SHIFT]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vssra.mask.nxv2i64.i64.i64(<vscale x 2 x i64> [[MASKEDOFF]], <vscale x 2 x i64> [[OP1]], i64 [[SHIFT]], <vscale x 2 x i1> [[MASK]], i64 0, i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vssra_vx_i64m2_tumu(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, size_t shift, size_t vl) {
- return __riscv_vssra_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssra_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i64> @test_vssra_vv_i64m4_tumu
// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vssra.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[MASKEDOFF]], <vscale x 4 x i64> [[OP1]], <vscale x 4 x i64> [[SHIFT]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vssra.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[MASKEDOFF]], <vscale x 4 x i64> [[OP1]], <vscale x 4 x i64> [[SHIFT]], <vscale x 4 x i1> [[MASK]], i64 0, i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vssra_vv_i64m4_tumu(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vuint64m4_t shift, size_t vl) {
- return __riscv_vssra_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssra_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i64> @test_vssra_vx_i64m4_tumu
// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vssra.mask.nxv4i64.i64.i64(<vscale x 4 x i64> [[MASKEDOFF]], <vscale x 4 x i64> [[OP1]], i64 [[SHIFT]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vssra.mask.nxv4i64.i64.i64(<vscale x 4 x i64> [[MASKEDOFF]], <vscale x 4 x i64> [[OP1]], i64 [[SHIFT]], <vscale x 4 x i1> [[MASK]], i64 0, i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vssra_vx_i64m4_tumu(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, size_t shift, size_t vl) {
- return __riscv_vssra_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssra_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i64> @test_vssra_vv_i64m8_tumu
// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vssra.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[MASKEDOFF]], <vscale x 8 x i64> [[OP1]], <vscale x 8 x i64> [[SHIFT]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vssra.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[MASKEDOFF]], <vscale x 8 x i64> [[OP1]], <vscale x 8 x i64> [[SHIFT]], <vscale x 8 x i1> [[MASK]], i64 0, i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vssra_vv_i64m8_tumu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vuint64m8_t shift, size_t vl) {
- return __riscv_vssra_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssra_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i64> @test_vssra_vx_i64m8_tumu
// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vssra.mask.nxv8i64.i64.i64(<vscale x 8 x i64> [[MASKEDOFF]], <vscale x 8 x i64> [[OP1]], i64 [[SHIFT]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vssra.mask.nxv8i64.i64.i64(<vscale x 8 x i64> [[MASKEDOFF]], <vscale x 8 x i64> [[OP1]], i64 [[SHIFT]], <vscale x 8 x i1> [[MASK]], i64 0, i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vssra_vx_i64m8_tumu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, size_t shift, size_t vl) {
- return __riscv_vssra_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssra_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vssra_vv_i8mf8_mu
// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vssra.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF]], <vscale x 1 x i8> [[OP1]], <vscale x 1 x i8> [[SHIFT]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vssra.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF]], <vscale x 1 x i8> [[OP1]], <vscale x 1 x i8> [[SHIFT]], <vscale x 1 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vssra_vv_i8mf8_mu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vuint8mf8_t shift, size_t vl) {
- return __riscv_vssra_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssra_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vssra_vx_i8mf8_mu
// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vssra.mask.nxv1i8.i64.i64(<vscale x 1 x i8> [[MASKEDOFF]], <vscale x 1 x i8> [[OP1]], i64 [[SHIFT]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vssra.mask.nxv1i8.i64.i64(<vscale x 1 x i8> [[MASKEDOFF]], <vscale x 1 x i8> [[OP1]], i64 [[SHIFT]], <vscale x 1 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vssra_vx_i8mf8_mu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, size_t shift, size_t vl) {
- return __riscv_vssra_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssra_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vssra_vv_i8mf4_mu
// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vssra.mask.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[MASKEDOFF]], <vscale x 2 x i8> [[OP1]], <vscale x 2 x i8> [[SHIFT]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vssra.mask.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[MASKEDOFF]], <vscale x 2 x i8> [[OP1]], <vscale x 2 x i8> [[SHIFT]], <vscale x 2 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vssra_vv_i8mf4_mu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vuint8mf4_t shift, size_t vl) {
- return __riscv_vssra_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssra_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vssra_vx_i8mf4_mu
// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vssra.mask.nxv2i8.i64.i64(<vscale x 2 x i8> [[MASKEDOFF]], <vscale x 2 x i8> [[OP1]], i64 [[SHIFT]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vssra.mask.nxv2i8.i64.i64(<vscale x 2 x i8> [[MASKEDOFF]], <vscale x 2 x i8> [[OP1]], i64 [[SHIFT]], <vscale x 2 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vssra_vx_i8mf4_mu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, size_t shift, size_t vl) {
- return __riscv_vssra_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssra_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vssra_vv_i8mf2_mu
// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vssra.mask.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[MASKEDOFF]], <vscale x 4 x i8> [[OP1]], <vscale x 4 x i8> [[SHIFT]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vssra.mask.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[MASKEDOFF]], <vscale x 4 x i8> [[OP1]], <vscale x 4 x i8> [[SHIFT]], <vscale x 4 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vssra_vv_i8mf2_mu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vuint8mf2_t shift, size_t vl) {
- return __riscv_vssra_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssra_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vssra_vx_i8mf2_mu
// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vssra.mask.nxv4i8.i64.i64(<vscale x 4 x i8> [[MASKEDOFF]], <vscale x 4 x i8> [[OP1]], i64 [[SHIFT]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vssra.mask.nxv4i8.i64.i64(<vscale x 4 x i8> [[MASKEDOFF]], <vscale x 4 x i8> [[OP1]], i64 [[SHIFT]], <vscale x 4 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vssra_vx_i8mf2_mu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, size_t shift, size_t vl) {
- return __riscv_vssra_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssra_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vssra_vv_i8m1_mu
// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vssra.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF]], <vscale x 8 x i8> [[OP1]], <vscale x 8 x i8> [[SHIFT]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vssra.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF]], <vscale x 8 x i8> [[OP1]], <vscale x 8 x i8> [[SHIFT]], <vscale x 8 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vssra_vv_i8m1_mu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vuint8m1_t shift, size_t vl) {
- return __riscv_vssra_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssra_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vssra_vx_i8m1_mu
// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vssra.mask.nxv8i8.i64.i64(<vscale x 8 x i8> [[MASKEDOFF]], <vscale x 8 x i8> [[OP1]], i64 [[SHIFT]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vssra.mask.nxv8i8.i64.i64(<vscale x 8 x i8> [[MASKEDOFF]], <vscale x 8 x i8> [[OP1]], i64 [[SHIFT]], <vscale x 8 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vssra_vx_i8m1_mu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, size_t shift, size_t vl) {
- return __riscv_vssra_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssra_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vssra_vv_i8m2_mu
// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vssra.mask.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[MASKEDOFF]], <vscale x 16 x i8> [[OP1]], <vscale x 16 x i8> [[SHIFT]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vssra.mask.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[MASKEDOFF]], <vscale x 16 x i8> [[OP1]], <vscale x 16 x i8> [[SHIFT]], <vscale x 16 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vssra_vv_i8m2_mu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vuint8m2_t shift, size_t vl) {
- return __riscv_vssra_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssra_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vssra_vx_i8m2_mu
// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vssra.mask.nxv16i8.i64.i64(<vscale x 16 x i8> [[MASKEDOFF]], <vscale x 16 x i8> [[OP1]], i64 [[SHIFT]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vssra.mask.nxv16i8.i64.i64(<vscale x 16 x i8> [[MASKEDOFF]], <vscale x 16 x i8> [[OP1]], i64 [[SHIFT]], <vscale x 16 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vssra_vx_i8m2_mu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, size_t shift, size_t vl) {
- return __riscv_vssra_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssra_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vssra_vv_i8m4_mu
// CHECK-RV64-SAME: (<vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vssra.mask.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[MASKEDOFF]], <vscale x 32 x i8> [[OP1]], <vscale x 32 x i8> [[SHIFT]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vssra.mask.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[MASKEDOFF]], <vscale x 32 x i8> [[OP1]], <vscale x 32 x i8> [[SHIFT]], <vscale x 32 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vssra_vv_i8m4_mu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vuint8m4_t shift, size_t vl) {
- return __riscv_vssra_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssra_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vssra_vx_i8m4_mu
// CHECK-RV64-SAME: (<vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vssra.mask.nxv32i8.i64.i64(<vscale x 32 x i8> [[MASKEDOFF]], <vscale x 32 x i8> [[OP1]], i64 [[SHIFT]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vssra.mask.nxv32i8.i64.i64(<vscale x 32 x i8> [[MASKEDOFF]], <vscale x 32 x i8> [[OP1]], i64 [[SHIFT]], <vscale x 32 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vssra_vx_i8m4_mu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, size_t shift, size_t vl) {
- return __riscv_vssra_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssra_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 64 x i8> @test_vssra_vv_i8m8_mu
// CHECK-RV64-SAME: (<vscale x 64 x i1> [[MASK:%.*]], <vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vssra.mask.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[MASKEDOFF]], <vscale x 64 x i8> [[OP1]], <vscale x 64 x i8> [[SHIFT]], <vscale x 64 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vssra.mask.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[MASKEDOFF]], <vscale x 64 x i8> [[OP1]], <vscale x 64 x i8> [[SHIFT]], <vscale x 64 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vssra_vv_i8m8_mu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vuint8m8_t shift, size_t vl) {
- return __riscv_vssra_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssra_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 64 x i8> @test_vssra_vx_i8m8_mu
// CHECK-RV64-SAME: (<vscale x 64 x i1> [[MASK:%.*]], <vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vssra.mask.nxv64i8.i64.i64(<vscale x 64 x i8> [[MASKEDOFF]], <vscale x 64 x i8> [[OP1]], i64 [[SHIFT]], <vscale x 64 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vssra.mask.nxv64i8.i64.i64(<vscale x 64 x i8> [[MASKEDOFF]], <vscale x 64 x i8> [[OP1]], i64 [[SHIFT]], <vscale x 64 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vssra_vx_i8m8_mu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, size_t shift, size_t vl) {
- return __riscv_vssra_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssra_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i16> @test_vssra_vv_i16mf4_mu
// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vssra.mask.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[MASKEDOFF]], <vscale x 1 x i16> [[OP1]], <vscale x 1 x i16> [[SHIFT]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vssra.mask.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[MASKEDOFF]], <vscale x 1 x i16> [[OP1]], <vscale x 1 x i16> [[SHIFT]], <vscale x 1 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vssra_vv_i16mf4_mu(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vuint16mf4_t shift, size_t vl) {
- return __riscv_vssra_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssra_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i16> @test_vssra_vx_i16mf4_mu
// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vssra.mask.nxv1i16.i64.i64(<vscale x 1 x i16> [[MASKEDOFF]], <vscale x 1 x i16> [[OP1]], i64 [[SHIFT]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vssra.mask.nxv1i16.i64.i64(<vscale x 1 x i16> [[MASKEDOFF]], <vscale x 1 x i16> [[OP1]], i64 [[SHIFT]], <vscale x 1 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vssra_vx_i16mf4_mu(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, size_t shift, size_t vl) {
- return __riscv_vssra_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssra_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i16> @test_vssra_vv_i16mf2_mu
// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vssra.mask.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF]], <vscale x 2 x i16> [[OP1]], <vscale x 2 x i16> [[SHIFT]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vssra.mask.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF]], <vscale x 2 x i16> [[OP1]], <vscale x 2 x i16> [[SHIFT]], <vscale x 2 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vssra_vv_i16mf2_mu(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vuint16mf2_t shift, size_t vl) {
- return __riscv_vssra_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssra_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i16> @test_vssra_vx_i16mf2_mu
// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vssra.mask.nxv2i16.i64.i64(<vscale x 2 x i16> [[MASKEDOFF]], <vscale x 2 x i16> [[OP1]], i64 [[SHIFT]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vssra.mask.nxv2i16.i64.i64(<vscale x 2 x i16> [[MASKEDOFF]], <vscale x 2 x i16> [[OP1]], i64 [[SHIFT]], <vscale x 2 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vssra_vx_i16mf2_mu(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, size_t shift, size_t vl) {
- return __riscv_vssra_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssra_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vssra_vv_i16m1_mu
// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vssra.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF]], <vscale x 4 x i16> [[OP1]], <vscale x 4 x i16> [[SHIFT]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vssra.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF]], <vscale x 4 x i16> [[OP1]], <vscale x 4 x i16> [[SHIFT]], <vscale x 4 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vssra_vv_i16m1_mu(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vuint16m1_t shift, size_t vl) {
- return __riscv_vssra_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssra_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vssra_vx_i16m1_mu
// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vssra.mask.nxv4i16.i64.i64(<vscale x 4 x i16> [[MASKEDOFF]], <vscale x 4 x i16> [[OP1]], i64 [[SHIFT]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vssra.mask.nxv4i16.i64.i64(<vscale x 4 x i16> [[MASKEDOFF]], <vscale x 4 x i16> [[OP1]], i64 [[SHIFT]], <vscale x 4 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vssra_vx_i16m1_mu(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, size_t shift, size_t vl) {
- return __riscv_vssra_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssra_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i16> @test_vssra_vv_i16m2_mu
// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vssra.mask.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[MASKEDOFF]], <vscale x 8 x i16> [[OP1]], <vscale x 8 x i16> [[SHIFT]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vssra.mask.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[MASKEDOFF]], <vscale x 8 x i16> [[OP1]], <vscale x 8 x i16> [[SHIFT]], <vscale x 8 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vssra_vv_i16m2_mu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vuint16m2_t shift, size_t vl) {
- return __riscv_vssra_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssra_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i16> @test_vssra_vx_i16m2_mu
// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vssra.mask.nxv8i16.i64.i64(<vscale x 8 x i16> [[MASKEDOFF]], <vscale x 8 x i16> [[OP1]], i64 [[SHIFT]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vssra.mask.nxv8i16.i64.i64(<vscale x 8 x i16> [[MASKEDOFF]], <vscale x 8 x i16> [[OP1]], i64 [[SHIFT]], <vscale x 8 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vssra_vx_i16m2_mu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, size_t shift, size_t vl) {
- return __riscv_vssra_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssra_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i16> @test_vssra_vv_i16m4_mu
// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vssra.mask.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[MASKEDOFF]], <vscale x 16 x i16> [[OP1]], <vscale x 16 x i16> [[SHIFT]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vssra.mask.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[MASKEDOFF]], <vscale x 16 x i16> [[OP1]], <vscale x 16 x i16> [[SHIFT]], <vscale x 16 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vssra_vv_i16m4_mu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vuint16m4_t shift, size_t vl) {
- return __riscv_vssra_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssra_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i16> @test_vssra_vx_i16m4_mu
// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vssra.mask.nxv16i16.i64.i64(<vscale x 16 x i16> [[MASKEDOFF]], <vscale x 16 x i16> [[OP1]], i64 [[SHIFT]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vssra.mask.nxv16i16.i64.i64(<vscale x 16 x i16> [[MASKEDOFF]], <vscale x 16 x i16> [[OP1]], i64 [[SHIFT]], <vscale x 16 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vssra_vx_i16m4_mu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, size_t shift, size_t vl) {
- return __riscv_vssra_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssra_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i16> @test_vssra_vv_i16m8_mu
// CHECK-RV64-SAME: (<vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vssra.mask.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[MASKEDOFF]], <vscale x 32 x i16> [[OP1]], <vscale x 32 x i16> [[SHIFT]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vssra.mask.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[MASKEDOFF]], <vscale x 32 x i16> [[OP1]], <vscale x 32 x i16> [[SHIFT]], <vscale x 32 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vssra_vv_i16m8_mu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vuint16m8_t shift, size_t vl) {
- return __riscv_vssra_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssra_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i16> @test_vssra_vx_i16m8_mu
// CHECK-RV64-SAME: (<vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vssra.mask.nxv32i16.i64.i64(<vscale x 32 x i16> [[MASKEDOFF]], <vscale x 32 x i16> [[OP1]], i64 [[SHIFT]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vssra.mask.nxv32i16.i64.i64(<vscale x 32 x i16> [[MASKEDOFF]], <vscale x 32 x i16> [[OP1]], i64 [[SHIFT]], <vscale x 32 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vssra_vx_i16m8_mu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, size_t shift, size_t vl) {
- return __riscv_vssra_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssra_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i32> @test_vssra_vv_i32mf2_mu
// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vssra.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[MASKEDOFF]], <vscale x 1 x i32> [[OP1]], <vscale x 1 x i32> [[SHIFT]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vssra.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[MASKEDOFF]], <vscale x 1 x i32> [[OP1]], <vscale x 1 x i32> [[SHIFT]], <vscale x 1 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vssra_vv_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vuint32mf2_t shift, size_t vl) {
- return __riscv_vssra_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssra_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i32> @test_vssra_vx_i32mf2_mu
// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vssra.mask.nxv1i32.i64.i64(<vscale x 1 x i32> [[MASKEDOFF]], <vscale x 1 x i32> [[OP1]], i64 [[SHIFT]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vssra.mask.nxv1i32.i64.i64(<vscale x 1 x i32> [[MASKEDOFF]], <vscale x 1 x i32> [[OP1]], i64 [[SHIFT]], <vscale x 1 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vssra_vx_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, size_t shift, size_t vl) {
- return __riscv_vssra_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssra_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i32> @test_vssra_vv_i32m1_mu
// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vssra.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[MASKEDOFF]], <vscale x 2 x i32> [[OP1]], <vscale x 2 x i32> [[SHIFT]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vssra.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[MASKEDOFF]], <vscale x 2 x i32> [[OP1]], <vscale x 2 x i32> [[SHIFT]], <vscale x 2 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vssra_vv_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vuint32m1_t shift, size_t vl) {
- return __riscv_vssra_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssra_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i32> @test_vssra_vx_i32m1_mu
// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vssra.mask.nxv2i32.i64.i64(<vscale x 2 x i32> [[MASKEDOFF]], <vscale x 2 x i32> [[OP1]], i64 [[SHIFT]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vssra.mask.nxv2i32.i64.i64(<vscale x 2 x i32> [[MASKEDOFF]], <vscale x 2 x i32> [[OP1]], i64 [[SHIFT]], <vscale x 2 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vssra_vx_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, size_t shift, size_t vl) {
- return __riscv_vssra_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssra_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i32> @test_vssra_vv_i32m2_mu
// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vssra.mask.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[MASKEDOFF]], <vscale x 4 x i32> [[OP1]], <vscale x 4 x i32> [[SHIFT]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vssra.mask.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[MASKEDOFF]], <vscale x 4 x i32> [[OP1]], <vscale x 4 x i32> [[SHIFT]], <vscale x 4 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vssra_vv_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vuint32m2_t shift, size_t vl) {
- return __riscv_vssra_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssra_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i32> @test_vssra_vx_i32m2_mu
// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vssra.mask.nxv4i32.i64.i64(<vscale x 4 x i32> [[MASKEDOFF]], <vscale x 4 x i32> [[OP1]], i64 [[SHIFT]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vssra.mask.nxv4i32.i64.i64(<vscale x 4 x i32> [[MASKEDOFF]], <vscale x 4 x i32> [[OP1]], i64 [[SHIFT]], <vscale x 4 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vssra_vx_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, size_t shift, size_t vl) {
- return __riscv_vssra_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssra_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i32> @test_vssra_vv_i32m4_mu
// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vssra.mask.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[MASKEDOFF]], <vscale x 8 x i32> [[OP1]], <vscale x 8 x i32> [[SHIFT]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vssra.mask.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[MASKEDOFF]], <vscale x 8 x i32> [[OP1]], <vscale x 8 x i32> [[SHIFT]], <vscale x 8 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vssra_vv_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vuint32m4_t shift, size_t vl) {
- return __riscv_vssra_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssra_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i32> @test_vssra_vx_i32m4_mu
// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vssra.mask.nxv8i32.i64.i64(<vscale x 8 x i32> [[MASKEDOFF]], <vscale x 8 x i32> [[OP1]], i64 [[SHIFT]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vssra.mask.nxv8i32.i64.i64(<vscale x 8 x i32> [[MASKEDOFF]], <vscale x 8 x i32> [[OP1]], i64 [[SHIFT]], <vscale x 8 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vssra_vx_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, size_t shift, size_t vl) {
- return __riscv_vssra_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssra_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i32> @test_vssra_vv_i32m8_mu
// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vssra.mask.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[MASKEDOFF]], <vscale x 16 x i32> [[OP1]], <vscale x 16 x i32> [[SHIFT]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vssra.mask.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[MASKEDOFF]], <vscale x 16 x i32> [[OP1]], <vscale x 16 x i32> [[SHIFT]], <vscale x 16 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vssra_vv_i32m8_mu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vuint32m8_t shift, size_t vl) {
- return __riscv_vssra_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssra_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i32> @test_vssra_vx_i32m8_mu
// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vssra.mask.nxv16i32.i64.i64(<vscale x 16 x i32> [[MASKEDOFF]], <vscale x 16 x i32> [[OP1]], i64 [[SHIFT]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vssra.mask.nxv16i32.i64.i64(<vscale x 16 x i32> [[MASKEDOFF]], <vscale x 16 x i32> [[OP1]], i64 [[SHIFT]], <vscale x 16 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vssra_vx_i32m8_mu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, size_t shift, size_t vl) {
- return __riscv_vssra_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssra_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i64> @test_vssra_vv_i64m1_mu
// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vssra.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[MASKEDOFF]], <vscale x 1 x i64> [[OP1]], <vscale x 1 x i64> [[SHIFT]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vssra.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[MASKEDOFF]], <vscale x 1 x i64> [[OP1]], <vscale x 1 x i64> [[SHIFT]], <vscale x 1 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vssra_vv_i64m1_mu(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vuint64m1_t shift, size_t vl) {
- return __riscv_vssra_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssra_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i64> @test_vssra_vx_i64m1_mu
// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vssra.mask.nxv1i64.i64.i64(<vscale x 1 x i64> [[MASKEDOFF]], <vscale x 1 x i64> [[OP1]], i64 [[SHIFT]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vssra.mask.nxv1i64.i64.i64(<vscale x 1 x i64> [[MASKEDOFF]], <vscale x 1 x i64> [[OP1]], i64 [[SHIFT]], <vscale x 1 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vssra_vx_i64m1_mu(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, size_t shift, size_t vl) {
- return __riscv_vssra_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssra_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i64> @test_vssra_vv_i64m2_mu
// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vssra.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[MASKEDOFF]], <vscale x 2 x i64> [[OP1]], <vscale x 2 x i64> [[SHIFT]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vssra.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[MASKEDOFF]], <vscale x 2 x i64> [[OP1]], <vscale x 2 x i64> [[SHIFT]], <vscale x 2 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vssra_vv_i64m2_mu(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vuint64m2_t shift, size_t vl) {
- return __riscv_vssra_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssra_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i64> @test_vssra_vx_i64m2_mu
// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vssra.mask.nxv2i64.i64.i64(<vscale x 2 x i64> [[MASKEDOFF]], <vscale x 2 x i64> [[OP1]], i64 [[SHIFT]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vssra.mask.nxv2i64.i64.i64(<vscale x 2 x i64> [[MASKEDOFF]], <vscale x 2 x i64> [[OP1]], i64 [[SHIFT]], <vscale x 2 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vssra_vx_i64m2_mu(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, size_t shift, size_t vl) {
- return __riscv_vssra_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssra_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i64> @test_vssra_vv_i64m4_mu
// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vssra.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[MASKEDOFF]], <vscale x 4 x i64> [[OP1]], <vscale x 4 x i64> [[SHIFT]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vssra.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[MASKEDOFF]], <vscale x 4 x i64> [[OP1]], <vscale x 4 x i64> [[SHIFT]], <vscale x 4 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vssra_vv_i64m4_mu(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vuint64m4_t shift, size_t vl) {
- return __riscv_vssra_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssra_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i64> @test_vssra_vx_i64m4_mu
// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vssra.mask.nxv4i64.i64.i64(<vscale x 4 x i64> [[MASKEDOFF]], <vscale x 4 x i64> [[OP1]], i64 [[SHIFT]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vssra.mask.nxv4i64.i64.i64(<vscale x 4 x i64> [[MASKEDOFF]], <vscale x 4 x i64> [[OP1]], i64 [[SHIFT]], <vscale x 4 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vssra_vx_i64m4_mu(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, size_t shift, size_t vl) {
- return __riscv_vssra_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssra_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i64> @test_vssra_vv_i64m8_mu
// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vssra.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[MASKEDOFF]], <vscale x 8 x i64> [[OP1]], <vscale x 8 x i64> [[SHIFT]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vssra.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[MASKEDOFF]], <vscale x 8 x i64> [[OP1]], <vscale x 8 x i64> [[SHIFT]], <vscale x 8 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vssra_vv_i64m8_mu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vuint64m8_t shift, size_t vl) {
- return __riscv_vssra_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssra_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i64> @test_vssra_vx_i64m8_mu
// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vssra.mask.nxv8i64.i64.i64(<vscale x 8 x i64> [[MASKEDOFF]], <vscale x 8 x i64> [[OP1]], i64 [[SHIFT]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vssra.mask.nxv8i64.i64.i64(<vscale x 8 x i64> [[MASKEDOFF]], <vscale x 8 x i64> [[OP1]], i64 [[SHIFT]], <vscale x 8 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vssra_vx_i64m8_mu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, size_t shift, size_t vl) {
- return __riscv_vssra_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssra_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
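To make the updated signatures exercised by these checks concrete, here is a minimal caller sketch against the new overloaded prototypes; it is not part of the generated tests, the wrapper name scale_down_rnu and the i32m1 element type are illustrative only, and it simply shows that the vxrm rounding mode is now an explicit argument placed between the shift and vl operands.

#include <riscv_vector.h>

// Illustrative wrapper (assumed name, not from the generated tests):
// arithmetic right shift with rounding, keeping inactive lanes from
// `maskedoff` (mask-undisturbed policy). The rounding mode
// __RISCV_VXRM_RNU is passed explicitly before vl, matching the
// updated overloaded intrinsic prototype.
static inline vint32m1_t scale_down_rnu(vbool32_t mask, vint32m1_t maskedoff,
                                        vint32m1_t op1, size_t shift,
                                        size_t vl) {
  return __riscv_vssra_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}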
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vssrl_vv_u8mf8_tu
// CHECK-RV64-SAME: (<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vssrl.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF]], <vscale x 1 x i8> [[OP1]], <vscale x 1 x i8> [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vssrl.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF]], <vscale x 1 x i8> [[OP1]], <vscale x 1 x i8> [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vssrl_vv_u8mf8_tu(vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t shift, size_t vl) {
- return __riscv_vssrl_tu(maskedoff, op1, shift, vl);
+ return __riscv_vssrl_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vssrl_vx_u8mf8_tu
// CHECK-RV64-SAME: (<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vssrl.nxv1i8.i64.i64(<vscale x 1 x i8> [[MASKEDOFF]], <vscale x 1 x i8> [[OP1]], i64 [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vssrl.nxv1i8.i64.i64(<vscale x 1 x i8> [[MASKEDOFF]], <vscale x 1 x i8> [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vssrl_vx_u8mf8_tu(vuint8mf8_t maskedoff, vuint8mf8_t op1, size_t shift, size_t vl) {
- return __riscv_vssrl_tu(maskedoff, op1, shift, vl);
+ return __riscv_vssrl_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vssrl_vv_u8mf4_tu
// CHECK-RV64-SAME: (<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vssrl.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[MASKEDOFF]], <vscale x 2 x i8> [[OP1]], <vscale x 2 x i8> [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vssrl.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[MASKEDOFF]], <vscale x 2 x i8> [[OP1]], <vscale x 2 x i8> [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vssrl_vv_u8mf4_tu(vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t shift, size_t vl) {
- return __riscv_vssrl_tu(maskedoff, op1, shift, vl);
+ return __riscv_vssrl_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vssrl_vx_u8mf4_tu
// CHECK-RV64-SAME: (<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vssrl.nxv2i8.i64.i64(<vscale x 2 x i8> [[MASKEDOFF]], <vscale x 2 x i8> [[OP1]], i64 [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vssrl.nxv2i8.i64.i64(<vscale x 2 x i8> [[MASKEDOFF]], <vscale x 2 x i8> [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vssrl_vx_u8mf4_tu(vuint8mf4_t maskedoff, vuint8mf4_t op1, size_t shift, size_t vl) {
- return __riscv_vssrl_tu(maskedoff, op1, shift, vl);
+ return __riscv_vssrl_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vssrl_vv_u8mf2_tu
// CHECK-RV64-SAME: (<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vssrl.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[MASKEDOFF]], <vscale x 4 x i8> [[OP1]], <vscale x 4 x i8> [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vssrl.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[MASKEDOFF]], <vscale x 4 x i8> [[OP1]], <vscale x 4 x i8> [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vssrl_vv_u8mf2_tu(vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t shift, size_t vl) {
- return __riscv_vssrl_tu(maskedoff, op1, shift, vl);
+ return __riscv_vssrl_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vssrl_vx_u8mf2_tu
// CHECK-RV64-SAME: (<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vssrl.nxv4i8.i64.i64(<vscale x 4 x i8> [[MASKEDOFF]], <vscale x 4 x i8> [[OP1]], i64 [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vssrl.nxv4i8.i64.i64(<vscale x 4 x i8> [[MASKEDOFF]], <vscale x 4 x i8> [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vssrl_vx_u8mf2_tu(vuint8mf2_t maskedoff, vuint8mf2_t op1, size_t shift, size_t vl) {
- return __riscv_vssrl_tu(maskedoff, op1, shift, vl);
+ return __riscv_vssrl_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vssrl_vv_u8m1_tu
// CHECK-RV64-SAME: (<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vssrl.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF]], <vscale x 8 x i8> [[OP1]], <vscale x 8 x i8> [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vssrl.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF]], <vscale x 8 x i8> [[OP1]], <vscale x 8 x i8> [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vssrl_vv_u8m1_tu(vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t shift, size_t vl) {
- return __riscv_vssrl_tu(maskedoff, op1, shift, vl);
+ return __riscv_vssrl_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vssrl_vx_u8m1_tu
// CHECK-RV64-SAME: (<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vssrl.nxv8i8.i64.i64(<vscale x 8 x i8> [[MASKEDOFF]], <vscale x 8 x i8> [[OP1]], i64 [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vssrl.nxv8i8.i64.i64(<vscale x 8 x i8> [[MASKEDOFF]], <vscale x 8 x i8> [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vssrl_vx_u8m1_tu(vuint8m1_t maskedoff, vuint8m1_t op1, size_t shift, size_t vl) {
- return __riscv_vssrl_tu(maskedoff, op1, shift, vl);
+ return __riscv_vssrl_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vssrl_vv_u8m2_tu
// CHECK-RV64-SAME: (<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vssrl.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[MASKEDOFF]], <vscale x 16 x i8> [[OP1]], <vscale x 16 x i8> [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vssrl.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[MASKEDOFF]], <vscale x 16 x i8> [[OP1]], <vscale x 16 x i8> [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vssrl_vv_u8m2_tu(vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t shift, size_t vl) {
- return __riscv_vssrl_tu(maskedoff, op1, shift, vl);
+ return __riscv_vssrl_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vssrl_vx_u8m2_tu
// CHECK-RV64-SAME: (<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vssrl.nxv16i8.i64.i64(<vscale x 16 x i8> [[MASKEDOFF]], <vscale x 16 x i8> [[OP1]], i64 [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vssrl.nxv16i8.i64.i64(<vscale x 16 x i8> [[MASKEDOFF]], <vscale x 16 x i8> [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vssrl_vx_u8m2_tu(vuint8m2_t maskedoff, vuint8m2_t op1, size_t shift, size_t vl) {
- return __riscv_vssrl_tu(maskedoff, op1, shift, vl);
+ return __riscv_vssrl_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vssrl_vv_u8m4_tu
// CHECK-RV64-SAME: (<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vssrl.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[MASKEDOFF]], <vscale x 32 x i8> [[OP1]], <vscale x 32 x i8> [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vssrl.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[MASKEDOFF]], <vscale x 32 x i8> [[OP1]], <vscale x 32 x i8> [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vssrl_vv_u8m4_tu(vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t shift, size_t vl) {
- return __riscv_vssrl_tu(maskedoff, op1, shift, vl);
+ return __riscv_vssrl_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vssrl_vx_u8m4_tu
// CHECK-RV64-SAME: (<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vssrl.nxv32i8.i64.i64(<vscale x 32 x i8> [[MASKEDOFF]], <vscale x 32 x i8> [[OP1]], i64 [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vssrl.nxv32i8.i64.i64(<vscale x 32 x i8> [[MASKEDOFF]], <vscale x 32 x i8> [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vssrl_vx_u8m4_tu(vuint8m4_t maskedoff, vuint8m4_t op1, size_t shift, size_t vl) {
- return __riscv_vssrl_tu(maskedoff, op1, shift, vl);
+ return __riscv_vssrl_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 64 x i8> @test_vssrl_vv_u8m8_tu
// CHECK-RV64-SAME: (<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vssrl.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[MASKEDOFF]], <vscale x 64 x i8> [[OP1]], <vscale x 64 x i8> [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vssrl.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[MASKEDOFF]], <vscale x 64 x i8> [[OP1]], <vscale x 64 x i8> [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vssrl_vv_u8m8_tu(vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t shift, size_t vl) {
- return __riscv_vssrl_tu(maskedoff, op1, shift, vl);
+ return __riscv_vssrl_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 64 x i8> @test_vssrl_vx_u8m8_tu
// CHECK-RV64-SAME: (<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vssrl.nxv64i8.i64.i64(<vscale x 64 x i8> [[MASKEDOFF]], <vscale x 64 x i8> [[OP1]], i64 [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vssrl.nxv64i8.i64.i64(<vscale x 64 x i8> [[MASKEDOFF]], <vscale x 64 x i8> [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vssrl_vx_u8m8_tu(vuint8m8_t maskedoff, vuint8m8_t op1, size_t shift, size_t vl) {
- return __riscv_vssrl_tu(maskedoff, op1, shift, vl);
+ return __riscv_vssrl_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i16> @test_vssrl_vv_u16mf4_tu
// CHECK-RV64-SAME: (<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vssrl.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[MASKEDOFF]], <vscale x 1 x i16> [[OP1]], <vscale x 1 x i16> [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vssrl.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[MASKEDOFF]], <vscale x 1 x i16> [[OP1]], <vscale x 1 x i16> [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vssrl_vv_u16mf4_tu(vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t shift, size_t vl) {
- return __riscv_vssrl_tu(maskedoff, op1, shift, vl);
+ return __riscv_vssrl_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i16> @test_vssrl_vx_u16mf4_tu
// CHECK-RV64-SAME: (<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vssrl.nxv1i16.i64.i64(<vscale x 1 x i16> [[MASKEDOFF]], <vscale x 1 x i16> [[OP1]], i64 [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vssrl.nxv1i16.i64.i64(<vscale x 1 x i16> [[MASKEDOFF]], <vscale x 1 x i16> [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vssrl_vx_u16mf4_tu(vuint16mf4_t maskedoff, vuint16mf4_t op1, size_t shift, size_t vl) {
- return __riscv_vssrl_tu(maskedoff, op1, shift, vl);
+ return __riscv_vssrl_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i16> @test_vssrl_vv_u16mf2_tu
// CHECK-RV64-SAME: (<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vssrl.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF]], <vscale x 2 x i16> [[OP1]], <vscale x 2 x i16> [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vssrl.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF]], <vscale x 2 x i16> [[OP1]], <vscale x 2 x i16> [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vssrl_vv_u16mf2_tu(vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t shift, size_t vl) {
- return __riscv_vssrl_tu(maskedoff, op1, shift, vl);
+ return __riscv_vssrl_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i16> @test_vssrl_vx_u16mf2_tu
// CHECK-RV64-SAME: (<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vssrl.nxv2i16.i64.i64(<vscale x 2 x i16> [[MASKEDOFF]], <vscale x 2 x i16> [[OP1]], i64 [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vssrl.nxv2i16.i64.i64(<vscale x 2 x i16> [[MASKEDOFF]], <vscale x 2 x i16> [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vssrl_vx_u16mf2_tu(vuint16mf2_t maskedoff, vuint16mf2_t op1, size_t shift, size_t vl) {
- return __riscv_vssrl_tu(maskedoff, op1, shift, vl);
+ return __riscv_vssrl_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vssrl_vv_u16m1_tu
// CHECK-RV64-SAME: (<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vssrl.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF]], <vscale x 4 x i16> [[OP1]], <vscale x 4 x i16> [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vssrl.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF]], <vscale x 4 x i16> [[OP1]], <vscale x 4 x i16> [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vssrl_vv_u16m1_tu(vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t shift, size_t vl) {
- return __riscv_vssrl_tu(maskedoff, op1, shift, vl);
+ return __riscv_vssrl_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vssrl_vx_u16m1_tu
// CHECK-RV64-SAME: (<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vssrl.nxv4i16.i64.i64(<vscale x 4 x i16> [[MASKEDOFF]], <vscale x 4 x i16> [[OP1]], i64 [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vssrl.nxv4i16.i64.i64(<vscale x 4 x i16> [[MASKEDOFF]], <vscale x 4 x i16> [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vssrl_vx_u16m1_tu(vuint16m1_t maskedoff, vuint16m1_t op1, size_t shift, size_t vl) {
- return __riscv_vssrl_tu(maskedoff, op1, shift, vl);
+ return __riscv_vssrl_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i16> @test_vssrl_vv_u16m2_tu
// CHECK-RV64-SAME: (<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vssrl.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[MASKEDOFF]], <vscale x 8 x i16> [[OP1]], <vscale x 8 x i16> [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vssrl.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[MASKEDOFF]], <vscale x 8 x i16> [[OP1]], <vscale x 8 x i16> [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vssrl_vv_u16m2_tu(vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t shift, size_t vl) {
- return __riscv_vssrl_tu(maskedoff, op1, shift, vl);
+ return __riscv_vssrl_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i16> @test_vssrl_vx_u16m2_tu
// CHECK-RV64-SAME: (<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vssrl.nxv8i16.i64.i64(<vscale x 8 x i16> [[MASKEDOFF]], <vscale x 8 x i16> [[OP1]], i64 [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vssrl.nxv8i16.i64.i64(<vscale x 8 x i16> [[MASKEDOFF]], <vscale x 8 x i16> [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vssrl_vx_u16m2_tu(vuint16m2_t maskedoff, vuint16m2_t op1, size_t shift, size_t vl) {
- return __riscv_vssrl_tu(maskedoff, op1, shift, vl);
+ return __riscv_vssrl_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i16> @test_vssrl_vv_u16m4_tu
// CHECK-RV64-SAME: (<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vssrl.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[MASKEDOFF]], <vscale x 16 x i16> [[OP1]], <vscale x 16 x i16> [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vssrl.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[MASKEDOFF]], <vscale x 16 x i16> [[OP1]], <vscale x 16 x i16> [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vssrl_vv_u16m4_tu(vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t shift, size_t vl) {
- return __riscv_vssrl_tu(maskedoff, op1, shift, vl);
+ return __riscv_vssrl_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i16> @test_vssrl_vx_u16m4_tu
// CHECK-RV64-SAME: (<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vssrl.nxv16i16.i64.i64(<vscale x 16 x i16> [[MASKEDOFF]], <vscale x 16 x i16> [[OP1]], i64 [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vssrl.nxv16i16.i64.i64(<vscale x 16 x i16> [[MASKEDOFF]], <vscale x 16 x i16> [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vssrl_vx_u16m4_tu(vuint16m4_t maskedoff, vuint16m4_t op1, size_t shift, size_t vl) {
- return __riscv_vssrl_tu(maskedoff, op1, shift, vl);
+ return __riscv_vssrl_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i16> @test_vssrl_vv_u16m8_tu
// CHECK-RV64-SAME: (<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vssrl.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[MASKEDOFF]], <vscale x 32 x i16> [[OP1]], <vscale x 32 x i16> [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vssrl.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[MASKEDOFF]], <vscale x 32 x i16> [[OP1]], <vscale x 32 x i16> [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vssrl_vv_u16m8_tu(vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t shift, size_t vl) {
- return __riscv_vssrl_tu(maskedoff, op1, shift, vl);
+ return __riscv_vssrl_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i16> @test_vssrl_vx_u16m8_tu
// CHECK-RV64-SAME: (<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vssrl.nxv32i16.i64.i64(<vscale x 32 x i16> [[MASKEDOFF]], <vscale x 32 x i16> [[OP1]], i64 [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vssrl.nxv32i16.i64.i64(<vscale x 32 x i16> [[MASKEDOFF]], <vscale x 32 x i16> [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vssrl_vx_u16m8_tu(vuint16m8_t maskedoff, vuint16m8_t op1, size_t shift, size_t vl) {
- return __riscv_vssrl_tu(maskedoff, op1, shift, vl);
+ return __riscv_vssrl_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i32> @test_vssrl_vv_u32mf2_tu
// CHECK-RV64-SAME: (<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vssrl.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[MASKEDOFF]], <vscale x 1 x i32> [[OP1]], <vscale x 1 x i32> [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vssrl.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[MASKEDOFF]], <vscale x 1 x i32> [[OP1]], <vscale x 1 x i32> [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vssrl_vv_u32mf2_tu(vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t shift, size_t vl) {
- return __riscv_vssrl_tu(maskedoff, op1, shift, vl);
+ return __riscv_vssrl_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i32> @test_vssrl_vx_u32mf2_tu
// CHECK-RV64-SAME: (<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vssrl.nxv1i32.i64.i64(<vscale x 1 x i32> [[MASKEDOFF]], <vscale x 1 x i32> [[OP1]], i64 [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vssrl.nxv1i32.i64.i64(<vscale x 1 x i32> [[MASKEDOFF]], <vscale x 1 x i32> [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vssrl_vx_u32mf2_tu(vuint32mf2_t maskedoff, vuint32mf2_t op1, size_t shift, size_t vl) {
- return __riscv_vssrl_tu(maskedoff, op1, shift, vl);
+ return __riscv_vssrl_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i32> @test_vssrl_vv_u32m1_tu
// CHECK-RV64-SAME: (<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vssrl.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[MASKEDOFF]], <vscale x 2 x i32> [[OP1]], <vscale x 2 x i32> [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vssrl.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[MASKEDOFF]], <vscale x 2 x i32> [[OP1]], <vscale x 2 x i32> [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vssrl_vv_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t shift, size_t vl) {
- return __riscv_vssrl_tu(maskedoff, op1, shift, vl);
+ return __riscv_vssrl_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i32> @test_vssrl_vx_u32m1_tu
// CHECK-RV64-SAME: (<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vssrl.nxv2i32.i64.i64(<vscale x 2 x i32> [[MASKEDOFF]], <vscale x 2 x i32> [[OP1]], i64 [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vssrl.nxv2i32.i64.i64(<vscale x 2 x i32> [[MASKEDOFF]], <vscale x 2 x i32> [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vssrl_vx_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t op1, size_t shift, size_t vl) {
- return __riscv_vssrl_tu(maskedoff, op1, shift, vl);
+ return __riscv_vssrl_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i32> @test_vssrl_vv_u32m2_tu
// CHECK-RV64-SAME: (<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vssrl.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[MASKEDOFF]], <vscale x 4 x i32> [[OP1]], <vscale x 4 x i32> [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vssrl.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[MASKEDOFF]], <vscale x 4 x i32> [[OP1]], <vscale x 4 x i32> [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vssrl_vv_u32m2_tu(vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t shift, size_t vl) {
- return __riscv_vssrl_tu(maskedoff, op1, shift, vl);
+ return __riscv_vssrl_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i32> @test_vssrl_vx_u32m2_tu
// CHECK-RV64-SAME: (<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vssrl.nxv4i32.i64.i64(<vscale x 4 x i32> [[MASKEDOFF]], <vscale x 4 x i32> [[OP1]], i64 [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vssrl.nxv4i32.i64.i64(<vscale x 4 x i32> [[MASKEDOFF]], <vscale x 4 x i32> [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vssrl_vx_u32m2_tu(vuint32m2_t maskedoff, vuint32m2_t op1, size_t shift, size_t vl) {
- return __riscv_vssrl_tu(maskedoff, op1, shift, vl);
+ return __riscv_vssrl_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i32> @test_vssrl_vv_u32m4_tu
// CHECK-RV64-SAME: (<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vssrl.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[MASKEDOFF]], <vscale x 8 x i32> [[OP1]], <vscale x 8 x i32> [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vssrl.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[MASKEDOFF]], <vscale x 8 x i32> [[OP1]], <vscale x 8 x i32> [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vssrl_vv_u32m4_tu(vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t shift, size_t vl) {
- return __riscv_vssrl_tu(maskedoff, op1, shift, vl);
+ return __riscv_vssrl_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i32> @test_vssrl_vx_u32m4_tu
// CHECK-RV64-SAME: (<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vssrl.nxv8i32.i64.i64(<vscale x 8 x i32> [[MASKEDOFF]], <vscale x 8 x i32> [[OP1]], i64 [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vssrl.nxv8i32.i64.i64(<vscale x 8 x i32> [[MASKEDOFF]], <vscale x 8 x i32> [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vssrl_vx_u32m4_tu(vuint32m4_t maskedoff, vuint32m4_t op1, size_t shift, size_t vl) {
- return __riscv_vssrl_tu(maskedoff, op1, shift, vl);
+ return __riscv_vssrl_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i32> @test_vssrl_vv_u32m8_tu
// CHECK-RV64-SAME: (<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vssrl.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[MASKEDOFF]], <vscale x 16 x i32> [[OP1]], <vscale x 16 x i32> [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vssrl.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[MASKEDOFF]], <vscale x 16 x i32> [[OP1]], <vscale x 16 x i32> [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vssrl_vv_u32m8_tu(vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t shift, size_t vl) {
- return __riscv_vssrl_tu(maskedoff, op1, shift, vl);
+ return __riscv_vssrl_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i32> @test_vssrl_vx_u32m8_tu
// CHECK-RV64-SAME: (<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vssrl.nxv16i32.i64.i64(<vscale x 16 x i32> [[MASKEDOFF]], <vscale x 16 x i32> [[OP1]], i64 [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vssrl.nxv16i32.i64.i64(<vscale x 16 x i32> [[MASKEDOFF]], <vscale x 16 x i32> [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vssrl_vx_u32m8_tu(vuint32m8_t maskedoff, vuint32m8_t op1, size_t shift, size_t vl) {
- return __riscv_vssrl_tu(maskedoff, op1, shift, vl);
+ return __riscv_vssrl_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i64> @test_vssrl_vv_u64m1_tu
// CHECK-RV64-SAME: (<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vssrl.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[MASKEDOFF]], <vscale x 1 x i64> [[OP1]], <vscale x 1 x i64> [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vssrl.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[MASKEDOFF]], <vscale x 1 x i64> [[OP1]], <vscale x 1 x i64> [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vssrl_vv_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t shift, size_t vl) {
- return __riscv_vssrl_tu(maskedoff, op1, shift, vl);
+ return __riscv_vssrl_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i64> @test_vssrl_vx_u64m1_tu
// CHECK-RV64-SAME: (<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vssrl.nxv1i64.i64.i64(<vscale x 1 x i64> [[MASKEDOFF]], <vscale x 1 x i64> [[OP1]], i64 [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vssrl.nxv1i64.i64.i64(<vscale x 1 x i64> [[MASKEDOFF]], <vscale x 1 x i64> [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vssrl_vx_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t op1, size_t shift, size_t vl) {
- return __riscv_vssrl_tu(maskedoff, op1, shift, vl);
+ return __riscv_vssrl_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i64> @test_vssrl_vv_u64m2_tu
// CHECK-RV64-SAME: (<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vssrl.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[MASKEDOFF]], <vscale x 2 x i64> [[OP1]], <vscale x 2 x i64> [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vssrl.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[MASKEDOFF]], <vscale x 2 x i64> [[OP1]], <vscale x 2 x i64> [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vssrl_vv_u64m2_tu(vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t shift, size_t vl) {
- return __riscv_vssrl_tu(maskedoff, op1, shift, vl);
+ return __riscv_vssrl_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i64> @test_vssrl_vx_u64m2_tu
// CHECK-RV64-SAME: (<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vssrl.nxv2i64.i64.i64(<vscale x 2 x i64> [[MASKEDOFF]], <vscale x 2 x i64> [[OP1]], i64 [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vssrl.nxv2i64.i64.i64(<vscale x 2 x i64> [[MASKEDOFF]], <vscale x 2 x i64> [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vssrl_vx_u64m2_tu(vuint64m2_t maskedoff, vuint64m2_t op1, size_t shift, size_t vl) {
- return __riscv_vssrl_tu(maskedoff, op1, shift, vl);
+ return __riscv_vssrl_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i64> @test_vssrl_vv_u64m4_tu
// CHECK-RV64-SAME: (<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vssrl.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[MASKEDOFF]], <vscale x 4 x i64> [[OP1]], <vscale x 4 x i64> [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vssrl.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[MASKEDOFF]], <vscale x 4 x i64> [[OP1]], <vscale x 4 x i64> [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vssrl_vv_u64m4_tu(vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t shift, size_t vl) {
- return __riscv_vssrl_tu(maskedoff, op1, shift, vl);
+ return __riscv_vssrl_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i64> @test_vssrl_vx_u64m4_tu
// CHECK-RV64-SAME: (<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vssrl.nxv4i64.i64.i64(<vscale x 4 x i64> [[MASKEDOFF]], <vscale x 4 x i64> [[OP1]], i64 [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vssrl.nxv4i64.i64.i64(<vscale x 4 x i64> [[MASKEDOFF]], <vscale x 4 x i64> [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vssrl_vx_u64m4_tu(vuint64m4_t maskedoff, vuint64m4_t op1, size_t shift, size_t vl) {
- return __riscv_vssrl_tu(maskedoff, op1, shift, vl);
+ return __riscv_vssrl_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i64> @test_vssrl_vv_u64m8_tu
// CHECK-RV64-SAME: (<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vssrl.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[MASKEDOFF]], <vscale x 8 x i64> [[OP1]], <vscale x 8 x i64> [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vssrl.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[MASKEDOFF]], <vscale x 8 x i64> [[OP1]], <vscale x 8 x i64> [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vssrl_vv_u64m8_tu(vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t shift, size_t vl) {
- return __riscv_vssrl_tu(maskedoff, op1, shift, vl);
+ return __riscv_vssrl_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i64> @test_vssrl_vx_u64m8_tu
// CHECK-RV64-SAME: (<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vssrl.nxv8i64.i64.i64(<vscale x 8 x i64> [[MASKEDOFF]], <vscale x 8 x i64> [[OP1]], i64 [[SHIFT]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vssrl.nxv8i64.i64.i64(<vscale x 8 x i64> [[MASKEDOFF]], <vscale x 8 x i64> [[OP1]], i64 [[SHIFT]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vssrl_vx_u64m8_tu(vuint64m8_t maskedoff, vuint64m8_t op1, size_t shift, size_t vl) {
- return __riscv_vssrl_tu(maskedoff, op1, shift, vl);
+ return __riscv_vssrl_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vssrl_vv_u8mf8_tum
// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vssrl.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF]], <vscale x 1 x i8> [[OP1]], <vscale x 1 x i8> [[SHIFT]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vssrl.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF]], <vscale x 1 x i8> [[OP1]], <vscale x 1 x i8> [[SHIFT]], <vscale x 1 x i1> [[MASK]], i64 0, i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vssrl_vv_u8mf8_tum(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t shift, size_t vl) {
- return __riscv_vssrl_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssrl_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vssrl_vx_u8mf8_tum
// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vssrl.mask.nxv1i8.i64.i64(<vscale x 1 x i8> [[MASKEDOFF]], <vscale x 1 x i8> [[OP1]], i64 [[SHIFT]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vssrl.mask.nxv1i8.i64.i64(<vscale x 1 x i8> [[MASKEDOFF]], <vscale x 1 x i8> [[OP1]], i64 [[SHIFT]], <vscale x 1 x i1> [[MASK]], i64 0, i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vssrl_vx_u8mf8_tum(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, size_t shift, size_t vl) {
- return __riscv_vssrl_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssrl_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vssrl_vv_u8mf4_tum
// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vssrl.mask.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[MASKEDOFF]], <vscale x 2 x i8> [[OP1]], <vscale x 2 x i8> [[SHIFT]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vssrl.mask.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[MASKEDOFF]], <vscale x 2 x i8> [[OP1]], <vscale x 2 x i8> [[SHIFT]], <vscale x 2 x i1> [[MASK]], i64 0, i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vssrl_vv_u8mf4_tum(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t shift, size_t vl) {
- return __riscv_vssrl_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssrl_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vssrl_vx_u8mf4_tum
// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vssrl.mask.nxv2i8.i64.i64(<vscale x 2 x i8> [[MASKEDOFF]], <vscale x 2 x i8> [[OP1]], i64 [[SHIFT]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vssrl.mask.nxv2i8.i64.i64(<vscale x 2 x i8> [[MASKEDOFF]], <vscale x 2 x i8> [[OP1]], i64 [[SHIFT]], <vscale x 2 x i1> [[MASK]], i64 0, i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vssrl_vx_u8mf4_tum(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, size_t shift, size_t vl) {
- return __riscv_vssrl_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssrl_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vssrl_vv_u8mf2_tum
// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vssrl.mask.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[MASKEDOFF]], <vscale x 4 x i8> [[OP1]], <vscale x 4 x i8> [[SHIFT]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vssrl.mask.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[MASKEDOFF]], <vscale x 4 x i8> [[OP1]], <vscale x 4 x i8> [[SHIFT]], <vscale x 4 x i1> [[MASK]], i64 0, i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vssrl_vv_u8mf2_tum(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t shift, size_t vl) {
- return __riscv_vssrl_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssrl_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vssrl_vx_u8mf2_tum
// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vssrl.mask.nxv4i8.i64.i64(<vscale x 4 x i8> [[MASKEDOFF]], <vscale x 4 x i8> [[OP1]], i64 [[SHIFT]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vssrl.mask.nxv4i8.i64.i64(<vscale x 4 x i8> [[MASKEDOFF]], <vscale x 4 x i8> [[OP1]], i64 [[SHIFT]], <vscale x 4 x i1> [[MASK]], i64 0, i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vssrl_vx_u8mf2_tum(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, size_t shift, size_t vl) {
- return __riscv_vssrl_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssrl_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vssrl_vv_u8m1_tum
// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vssrl.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF]], <vscale x 8 x i8> [[OP1]], <vscale x 8 x i8> [[SHIFT]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vssrl.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF]], <vscale x 8 x i8> [[OP1]], <vscale x 8 x i8> [[SHIFT]], <vscale x 8 x i1> [[MASK]], i64 0, i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vssrl_vv_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t shift, size_t vl) {
- return __riscv_vssrl_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssrl_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vssrl_vx_u8m1_tum
// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vssrl.mask.nxv8i8.i64.i64(<vscale x 8 x i8> [[MASKEDOFF]], <vscale x 8 x i8> [[OP1]], i64 [[SHIFT]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vssrl.mask.nxv8i8.i64.i64(<vscale x 8 x i8> [[MASKEDOFF]], <vscale x 8 x i8> [[OP1]], i64 [[SHIFT]], <vscale x 8 x i1> [[MASK]], i64 0, i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vssrl_vx_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, size_t shift, size_t vl) {
- return __riscv_vssrl_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssrl_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vssrl_vv_u8m2_tum
// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vssrl.mask.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[MASKEDOFF]], <vscale x 16 x i8> [[OP1]], <vscale x 16 x i8> [[SHIFT]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vssrl.mask.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[MASKEDOFF]], <vscale x 16 x i8> [[OP1]], <vscale x 16 x i8> [[SHIFT]], <vscale x 16 x i1> [[MASK]], i64 0, i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vssrl_vv_u8m2_tum(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t shift, size_t vl) {
- return __riscv_vssrl_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssrl_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vssrl_vx_u8m2_tum
// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vssrl.mask.nxv16i8.i64.i64(<vscale x 16 x i8> [[MASKEDOFF]], <vscale x 16 x i8> [[OP1]], i64 [[SHIFT]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vssrl.mask.nxv16i8.i64.i64(<vscale x 16 x i8> [[MASKEDOFF]], <vscale x 16 x i8> [[OP1]], i64 [[SHIFT]], <vscale x 16 x i1> [[MASK]], i64 0, i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vssrl_vx_u8m2_tum(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, size_t shift, size_t vl) {
- return __riscv_vssrl_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssrl_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vssrl_vv_u8m4_tum
// CHECK-RV64-SAME: (<vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vssrl.mask.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[MASKEDOFF]], <vscale x 32 x i8> [[OP1]], <vscale x 32 x i8> [[SHIFT]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vssrl.mask.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[MASKEDOFF]], <vscale x 32 x i8> [[OP1]], <vscale x 32 x i8> [[SHIFT]], <vscale x 32 x i1> [[MASK]], i64 0, i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vssrl_vv_u8m4_tum(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t shift, size_t vl) {
- return __riscv_vssrl_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssrl_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vssrl_vx_u8m4_tum
// CHECK-RV64-SAME: (<vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vssrl.mask.nxv32i8.i64.i64(<vscale x 32 x i8> [[MASKEDOFF]], <vscale x 32 x i8> [[OP1]], i64 [[SHIFT]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vssrl.mask.nxv32i8.i64.i64(<vscale x 32 x i8> [[MASKEDOFF]], <vscale x 32 x i8> [[OP1]], i64 [[SHIFT]], <vscale x 32 x i1> [[MASK]], i64 0, i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vssrl_vx_u8m4_tum(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, size_t shift, size_t vl) {
- return __riscv_vssrl_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssrl_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 64 x i8> @test_vssrl_vv_u8m8_tum
// CHECK-RV64-SAME: (<vscale x 64 x i1> [[MASK:%.*]], <vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vssrl.mask.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[MASKEDOFF]], <vscale x 64 x i8> [[OP1]], <vscale x 64 x i8> [[SHIFT]], <vscale x 64 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vssrl.mask.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[MASKEDOFF]], <vscale x 64 x i8> [[OP1]], <vscale x 64 x i8> [[SHIFT]], <vscale x 64 x i1> [[MASK]], i64 0, i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vssrl_vv_u8m8_tum(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t shift, size_t vl) {
- return __riscv_vssrl_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssrl_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 64 x i8> @test_vssrl_vx_u8m8_tum
// CHECK-RV64-SAME: (<vscale x 64 x i1> [[MASK:%.*]], <vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vssrl.mask.nxv64i8.i64.i64(<vscale x 64 x i8> [[MASKEDOFF]], <vscale x 64 x i8> [[OP1]], i64 [[SHIFT]], <vscale x 64 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vssrl.mask.nxv64i8.i64.i64(<vscale x 64 x i8> [[MASKEDOFF]], <vscale x 64 x i8> [[OP1]], i64 [[SHIFT]], <vscale x 64 x i1> [[MASK]], i64 0, i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vssrl_vx_u8m8_tum(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, size_t shift, size_t vl) {
- return __riscv_vssrl_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssrl_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i16> @test_vssrl_vv_u16mf4_tum
// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vssrl.mask.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[MASKEDOFF]], <vscale x 1 x i16> [[OP1]], <vscale x 1 x i16> [[SHIFT]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vssrl.mask.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[MASKEDOFF]], <vscale x 1 x i16> [[OP1]], <vscale x 1 x i16> [[SHIFT]], <vscale x 1 x i1> [[MASK]], i64 0, i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vssrl_vv_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t shift, size_t vl) {
- return __riscv_vssrl_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssrl_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i16> @test_vssrl_vx_u16mf4_tum
// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vssrl.mask.nxv1i16.i64.i64(<vscale x 1 x i16> [[MASKEDOFF]], <vscale x 1 x i16> [[OP1]], i64 [[SHIFT]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vssrl.mask.nxv1i16.i64.i64(<vscale x 1 x i16> [[MASKEDOFF]], <vscale x 1 x i16> [[OP1]], i64 [[SHIFT]], <vscale x 1 x i1> [[MASK]], i64 0, i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vssrl_vx_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, size_t shift, size_t vl) {
- return __riscv_vssrl_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssrl_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i16> @test_vssrl_vv_u16mf2_tum
// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vssrl.mask.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF]], <vscale x 2 x i16> [[OP1]], <vscale x 2 x i16> [[SHIFT]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vssrl.mask.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF]], <vscale x 2 x i16> [[OP1]], <vscale x 2 x i16> [[SHIFT]], <vscale x 2 x i1> [[MASK]], i64 0, i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vssrl_vv_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t shift, size_t vl) {
- return __riscv_vssrl_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssrl_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i16> @test_vssrl_vx_u16mf2_tum
// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vssrl.mask.nxv2i16.i64.i64(<vscale x 2 x i16> [[MASKEDOFF]], <vscale x 2 x i16> [[OP1]], i64 [[SHIFT]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vssrl.mask.nxv2i16.i64.i64(<vscale x 2 x i16> [[MASKEDOFF]], <vscale x 2 x i16> [[OP1]], i64 [[SHIFT]], <vscale x 2 x i1> [[MASK]], i64 0, i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vssrl_vx_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, size_t shift, size_t vl) {
- return __riscv_vssrl_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssrl_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vssrl_vv_u16m1_tum
// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vssrl.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF]], <vscale x 4 x i16> [[OP1]], <vscale x 4 x i16> [[SHIFT]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vssrl.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF]], <vscale x 4 x i16> [[OP1]], <vscale x 4 x i16> [[SHIFT]], <vscale x 4 x i1> [[MASK]], i64 0, i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vssrl_vv_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t shift, size_t vl) {
- return __riscv_vssrl_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssrl_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vssrl_vx_u16m1_tum
// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vssrl.mask.nxv4i16.i64.i64(<vscale x 4 x i16> [[MASKEDOFF]], <vscale x 4 x i16> [[OP1]], i64 [[SHIFT]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vssrl.mask.nxv4i16.i64.i64(<vscale x 4 x i16> [[MASKEDOFF]], <vscale x 4 x i16> [[OP1]], i64 [[SHIFT]], <vscale x 4 x i1> [[MASK]], i64 0, i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vssrl_vx_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, size_t shift, size_t vl) {
- return __riscv_vssrl_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssrl_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i16> @test_vssrl_vv_u16m2_tum
// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vssrl.mask.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[MASKEDOFF]], <vscale x 8 x i16> [[OP1]], <vscale x 8 x i16> [[SHIFT]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vssrl.mask.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[MASKEDOFF]], <vscale x 8 x i16> [[OP1]], <vscale x 8 x i16> [[SHIFT]], <vscale x 8 x i1> [[MASK]], i64 0, i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vssrl_vv_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t shift, size_t vl) {
- return __riscv_vssrl_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssrl_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i16> @test_vssrl_vx_u16m2_tum
// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vssrl.mask.nxv8i16.i64.i64(<vscale x 8 x i16> [[MASKEDOFF]], <vscale x 8 x i16> [[OP1]], i64 [[SHIFT]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vssrl.mask.nxv8i16.i64.i64(<vscale x 8 x i16> [[MASKEDOFF]], <vscale x 8 x i16> [[OP1]], i64 [[SHIFT]], <vscale x 8 x i1> [[MASK]], i64 0, i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vssrl_vx_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, size_t shift, size_t vl) {
- return __riscv_vssrl_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssrl_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i16> @test_vssrl_vv_u16m4_tum
// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vssrl.mask.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[MASKEDOFF]], <vscale x 16 x i16> [[OP1]], <vscale x 16 x i16> [[SHIFT]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vssrl.mask.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[MASKEDOFF]], <vscale x 16 x i16> [[OP1]], <vscale x 16 x i16> [[SHIFT]], <vscale x 16 x i1> [[MASK]], i64 0, i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vssrl_vv_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t shift, size_t vl) {
- return __riscv_vssrl_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssrl_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i16> @test_vssrl_vx_u16m4_tum
// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vssrl.mask.nxv16i16.i64.i64(<vscale x 16 x i16> [[MASKEDOFF]], <vscale x 16 x i16> [[OP1]], i64 [[SHIFT]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vssrl.mask.nxv16i16.i64.i64(<vscale x 16 x i16> [[MASKEDOFF]], <vscale x 16 x i16> [[OP1]], i64 [[SHIFT]], <vscale x 16 x i1> [[MASK]], i64 0, i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vssrl_vx_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, size_t shift, size_t vl) {
- return __riscv_vssrl_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssrl_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i16> @test_vssrl_vv_u16m8_tum
// CHECK-RV64-SAME: (<vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vssrl.mask.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[MASKEDOFF]], <vscale x 32 x i16> [[OP1]], <vscale x 32 x i16> [[SHIFT]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vssrl.mask.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[MASKEDOFF]], <vscale x 32 x i16> [[OP1]], <vscale x 32 x i16> [[SHIFT]], <vscale x 32 x i1> [[MASK]], i64 0, i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vssrl_vv_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t shift, size_t vl) {
- return __riscv_vssrl_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssrl_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i16> @test_vssrl_vx_u16m8_tum
// CHECK-RV64-SAME: (<vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vssrl.mask.nxv32i16.i64.i64(<vscale x 32 x i16> [[MASKEDOFF]], <vscale x 32 x i16> [[OP1]], i64 [[SHIFT]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vssrl.mask.nxv32i16.i64.i64(<vscale x 32 x i16> [[MASKEDOFF]], <vscale x 32 x i16> [[OP1]], i64 [[SHIFT]], <vscale x 32 x i1> [[MASK]], i64 0, i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vssrl_vx_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, size_t shift, size_t vl) {
- return __riscv_vssrl_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssrl_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i32> @test_vssrl_vv_u32mf2_tum
// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vssrl.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[MASKEDOFF]], <vscale x 1 x i32> [[OP1]], <vscale x 1 x i32> [[SHIFT]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vssrl.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[MASKEDOFF]], <vscale x 1 x i32> [[OP1]], <vscale x 1 x i32> [[SHIFT]], <vscale x 1 x i1> [[MASK]], i64 0, i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vssrl_vv_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t shift, size_t vl) {
- return __riscv_vssrl_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssrl_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i32> @test_vssrl_vx_u32mf2_tum
// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vssrl.mask.nxv1i32.i64.i64(<vscale x 1 x i32> [[MASKEDOFF]], <vscale x 1 x i32> [[OP1]], i64 [[SHIFT]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vssrl.mask.nxv1i32.i64.i64(<vscale x 1 x i32> [[MASKEDOFF]], <vscale x 1 x i32> [[OP1]], i64 [[SHIFT]], <vscale x 1 x i1> [[MASK]], i64 0, i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vssrl_vx_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, size_t shift, size_t vl) {
- return __riscv_vssrl_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssrl_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i32> @test_vssrl_vv_u32m1_tum
// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vssrl.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[MASKEDOFF]], <vscale x 2 x i32> [[OP1]], <vscale x 2 x i32> [[SHIFT]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vssrl.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[MASKEDOFF]], <vscale x 2 x i32> [[OP1]], <vscale x 2 x i32> [[SHIFT]], <vscale x 2 x i1> [[MASK]], i64 0, i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vssrl_vv_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t shift, size_t vl) {
- return __riscv_vssrl_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssrl_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i32> @test_vssrl_vx_u32m1_tum
// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vssrl.mask.nxv2i32.i64.i64(<vscale x 2 x i32> [[MASKEDOFF]], <vscale x 2 x i32> [[OP1]], i64 [[SHIFT]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vssrl.mask.nxv2i32.i64.i64(<vscale x 2 x i32> [[MASKEDOFF]], <vscale x 2 x i32> [[OP1]], i64 [[SHIFT]], <vscale x 2 x i1> [[MASK]], i64 0, i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vssrl_vx_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, size_t shift, size_t vl) {
- return __riscv_vssrl_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssrl_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i32> @test_vssrl_vv_u32m2_tum
// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vssrl.mask.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[MASKEDOFF]], <vscale x 4 x i32> [[OP1]], <vscale x 4 x i32> [[SHIFT]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vssrl.mask.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[MASKEDOFF]], <vscale x 4 x i32> [[OP1]], <vscale x 4 x i32> [[SHIFT]], <vscale x 4 x i1> [[MASK]], i64 0, i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vssrl_vv_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t shift, size_t vl) {
- return __riscv_vssrl_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssrl_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i32> @test_vssrl_vx_u32m2_tum
// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vssrl.mask.nxv4i32.i64.i64(<vscale x 4 x i32> [[MASKEDOFF]], <vscale x 4 x i32> [[OP1]], i64 [[SHIFT]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vssrl.mask.nxv4i32.i64.i64(<vscale x 4 x i32> [[MASKEDOFF]], <vscale x 4 x i32> [[OP1]], i64 [[SHIFT]], <vscale x 4 x i1> [[MASK]], i64 0, i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vssrl_vx_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, size_t shift, size_t vl) {
- return __riscv_vssrl_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssrl_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i32> @test_vssrl_vv_u32m4_tum
// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vssrl.mask.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[MASKEDOFF]], <vscale x 8 x i32> [[OP1]], <vscale x 8 x i32> [[SHIFT]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vssrl.mask.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[MASKEDOFF]], <vscale x 8 x i32> [[OP1]], <vscale x 8 x i32> [[SHIFT]], <vscale x 8 x i1> [[MASK]], i64 0, i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vssrl_vv_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t shift, size_t vl) {
- return __riscv_vssrl_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssrl_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i32> @test_vssrl_vx_u32m4_tum
// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vssrl.mask.nxv8i32.i64.i64(<vscale x 8 x i32> [[MASKEDOFF]], <vscale x 8 x i32> [[OP1]], i64 [[SHIFT]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vssrl.mask.nxv8i32.i64.i64(<vscale x 8 x i32> [[MASKEDOFF]], <vscale x 8 x i32> [[OP1]], i64 [[SHIFT]], <vscale x 8 x i1> [[MASK]], i64 0, i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vssrl_vx_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, size_t shift, size_t vl) {
- return __riscv_vssrl_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssrl_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i32> @test_vssrl_vv_u32m8_tum
// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vssrl.mask.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[MASKEDOFF]], <vscale x 16 x i32> [[OP1]], <vscale x 16 x i32> [[SHIFT]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vssrl.mask.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[MASKEDOFF]], <vscale x 16 x i32> [[OP1]], <vscale x 16 x i32> [[SHIFT]], <vscale x 16 x i1> [[MASK]], i64 0, i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vssrl_vv_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t shift, size_t vl) {
- return __riscv_vssrl_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssrl_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i32> @test_vssrl_vx_u32m8_tum
// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vssrl.mask.nxv16i32.i64.i64(<vscale x 16 x i32> [[MASKEDOFF]], <vscale x 16 x i32> [[OP1]], i64 [[SHIFT]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vssrl.mask.nxv16i32.i64.i64(<vscale x 16 x i32> [[MASKEDOFF]], <vscale x 16 x i32> [[OP1]], i64 [[SHIFT]], <vscale x 16 x i1> [[MASK]], i64 0, i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vssrl_vx_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, size_t shift, size_t vl) {
- return __riscv_vssrl_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssrl_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i64> @test_vssrl_vv_u64m1_tum
// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vssrl.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[MASKEDOFF]], <vscale x 1 x i64> [[OP1]], <vscale x 1 x i64> [[SHIFT]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vssrl.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[MASKEDOFF]], <vscale x 1 x i64> [[OP1]], <vscale x 1 x i64> [[SHIFT]], <vscale x 1 x i1> [[MASK]], i64 0, i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vssrl_vv_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t shift, size_t vl) {
- return __riscv_vssrl_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssrl_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i64> @test_vssrl_vx_u64m1_tum
// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vssrl.mask.nxv1i64.i64.i64(<vscale x 1 x i64> [[MASKEDOFF]], <vscale x 1 x i64> [[OP1]], i64 [[SHIFT]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vssrl.mask.nxv1i64.i64.i64(<vscale x 1 x i64> [[MASKEDOFF]], <vscale x 1 x i64> [[OP1]], i64 [[SHIFT]], <vscale x 1 x i1> [[MASK]], i64 0, i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vssrl_vx_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, size_t shift, size_t vl) {
- return __riscv_vssrl_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssrl_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i64> @test_vssrl_vv_u64m2_tum
// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vssrl.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[MASKEDOFF]], <vscale x 2 x i64> [[OP1]], <vscale x 2 x i64> [[SHIFT]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vssrl.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[MASKEDOFF]], <vscale x 2 x i64> [[OP1]], <vscale x 2 x i64> [[SHIFT]], <vscale x 2 x i1> [[MASK]], i64 0, i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vssrl_vv_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t shift, size_t vl) {
- return __riscv_vssrl_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssrl_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i64> @test_vssrl_vx_u64m2_tum
// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vssrl.mask.nxv2i64.i64.i64(<vscale x 2 x i64> [[MASKEDOFF]], <vscale x 2 x i64> [[OP1]], i64 [[SHIFT]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vssrl.mask.nxv2i64.i64.i64(<vscale x 2 x i64> [[MASKEDOFF]], <vscale x 2 x i64> [[OP1]], i64 [[SHIFT]], <vscale x 2 x i1> [[MASK]], i64 0, i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vssrl_vx_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, size_t shift, size_t vl) {
- return __riscv_vssrl_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssrl_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i64> @test_vssrl_vv_u64m4_tum
// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vssrl.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[MASKEDOFF]], <vscale x 4 x i64> [[OP1]], <vscale x 4 x i64> [[SHIFT]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vssrl.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[MASKEDOFF]], <vscale x 4 x i64> [[OP1]], <vscale x 4 x i64> [[SHIFT]], <vscale x 4 x i1> [[MASK]], i64 0, i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vssrl_vv_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t shift, size_t vl) {
- return __riscv_vssrl_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssrl_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i64> @test_vssrl_vx_u64m4_tum
// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vssrl.mask.nxv4i64.i64.i64(<vscale x 4 x i64> [[MASKEDOFF]], <vscale x 4 x i64> [[OP1]], i64 [[SHIFT]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vssrl.mask.nxv4i64.i64.i64(<vscale x 4 x i64> [[MASKEDOFF]], <vscale x 4 x i64> [[OP1]], i64 [[SHIFT]], <vscale x 4 x i1> [[MASK]], i64 0, i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vssrl_vx_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, size_t shift, size_t vl) {
- return __riscv_vssrl_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssrl_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i64> @test_vssrl_vv_u64m8_tum
// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vssrl.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[MASKEDOFF]], <vscale x 8 x i64> [[OP1]], <vscale x 8 x i64> [[SHIFT]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vssrl.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[MASKEDOFF]], <vscale x 8 x i64> [[OP1]], <vscale x 8 x i64> [[SHIFT]], <vscale x 8 x i1> [[MASK]], i64 0, i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vssrl_vv_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t shift, size_t vl) {
- return __riscv_vssrl_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssrl_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i64> @test_vssrl_vx_u64m8_tum
// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vssrl.mask.nxv8i64.i64.i64(<vscale x 8 x i64> [[MASKEDOFF]], <vscale x 8 x i64> [[OP1]], i64 [[SHIFT]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vssrl.mask.nxv8i64.i64.i64(<vscale x 8 x i64> [[MASKEDOFF]], <vscale x 8 x i64> [[OP1]], i64 [[SHIFT]], <vscale x 8 x i1> [[MASK]], i64 0, i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vssrl_vx_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, size_t shift, size_t vl) {
- return __riscv_vssrl_tum(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssrl_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
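// NOTE (illustrative aside, not part of the generated test file): the test
// updates above and below all make the same mechanical change -- the masked
// vssrl policy intrinsics now take an explicit __RISCV_VXRM_* rounding-mode
// argument between the shift and vl operands, instead of reading vxrm
// implicitly. A minimal call-site sketch, assuming <riscv_vector.h> from a
// toolchain carrying this change (the function and variable names here are
// hypothetical):
//
//   #include <riscv_vector.h>
//
//   vuint8m1_t scale_down_tum(vbool8_t mask, vuint8m1_t maskedoff,
//                             vuint8m1_t op1, size_t shift, size_t vl) {
//     // Round-to-nearest-up is requested per call rather than via the vxrm
//     // CSR; the extra operand appears as "i64 0" in the CHECK lines above.
//     return __riscv_vssrl_tum(mask, maskedoff, op1, shift,
//                              __RISCV_VXRM_RNU, vl);
//   }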
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vssrl_vv_u8mf8_tumu
// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vssrl.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF]], <vscale x 1 x i8> [[OP1]], <vscale x 1 x i8> [[SHIFT]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vssrl.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF]], <vscale x 1 x i8> [[OP1]], <vscale x 1 x i8> [[SHIFT]], <vscale x 1 x i1> [[MASK]], i64 0, i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vssrl_vv_u8mf8_tumu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t shift, size_t vl) {
- return __riscv_vssrl_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssrl_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vssrl_vx_u8mf8_tumu
// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vssrl.mask.nxv1i8.i64.i64(<vscale x 1 x i8> [[MASKEDOFF]], <vscale x 1 x i8> [[OP1]], i64 [[SHIFT]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vssrl.mask.nxv1i8.i64.i64(<vscale x 1 x i8> [[MASKEDOFF]], <vscale x 1 x i8> [[OP1]], i64 [[SHIFT]], <vscale x 1 x i1> [[MASK]], i64 0, i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vssrl_vx_u8mf8_tumu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, size_t shift, size_t vl) {
- return __riscv_vssrl_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssrl_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vssrl_vv_u8mf4_tumu
// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vssrl.mask.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[MASKEDOFF]], <vscale x 2 x i8> [[OP1]], <vscale x 2 x i8> [[SHIFT]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vssrl.mask.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[MASKEDOFF]], <vscale x 2 x i8> [[OP1]], <vscale x 2 x i8> [[SHIFT]], <vscale x 2 x i1> [[MASK]], i64 0, i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vssrl_vv_u8mf4_tumu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t shift, size_t vl) {
- return __riscv_vssrl_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssrl_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vssrl_vx_u8mf4_tumu
// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vssrl.mask.nxv2i8.i64.i64(<vscale x 2 x i8> [[MASKEDOFF]], <vscale x 2 x i8> [[OP1]], i64 [[SHIFT]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vssrl.mask.nxv2i8.i64.i64(<vscale x 2 x i8> [[MASKEDOFF]], <vscale x 2 x i8> [[OP1]], i64 [[SHIFT]], <vscale x 2 x i1> [[MASK]], i64 0, i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vssrl_vx_u8mf4_tumu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, size_t shift, size_t vl) {
- return __riscv_vssrl_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssrl_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vssrl_vv_u8mf2_tumu
// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vssrl.mask.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[MASKEDOFF]], <vscale x 4 x i8> [[OP1]], <vscale x 4 x i8> [[SHIFT]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vssrl.mask.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[MASKEDOFF]], <vscale x 4 x i8> [[OP1]], <vscale x 4 x i8> [[SHIFT]], <vscale x 4 x i1> [[MASK]], i64 0, i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vssrl_vv_u8mf2_tumu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t shift, size_t vl) {
- return __riscv_vssrl_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssrl_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vssrl_vx_u8mf2_tumu
// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vssrl.mask.nxv4i8.i64.i64(<vscale x 4 x i8> [[MASKEDOFF]], <vscale x 4 x i8> [[OP1]], i64 [[SHIFT]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vssrl.mask.nxv4i8.i64.i64(<vscale x 4 x i8> [[MASKEDOFF]], <vscale x 4 x i8> [[OP1]], i64 [[SHIFT]], <vscale x 4 x i1> [[MASK]], i64 0, i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vssrl_vx_u8mf2_tumu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, size_t shift, size_t vl) {
- return __riscv_vssrl_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssrl_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vssrl_vv_u8m1_tumu
// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vssrl.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF]], <vscale x 8 x i8> [[OP1]], <vscale x 8 x i8> [[SHIFT]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vssrl.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF]], <vscale x 8 x i8> [[OP1]], <vscale x 8 x i8> [[SHIFT]], <vscale x 8 x i1> [[MASK]], i64 0, i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vssrl_vv_u8m1_tumu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t shift, size_t vl) {
- return __riscv_vssrl_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssrl_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vssrl_vx_u8m1_tumu
// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vssrl.mask.nxv8i8.i64.i64(<vscale x 8 x i8> [[MASKEDOFF]], <vscale x 8 x i8> [[OP1]], i64 [[SHIFT]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vssrl.mask.nxv8i8.i64.i64(<vscale x 8 x i8> [[MASKEDOFF]], <vscale x 8 x i8> [[OP1]], i64 [[SHIFT]], <vscale x 8 x i1> [[MASK]], i64 0, i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vssrl_vx_u8m1_tumu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, size_t shift, size_t vl) {
- return __riscv_vssrl_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssrl_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vssrl_vv_u8m2_tumu
// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vssrl.mask.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[MASKEDOFF]], <vscale x 16 x i8> [[OP1]], <vscale x 16 x i8> [[SHIFT]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vssrl.mask.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[MASKEDOFF]], <vscale x 16 x i8> [[OP1]], <vscale x 16 x i8> [[SHIFT]], <vscale x 16 x i1> [[MASK]], i64 0, i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vssrl_vv_u8m2_tumu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t shift, size_t vl) {
- return __riscv_vssrl_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssrl_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vssrl_vx_u8m2_tumu
// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vssrl.mask.nxv16i8.i64.i64(<vscale x 16 x i8> [[MASKEDOFF]], <vscale x 16 x i8> [[OP1]], i64 [[SHIFT]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vssrl.mask.nxv16i8.i64.i64(<vscale x 16 x i8> [[MASKEDOFF]], <vscale x 16 x i8> [[OP1]], i64 [[SHIFT]], <vscale x 16 x i1> [[MASK]], i64 0, i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vssrl_vx_u8m2_tumu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, size_t shift, size_t vl) {
- return __riscv_vssrl_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssrl_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vssrl_vv_u8m4_tumu
// CHECK-RV64-SAME: (<vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vssrl.mask.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[MASKEDOFF]], <vscale x 32 x i8> [[OP1]], <vscale x 32 x i8> [[SHIFT]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vssrl.mask.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[MASKEDOFF]], <vscale x 32 x i8> [[OP1]], <vscale x 32 x i8> [[SHIFT]], <vscale x 32 x i1> [[MASK]], i64 0, i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vssrl_vv_u8m4_tumu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t shift, size_t vl) {
- return __riscv_vssrl_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssrl_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vssrl_vx_u8m4_tumu
// CHECK-RV64-SAME: (<vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vssrl.mask.nxv32i8.i64.i64(<vscale x 32 x i8> [[MASKEDOFF]], <vscale x 32 x i8> [[OP1]], i64 [[SHIFT]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vssrl.mask.nxv32i8.i64.i64(<vscale x 32 x i8> [[MASKEDOFF]], <vscale x 32 x i8> [[OP1]], i64 [[SHIFT]], <vscale x 32 x i1> [[MASK]], i64 0, i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vssrl_vx_u8m4_tumu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, size_t shift, size_t vl) {
- return __riscv_vssrl_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssrl_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 64 x i8> @test_vssrl_vv_u8m8_tumu
// CHECK-RV64-SAME: (<vscale x 64 x i1> [[MASK:%.*]], <vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vssrl.mask.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[MASKEDOFF]], <vscale x 64 x i8> [[OP1]], <vscale x 64 x i8> [[SHIFT]], <vscale x 64 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vssrl.mask.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[MASKEDOFF]], <vscale x 64 x i8> [[OP1]], <vscale x 64 x i8> [[SHIFT]], <vscale x 64 x i1> [[MASK]], i64 0, i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vssrl_vv_u8m8_tumu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t shift, size_t vl) {
- return __riscv_vssrl_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssrl_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 64 x i8> @test_vssrl_vx_u8m8_tumu
// CHECK-RV64-SAME: (<vscale x 64 x i1> [[MASK:%.*]], <vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vssrl.mask.nxv64i8.i64.i64(<vscale x 64 x i8> [[MASKEDOFF]], <vscale x 64 x i8> [[OP1]], i64 [[SHIFT]], <vscale x 64 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vssrl.mask.nxv64i8.i64.i64(<vscale x 64 x i8> [[MASKEDOFF]], <vscale x 64 x i8> [[OP1]], i64 [[SHIFT]], <vscale x 64 x i1> [[MASK]], i64 0, i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vssrl_vx_u8m8_tumu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, size_t shift, size_t vl) {
- return __riscv_vssrl_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssrl_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i16> @test_vssrl_vv_u16mf4_tumu
// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vssrl.mask.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[MASKEDOFF]], <vscale x 1 x i16> [[OP1]], <vscale x 1 x i16> [[SHIFT]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vssrl.mask.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[MASKEDOFF]], <vscale x 1 x i16> [[OP1]], <vscale x 1 x i16> [[SHIFT]], <vscale x 1 x i1> [[MASK]], i64 0, i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vssrl_vv_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t shift, size_t vl) {
- return __riscv_vssrl_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssrl_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i16> @test_vssrl_vx_u16mf4_tumu
// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vssrl.mask.nxv1i16.i64.i64(<vscale x 1 x i16> [[MASKEDOFF]], <vscale x 1 x i16> [[OP1]], i64 [[SHIFT]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vssrl.mask.nxv1i16.i64.i64(<vscale x 1 x i16> [[MASKEDOFF]], <vscale x 1 x i16> [[OP1]], i64 [[SHIFT]], <vscale x 1 x i1> [[MASK]], i64 0, i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vssrl_vx_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, size_t shift, size_t vl) {
- return __riscv_vssrl_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssrl_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i16> @test_vssrl_vv_u16mf2_tumu
// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vssrl.mask.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF]], <vscale x 2 x i16> [[OP1]], <vscale x 2 x i16> [[SHIFT]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vssrl.mask.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF]], <vscale x 2 x i16> [[OP1]], <vscale x 2 x i16> [[SHIFT]], <vscale x 2 x i1> [[MASK]], i64 0, i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vssrl_vv_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t shift, size_t vl) {
- return __riscv_vssrl_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssrl_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i16> @test_vssrl_vx_u16mf2_tumu
// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vssrl.mask.nxv2i16.i64.i64(<vscale x 2 x i16> [[MASKEDOFF]], <vscale x 2 x i16> [[OP1]], i64 [[SHIFT]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vssrl.mask.nxv2i16.i64.i64(<vscale x 2 x i16> [[MASKEDOFF]], <vscale x 2 x i16> [[OP1]], i64 [[SHIFT]], <vscale x 2 x i1> [[MASK]], i64 0, i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vssrl_vx_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, size_t shift, size_t vl) {
- return __riscv_vssrl_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssrl_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vssrl_vv_u16m1_tumu
// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vssrl.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF]], <vscale x 4 x i16> [[OP1]], <vscale x 4 x i16> [[SHIFT]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vssrl.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF]], <vscale x 4 x i16> [[OP1]], <vscale x 4 x i16> [[SHIFT]], <vscale x 4 x i1> [[MASK]], i64 0, i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vssrl_vv_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t shift, size_t vl) {
- return __riscv_vssrl_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssrl_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vssrl_vx_u16m1_tumu
// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vssrl.mask.nxv4i16.i64.i64(<vscale x 4 x i16> [[MASKEDOFF]], <vscale x 4 x i16> [[OP1]], i64 [[SHIFT]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vssrl.mask.nxv4i16.i64.i64(<vscale x 4 x i16> [[MASKEDOFF]], <vscale x 4 x i16> [[OP1]], i64 [[SHIFT]], <vscale x 4 x i1> [[MASK]], i64 0, i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vssrl_vx_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, size_t shift, size_t vl) {
- return __riscv_vssrl_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssrl_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i16> @test_vssrl_vv_u16m2_tumu
// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vssrl.mask.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[MASKEDOFF]], <vscale x 8 x i16> [[OP1]], <vscale x 8 x i16> [[SHIFT]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vssrl.mask.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[MASKEDOFF]], <vscale x 8 x i16> [[OP1]], <vscale x 8 x i16> [[SHIFT]], <vscale x 8 x i1> [[MASK]], i64 0, i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vssrl_vv_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t shift, size_t vl) {
- return __riscv_vssrl_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssrl_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i16> @test_vssrl_vx_u16m2_tumu
// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vssrl.mask.nxv8i16.i64.i64(<vscale x 8 x i16> [[MASKEDOFF]], <vscale x 8 x i16> [[OP1]], i64 [[SHIFT]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vssrl.mask.nxv8i16.i64.i64(<vscale x 8 x i16> [[MASKEDOFF]], <vscale x 8 x i16> [[OP1]], i64 [[SHIFT]], <vscale x 8 x i1> [[MASK]], i64 0, i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vssrl_vx_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, size_t shift, size_t vl) {
- return __riscv_vssrl_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssrl_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i16> @test_vssrl_vv_u16m4_tumu
// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vssrl.mask.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[MASKEDOFF]], <vscale x 16 x i16> [[OP1]], <vscale x 16 x i16> [[SHIFT]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vssrl.mask.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[MASKEDOFF]], <vscale x 16 x i16> [[OP1]], <vscale x 16 x i16> [[SHIFT]], <vscale x 16 x i1> [[MASK]], i64 0, i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vssrl_vv_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t shift, size_t vl) {
- return __riscv_vssrl_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssrl_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i16> @test_vssrl_vx_u16m4_tumu
// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vssrl.mask.nxv16i16.i64.i64(<vscale x 16 x i16> [[MASKEDOFF]], <vscale x 16 x i16> [[OP1]], i64 [[SHIFT]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vssrl.mask.nxv16i16.i64.i64(<vscale x 16 x i16> [[MASKEDOFF]], <vscale x 16 x i16> [[OP1]], i64 [[SHIFT]], <vscale x 16 x i1> [[MASK]], i64 0, i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vssrl_vx_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, size_t shift, size_t vl) {
- return __riscv_vssrl_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssrl_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i16> @test_vssrl_vv_u16m8_tumu
// CHECK-RV64-SAME: (<vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vssrl.mask.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[MASKEDOFF]], <vscale x 32 x i16> [[OP1]], <vscale x 32 x i16> [[SHIFT]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vssrl.mask.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[MASKEDOFF]], <vscale x 32 x i16> [[OP1]], <vscale x 32 x i16> [[SHIFT]], <vscale x 32 x i1> [[MASK]], i64 0, i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vssrl_vv_u16m8_tumu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t shift, size_t vl) {
- return __riscv_vssrl_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssrl_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i16> @test_vssrl_vx_u16m8_tumu
// CHECK-RV64-SAME: (<vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vssrl.mask.nxv32i16.i64.i64(<vscale x 32 x i16> [[MASKEDOFF]], <vscale x 32 x i16> [[OP1]], i64 [[SHIFT]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vssrl.mask.nxv32i16.i64.i64(<vscale x 32 x i16> [[MASKEDOFF]], <vscale x 32 x i16> [[OP1]], i64 [[SHIFT]], <vscale x 32 x i1> [[MASK]], i64 0, i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vssrl_vx_u16m8_tumu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, size_t shift, size_t vl) {
- return __riscv_vssrl_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssrl_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i32> @test_vssrl_vv_u32mf2_tumu
// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vssrl.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[MASKEDOFF]], <vscale x 1 x i32> [[OP1]], <vscale x 1 x i32> [[SHIFT]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vssrl.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[MASKEDOFF]], <vscale x 1 x i32> [[OP1]], <vscale x 1 x i32> [[SHIFT]], <vscale x 1 x i1> [[MASK]], i64 0, i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vssrl_vv_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t shift, size_t vl) {
- return __riscv_vssrl_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssrl_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i32> @test_vssrl_vx_u32mf2_tumu
// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vssrl.mask.nxv1i32.i64.i64(<vscale x 1 x i32> [[MASKEDOFF]], <vscale x 1 x i32> [[OP1]], i64 [[SHIFT]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vssrl.mask.nxv1i32.i64.i64(<vscale x 1 x i32> [[MASKEDOFF]], <vscale x 1 x i32> [[OP1]], i64 [[SHIFT]], <vscale x 1 x i1> [[MASK]], i64 0, i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vssrl_vx_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, size_t shift, size_t vl) {
- return __riscv_vssrl_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssrl_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i32> @test_vssrl_vv_u32m1_tumu
// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vssrl.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[MASKEDOFF]], <vscale x 2 x i32> [[OP1]], <vscale x 2 x i32> [[SHIFT]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vssrl.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[MASKEDOFF]], <vscale x 2 x i32> [[OP1]], <vscale x 2 x i32> [[SHIFT]], <vscale x 2 x i1> [[MASK]], i64 0, i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vssrl_vv_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t shift, size_t vl) {
- return __riscv_vssrl_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssrl_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i32> @test_vssrl_vx_u32m1_tumu
// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vssrl.mask.nxv2i32.i64.i64(<vscale x 2 x i32> [[MASKEDOFF]], <vscale x 2 x i32> [[OP1]], i64 [[SHIFT]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vssrl.mask.nxv2i32.i64.i64(<vscale x 2 x i32> [[MASKEDOFF]], <vscale x 2 x i32> [[OP1]], i64 [[SHIFT]], <vscale x 2 x i1> [[MASK]], i64 0, i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vssrl_vx_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, size_t shift, size_t vl) {
- return __riscv_vssrl_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssrl_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i32> @test_vssrl_vv_u32m2_tumu
// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vssrl.mask.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[MASKEDOFF]], <vscale x 4 x i32> [[OP1]], <vscale x 4 x i32> [[SHIFT]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vssrl.mask.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[MASKEDOFF]], <vscale x 4 x i32> [[OP1]], <vscale x 4 x i32> [[SHIFT]], <vscale x 4 x i1> [[MASK]], i64 0, i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vssrl_vv_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t shift, size_t vl) {
- return __riscv_vssrl_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssrl_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i32> @test_vssrl_vx_u32m2_tumu
// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vssrl.mask.nxv4i32.i64.i64(<vscale x 4 x i32> [[MASKEDOFF]], <vscale x 4 x i32> [[OP1]], i64 [[SHIFT]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vssrl.mask.nxv4i32.i64.i64(<vscale x 4 x i32> [[MASKEDOFF]], <vscale x 4 x i32> [[OP1]], i64 [[SHIFT]], <vscale x 4 x i1> [[MASK]], i64 0, i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vssrl_vx_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, size_t shift, size_t vl) {
- return __riscv_vssrl_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssrl_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i32> @test_vssrl_vv_u32m4_tumu
// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vssrl.mask.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[MASKEDOFF]], <vscale x 8 x i32> [[OP1]], <vscale x 8 x i32> [[SHIFT]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vssrl.mask.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[MASKEDOFF]], <vscale x 8 x i32> [[OP1]], <vscale x 8 x i32> [[SHIFT]], <vscale x 8 x i1> [[MASK]], i64 0, i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vssrl_vv_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t shift, size_t vl) {
- return __riscv_vssrl_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssrl_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i32> @test_vssrl_vx_u32m4_tumu
// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vssrl.mask.nxv8i32.i64.i64(<vscale x 8 x i32> [[MASKEDOFF]], <vscale x 8 x i32> [[OP1]], i64 [[SHIFT]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vssrl.mask.nxv8i32.i64.i64(<vscale x 8 x i32> [[MASKEDOFF]], <vscale x 8 x i32> [[OP1]], i64 [[SHIFT]], <vscale x 8 x i1> [[MASK]], i64 0, i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vssrl_vx_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, size_t shift, size_t vl) {
- return __riscv_vssrl_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssrl_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i32> @test_vssrl_vv_u32m8_tumu
// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vssrl.mask.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[MASKEDOFF]], <vscale x 16 x i32> [[OP1]], <vscale x 16 x i32> [[SHIFT]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vssrl.mask.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[MASKEDOFF]], <vscale x 16 x i32> [[OP1]], <vscale x 16 x i32> [[SHIFT]], <vscale x 16 x i1> [[MASK]], i64 0, i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vssrl_vv_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t shift, size_t vl) {
- return __riscv_vssrl_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssrl_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i32> @test_vssrl_vx_u32m8_tumu
// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vssrl.mask.nxv16i32.i64.i64(<vscale x 16 x i32> [[MASKEDOFF]], <vscale x 16 x i32> [[OP1]], i64 [[SHIFT]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vssrl.mask.nxv16i32.i64.i64(<vscale x 16 x i32> [[MASKEDOFF]], <vscale x 16 x i32> [[OP1]], i64 [[SHIFT]], <vscale x 16 x i1> [[MASK]], i64 0, i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vssrl_vx_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, size_t shift, size_t vl) {
- return __riscv_vssrl_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssrl_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i64> @test_vssrl_vv_u64m1_tumu
// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vssrl.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[MASKEDOFF]], <vscale x 1 x i64> [[OP1]], <vscale x 1 x i64> [[SHIFT]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vssrl.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[MASKEDOFF]], <vscale x 1 x i64> [[OP1]], <vscale x 1 x i64> [[SHIFT]], <vscale x 1 x i1> [[MASK]], i64 0, i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vssrl_vv_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t shift, size_t vl) {
- return __riscv_vssrl_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssrl_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i64> @test_vssrl_vx_u64m1_tumu
// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vssrl.mask.nxv1i64.i64.i64(<vscale x 1 x i64> [[MASKEDOFF]], <vscale x 1 x i64> [[OP1]], i64 [[SHIFT]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vssrl.mask.nxv1i64.i64.i64(<vscale x 1 x i64> [[MASKEDOFF]], <vscale x 1 x i64> [[OP1]], i64 [[SHIFT]], <vscale x 1 x i1> [[MASK]], i64 0, i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vssrl_vx_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, size_t shift, size_t vl) {
- return __riscv_vssrl_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssrl_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i64> @test_vssrl_vv_u64m2_tumu
// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vssrl.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[MASKEDOFF]], <vscale x 2 x i64> [[OP1]], <vscale x 2 x i64> [[SHIFT]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vssrl.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[MASKEDOFF]], <vscale x 2 x i64> [[OP1]], <vscale x 2 x i64> [[SHIFT]], <vscale x 2 x i1> [[MASK]], i64 0, i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vssrl_vv_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t shift, size_t vl) {
- return __riscv_vssrl_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssrl_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i64> @test_vssrl_vx_u64m2_tumu
// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vssrl.mask.nxv2i64.i64.i64(<vscale x 2 x i64> [[MASKEDOFF]], <vscale x 2 x i64> [[OP1]], i64 [[SHIFT]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vssrl.mask.nxv2i64.i64.i64(<vscale x 2 x i64> [[MASKEDOFF]], <vscale x 2 x i64> [[OP1]], i64 [[SHIFT]], <vscale x 2 x i1> [[MASK]], i64 0, i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vssrl_vx_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, size_t shift, size_t vl) {
- return __riscv_vssrl_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssrl_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i64> @test_vssrl_vv_u64m4_tumu
// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vssrl.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[MASKEDOFF]], <vscale x 4 x i64> [[OP1]], <vscale x 4 x i64> [[SHIFT]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vssrl.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[MASKEDOFF]], <vscale x 4 x i64> [[OP1]], <vscale x 4 x i64> [[SHIFT]], <vscale x 4 x i1> [[MASK]], i64 0, i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vssrl_vv_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t shift, size_t vl) {
- return __riscv_vssrl_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssrl_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i64> @test_vssrl_vx_u64m4_tumu
// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vssrl.mask.nxv4i64.i64.i64(<vscale x 4 x i64> [[MASKEDOFF]], <vscale x 4 x i64> [[OP1]], i64 [[SHIFT]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vssrl.mask.nxv4i64.i64.i64(<vscale x 4 x i64> [[MASKEDOFF]], <vscale x 4 x i64> [[OP1]], i64 [[SHIFT]], <vscale x 4 x i1> [[MASK]], i64 0, i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vssrl_vx_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, size_t shift, size_t vl) {
- return __riscv_vssrl_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssrl_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i64> @test_vssrl_vv_u64m8_tumu
// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vssrl.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[MASKEDOFF]], <vscale x 8 x i64> [[OP1]], <vscale x 8 x i64> [[SHIFT]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vssrl.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[MASKEDOFF]], <vscale x 8 x i64> [[OP1]], <vscale x 8 x i64> [[SHIFT]], <vscale x 8 x i1> [[MASK]], i64 0, i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vssrl_vv_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t shift, size_t vl) {
- return __riscv_vssrl_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssrl_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i64> @test_vssrl_vx_u64m8_tumu
// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vssrl.mask.nxv8i64.i64.i64(<vscale x 8 x i64> [[MASKEDOFF]], <vscale x 8 x i64> [[OP1]], i64 [[SHIFT]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vssrl.mask.nxv8i64.i64.i64(<vscale x 8 x i64> [[MASKEDOFF]], <vscale x 8 x i64> [[OP1]], i64 [[SHIFT]], <vscale x 8 x i1> [[MASK]], i64 0, i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vssrl_vx_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, size_t shift, size_t vl) {
- return __riscv_vssrl_tumu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssrl_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vssrl_vv_u8mf8_mu
// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vssrl.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF]], <vscale x 1 x i8> [[OP1]], <vscale x 1 x i8> [[SHIFT]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vssrl.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF]], <vscale x 1 x i8> [[OP1]], <vscale x 1 x i8> [[SHIFT]], <vscale x 1 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vssrl_vv_u8mf8_mu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t shift, size_t vl) {
- return __riscv_vssrl_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssrl_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vssrl_vx_u8mf8_mu
// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vssrl.mask.nxv1i8.i64.i64(<vscale x 1 x i8> [[MASKEDOFF]], <vscale x 1 x i8> [[OP1]], i64 [[SHIFT]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vssrl.mask.nxv1i8.i64.i64(<vscale x 1 x i8> [[MASKEDOFF]], <vscale x 1 x i8> [[OP1]], i64 [[SHIFT]], <vscale x 1 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vssrl_vx_u8mf8_mu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, size_t shift, size_t vl) {
- return __riscv_vssrl_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssrl_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vssrl_vv_u8mf4_mu
// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vssrl.mask.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[MASKEDOFF]], <vscale x 2 x i8> [[OP1]], <vscale x 2 x i8> [[SHIFT]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vssrl.mask.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[MASKEDOFF]], <vscale x 2 x i8> [[OP1]], <vscale x 2 x i8> [[SHIFT]], <vscale x 2 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vssrl_vv_u8mf4_mu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t shift, size_t vl) {
- return __riscv_vssrl_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssrl_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vssrl_vx_u8mf4_mu
// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vssrl.mask.nxv2i8.i64.i64(<vscale x 2 x i8> [[MASKEDOFF]], <vscale x 2 x i8> [[OP1]], i64 [[SHIFT]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vssrl.mask.nxv2i8.i64.i64(<vscale x 2 x i8> [[MASKEDOFF]], <vscale x 2 x i8> [[OP1]], i64 [[SHIFT]], <vscale x 2 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vssrl_vx_u8mf4_mu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, size_t shift, size_t vl) {
- return __riscv_vssrl_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssrl_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vssrl_vv_u8mf2_mu
// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vssrl.mask.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[MASKEDOFF]], <vscale x 4 x i8> [[OP1]], <vscale x 4 x i8> [[SHIFT]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vssrl.mask.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[MASKEDOFF]], <vscale x 4 x i8> [[OP1]], <vscale x 4 x i8> [[SHIFT]], <vscale x 4 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vssrl_vv_u8mf2_mu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t shift, size_t vl) {
- return __riscv_vssrl_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssrl_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vssrl_vx_u8mf2_mu
// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vssrl.mask.nxv4i8.i64.i64(<vscale x 4 x i8> [[MASKEDOFF]], <vscale x 4 x i8> [[OP1]], i64 [[SHIFT]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vssrl.mask.nxv4i8.i64.i64(<vscale x 4 x i8> [[MASKEDOFF]], <vscale x 4 x i8> [[OP1]], i64 [[SHIFT]], <vscale x 4 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vssrl_vx_u8mf2_mu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, size_t shift, size_t vl) {
- return __riscv_vssrl_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssrl_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vssrl_vv_u8m1_mu
// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vssrl.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF]], <vscale x 8 x i8> [[OP1]], <vscale x 8 x i8> [[SHIFT]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vssrl.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF]], <vscale x 8 x i8> [[OP1]], <vscale x 8 x i8> [[SHIFT]], <vscale x 8 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vssrl_vv_u8m1_mu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t shift, size_t vl) {
- return __riscv_vssrl_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssrl_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vssrl_vx_u8m1_mu
// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vssrl.mask.nxv8i8.i64.i64(<vscale x 8 x i8> [[MASKEDOFF]], <vscale x 8 x i8> [[OP1]], i64 [[SHIFT]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vssrl.mask.nxv8i8.i64.i64(<vscale x 8 x i8> [[MASKEDOFF]], <vscale x 8 x i8> [[OP1]], i64 [[SHIFT]], <vscale x 8 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vssrl_vx_u8m1_mu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, size_t shift, size_t vl) {
- return __riscv_vssrl_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssrl_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vssrl_vv_u8m2_mu
// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vssrl.mask.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[MASKEDOFF]], <vscale x 16 x i8> [[OP1]], <vscale x 16 x i8> [[SHIFT]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vssrl.mask.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[MASKEDOFF]], <vscale x 16 x i8> [[OP1]], <vscale x 16 x i8> [[SHIFT]], <vscale x 16 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vssrl_vv_u8m2_mu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t shift, size_t vl) {
- return __riscv_vssrl_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssrl_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vssrl_vx_u8m2_mu
// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vssrl.mask.nxv16i8.i64.i64(<vscale x 16 x i8> [[MASKEDOFF]], <vscale x 16 x i8> [[OP1]], i64 [[SHIFT]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vssrl.mask.nxv16i8.i64.i64(<vscale x 16 x i8> [[MASKEDOFF]], <vscale x 16 x i8> [[OP1]], i64 [[SHIFT]], <vscale x 16 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vssrl_vx_u8m2_mu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, size_t shift, size_t vl) {
- return __riscv_vssrl_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssrl_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vssrl_vv_u8m4_mu
// CHECK-RV64-SAME: (<vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vssrl.mask.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[MASKEDOFF]], <vscale x 32 x i8> [[OP1]], <vscale x 32 x i8> [[SHIFT]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vssrl.mask.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[MASKEDOFF]], <vscale x 32 x i8> [[OP1]], <vscale x 32 x i8> [[SHIFT]], <vscale x 32 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vssrl_vv_u8m4_mu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t shift, size_t vl) {
- return __riscv_vssrl_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssrl_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vssrl_vx_u8m4_mu
// CHECK-RV64-SAME: (<vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vssrl.mask.nxv32i8.i64.i64(<vscale x 32 x i8> [[MASKEDOFF]], <vscale x 32 x i8> [[OP1]], i64 [[SHIFT]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vssrl.mask.nxv32i8.i64.i64(<vscale x 32 x i8> [[MASKEDOFF]], <vscale x 32 x i8> [[OP1]], i64 [[SHIFT]], <vscale x 32 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vssrl_vx_u8m4_mu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, size_t shift, size_t vl) {
- return __riscv_vssrl_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssrl_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 64 x i8> @test_vssrl_vv_u8m8_mu
// CHECK-RV64-SAME: (<vscale x 64 x i1> [[MASK:%.*]], <vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vssrl.mask.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[MASKEDOFF]], <vscale x 64 x i8> [[OP1]], <vscale x 64 x i8> [[SHIFT]], <vscale x 64 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vssrl.mask.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[MASKEDOFF]], <vscale x 64 x i8> [[OP1]], <vscale x 64 x i8> [[SHIFT]], <vscale x 64 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vssrl_vv_u8m8_mu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t shift, size_t vl) {
- return __riscv_vssrl_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssrl_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 64 x i8> @test_vssrl_vx_u8m8_mu
// CHECK-RV64-SAME: (<vscale x 64 x i1> [[MASK:%.*]], <vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vssrl.mask.nxv64i8.i64.i64(<vscale x 64 x i8> [[MASKEDOFF]], <vscale x 64 x i8> [[OP1]], i64 [[SHIFT]], <vscale x 64 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vssrl.mask.nxv64i8.i64.i64(<vscale x 64 x i8> [[MASKEDOFF]], <vscale x 64 x i8> [[OP1]], i64 [[SHIFT]], <vscale x 64 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vssrl_vx_u8m8_mu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, size_t shift, size_t vl) {
- return __riscv_vssrl_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssrl_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i16> @test_vssrl_vv_u16mf4_mu
// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vssrl.mask.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[MASKEDOFF]], <vscale x 1 x i16> [[OP1]], <vscale x 1 x i16> [[SHIFT]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vssrl.mask.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[MASKEDOFF]], <vscale x 1 x i16> [[OP1]], <vscale x 1 x i16> [[SHIFT]], <vscale x 1 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vssrl_vv_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t shift, size_t vl) {
- return __riscv_vssrl_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssrl_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i16> @test_vssrl_vx_u16mf4_mu
// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vssrl.mask.nxv1i16.i64.i64(<vscale x 1 x i16> [[MASKEDOFF]], <vscale x 1 x i16> [[OP1]], i64 [[SHIFT]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vssrl.mask.nxv1i16.i64.i64(<vscale x 1 x i16> [[MASKEDOFF]], <vscale x 1 x i16> [[OP1]], i64 [[SHIFT]], <vscale x 1 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vssrl_vx_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, size_t shift, size_t vl) {
- return __riscv_vssrl_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssrl_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i16> @test_vssrl_vv_u16mf2_mu
// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vssrl.mask.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF]], <vscale x 2 x i16> [[OP1]], <vscale x 2 x i16> [[SHIFT]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vssrl.mask.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF]], <vscale x 2 x i16> [[OP1]], <vscale x 2 x i16> [[SHIFT]], <vscale x 2 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vssrl_vv_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t shift, size_t vl) {
- return __riscv_vssrl_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssrl_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i16> @test_vssrl_vx_u16mf2_mu
// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vssrl.mask.nxv2i16.i64.i64(<vscale x 2 x i16> [[MASKEDOFF]], <vscale x 2 x i16> [[OP1]], i64 [[SHIFT]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vssrl.mask.nxv2i16.i64.i64(<vscale x 2 x i16> [[MASKEDOFF]], <vscale x 2 x i16> [[OP1]], i64 [[SHIFT]], <vscale x 2 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vssrl_vx_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, size_t shift, size_t vl) {
- return __riscv_vssrl_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssrl_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vssrl_vv_u16m1_mu
// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vssrl.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF]], <vscale x 4 x i16> [[OP1]], <vscale x 4 x i16> [[SHIFT]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vssrl.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF]], <vscale x 4 x i16> [[OP1]], <vscale x 4 x i16> [[SHIFT]], <vscale x 4 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vssrl_vv_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t shift, size_t vl) {
- return __riscv_vssrl_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssrl_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vssrl_vx_u16m1_mu
// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vssrl.mask.nxv4i16.i64.i64(<vscale x 4 x i16> [[MASKEDOFF]], <vscale x 4 x i16> [[OP1]], i64 [[SHIFT]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vssrl.mask.nxv4i16.i64.i64(<vscale x 4 x i16> [[MASKEDOFF]], <vscale x 4 x i16> [[OP1]], i64 [[SHIFT]], <vscale x 4 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vssrl_vx_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, size_t shift, size_t vl) {
- return __riscv_vssrl_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssrl_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i16> @test_vssrl_vv_u16m2_mu
// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vssrl.mask.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[MASKEDOFF]], <vscale x 8 x i16> [[OP1]], <vscale x 8 x i16> [[SHIFT]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vssrl.mask.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[MASKEDOFF]], <vscale x 8 x i16> [[OP1]], <vscale x 8 x i16> [[SHIFT]], <vscale x 8 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vssrl_vv_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t shift, size_t vl) {
- return __riscv_vssrl_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssrl_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i16> @test_vssrl_vx_u16m2_mu
// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vssrl.mask.nxv8i16.i64.i64(<vscale x 8 x i16> [[MASKEDOFF]], <vscale x 8 x i16> [[OP1]], i64 [[SHIFT]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vssrl.mask.nxv8i16.i64.i64(<vscale x 8 x i16> [[MASKEDOFF]], <vscale x 8 x i16> [[OP1]], i64 [[SHIFT]], <vscale x 8 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vssrl_vx_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, size_t shift, size_t vl) {
- return __riscv_vssrl_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssrl_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i16> @test_vssrl_vv_u16m4_mu
// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vssrl.mask.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[MASKEDOFF]], <vscale x 16 x i16> [[OP1]], <vscale x 16 x i16> [[SHIFT]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vssrl.mask.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[MASKEDOFF]], <vscale x 16 x i16> [[OP1]], <vscale x 16 x i16> [[SHIFT]], <vscale x 16 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vssrl_vv_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t shift, size_t vl) {
- return __riscv_vssrl_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssrl_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i16> @test_vssrl_vx_u16m4_mu
// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vssrl.mask.nxv16i16.i64.i64(<vscale x 16 x i16> [[MASKEDOFF]], <vscale x 16 x i16> [[OP1]], i64 [[SHIFT]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vssrl.mask.nxv16i16.i64.i64(<vscale x 16 x i16> [[MASKEDOFF]], <vscale x 16 x i16> [[OP1]], i64 [[SHIFT]], <vscale x 16 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vssrl_vx_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, size_t shift, size_t vl) {
- return __riscv_vssrl_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssrl_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i16> @test_vssrl_vv_u16m8_mu
// CHECK-RV64-SAME: (<vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vssrl.mask.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[MASKEDOFF]], <vscale x 32 x i16> [[OP1]], <vscale x 32 x i16> [[SHIFT]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vssrl.mask.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[MASKEDOFF]], <vscale x 32 x i16> [[OP1]], <vscale x 32 x i16> [[SHIFT]], <vscale x 32 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vssrl_vv_u16m8_mu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t shift, size_t vl) {
- return __riscv_vssrl_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssrl_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i16> @test_vssrl_vx_u16m8_mu
// CHECK-RV64-SAME: (<vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vssrl.mask.nxv32i16.i64.i64(<vscale x 32 x i16> [[MASKEDOFF]], <vscale x 32 x i16> [[OP1]], i64 [[SHIFT]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vssrl.mask.nxv32i16.i64.i64(<vscale x 32 x i16> [[MASKEDOFF]], <vscale x 32 x i16> [[OP1]], i64 [[SHIFT]], <vscale x 32 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vssrl_vx_u16m8_mu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, size_t shift, size_t vl) {
- return __riscv_vssrl_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssrl_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i32> @test_vssrl_vv_u32mf2_mu
// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vssrl.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[MASKEDOFF]], <vscale x 1 x i32> [[OP1]], <vscale x 1 x i32> [[SHIFT]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vssrl.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[MASKEDOFF]], <vscale x 1 x i32> [[OP1]], <vscale x 1 x i32> [[SHIFT]], <vscale x 1 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vssrl_vv_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t shift, size_t vl) {
- return __riscv_vssrl_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssrl_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i32> @test_vssrl_vx_u32mf2_mu
// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vssrl.mask.nxv1i32.i64.i64(<vscale x 1 x i32> [[MASKEDOFF]], <vscale x 1 x i32> [[OP1]], i64 [[SHIFT]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vssrl.mask.nxv1i32.i64.i64(<vscale x 1 x i32> [[MASKEDOFF]], <vscale x 1 x i32> [[OP1]], i64 [[SHIFT]], <vscale x 1 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vssrl_vx_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, size_t shift, size_t vl) {
- return __riscv_vssrl_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssrl_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i32> @test_vssrl_vv_u32m1_mu
// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vssrl.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[MASKEDOFF]], <vscale x 2 x i32> [[OP1]], <vscale x 2 x i32> [[SHIFT]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vssrl.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[MASKEDOFF]], <vscale x 2 x i32> [[OP1]], <vscale x 2 x i32> [[SHIFT]], <vscale x 2 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vssrl_vv_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t shift, size_t vl) {
- return __riscv_vssrl_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssrl_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i32> @test_vssrl_vx_u32m1_mu
// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vssrl.mask.nxv2i32.i64.i64(<vscale x 2 x i32> [[MASKEDOFF]], <vscale x 2 x i32> [[OP1]], i64 [[SHIFT]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vssrl.mask.nxv2i32.i64.i64(<vscale x 2 x i32> [[MASKEDOFF]], <vscale x 2 x i32> [[OP1]], i64 [[SHIFT]], <vscale x 2 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vssrl_vx_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, size_t shift, size_t vl) {
- return __riscv_vssrl_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssrl_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i32> @test_vssrl_vv_u32m2_mu
// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vssrl.mask.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[MASKEDOFF]], <vscale x 4 x i32> [[OP1]], <vscale x 4 x i32> [[SHIFT]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vssrl.mask.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[MASKEDOFF]], <vscale x 4 x i32> [[OP1]], <vscale x 4 x i32> [[SHIFT]], <vscale x 4 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vssrl_vv_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t shift, size_t vl) {
- return __riscv_vssrl_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssrl_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i32> @test_vssrl_vx_u32m2_mu
// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vssrl.mask.nxv4i32.i64.i64(<vscale x 4 x i32> [[MASKEDOFF]], <vscale x 4 x i32> [[OP1]], i64 [[SHIFT]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vssrl.mask.nxv4i32.i64.i64(<vscale x 4 x i32> [[MASKEDOFF]], <vscale x 4 x i32> [[OP1]], i64 [[SHIFT]], <vscale x 4 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vssrl_vx_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, size_t shift, size_t vl) {
- return __riscv_vssrl_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssrl_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i32> @test_vssrl_vv_u32m4_mu
// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vssrl.mask.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[MASKEDOFF]], <vscale x 8 x i32> [[OP1]], <vscale x 8 x i32> [[SHIFT]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vssrl.mask.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[MASKEDOFF]], <vscale x 8 x i32> [[OP1]], <vscale x 8 x i32> [[SHIFT]], <vscale x 8 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vssrl_vv_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t shift, size_t vl) {
- return __riscv_vssrl_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssrl_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i32> @test_vssrl_vx_u32m4_mu
// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vssrl.mask.nxv8i32.i64.i64(<vscale x 8 x i32> [[MASKEDOFF]], <vscale x 8 x i32> [[OP1]], i64 [[SHIFT]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vssrl.mask.nxv8i32.i64.i64(<vscale x 8 x i32> [[MASKEDOFF]], <vscale x 8 x i32> [[OP1]], i64 [[SHIFT]], <vscale x 8 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vssrl_vx_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, size_t shift, size_t vl) {
- return __riscv_vssrl_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssrl_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i32> @test_vssrl_vv_u32m8_mu
// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vssrl.mask.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[MASKEDOFF]], <vscale x 16 x i32> [[OP1]], <vscale x 16 x i32> [[SHIFT]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vssrl.mask.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[MASKEDOFF]], <vscale x 16 x i32> [[OP1]], <vscale x 16 x i32> [[SHIFT]], <vscale x 16 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vssrl_vv_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t shift, size_t vl) {
- return __riscv_vssrl_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssrl_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i32> @test_vssrl_vx_u32m8_mu
// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vssrl.mask.nxv16i32.i64.i64(<vscale x 16 x i32> [[MASKEDOFF]], <vscale x 16 x i32> [[OP1]], i64 [[SHIFT]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vssrl.mask.nxv16i32.i64.i64(<vscale x 16 x i32> [[MASKEDOFF]], <vscale x 16 x i32> [[OP1]], i64 [[SHIFT]], <vscale x 16 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vssrl_vx_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, size_t shift, size_t vl) {
- return __riscv_vssrl_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssrl_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i64> @test_vssrl_vv_u64m1_mu
// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vssrl.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[MASKEDOFF]], <vscale x 1 x i64> [[OP1]], <vscale x 1 x i64> [[SHIFT]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vssrl.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[MASKEDOFF]], <vscale x 1 x i64> [[OP1]], <vscale x 1 x i64> [[SHIFT]], <vscale x 1 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vssrl_vv_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t shift, size_t vl) {
- return __riscv_vssrl_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssrl_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i64> @test_vssrl_vx_u64m1_mu
// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vssrl.mask.nxv1i64.i64.i64(<vscale x 1 x i64> [[MASKEDOFF]], <vscale x 1 x i64> [[OP1]], i64 [[SHIFT]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vssrl.mask.nxv1i64.i64.i64(<vscale x 1 x i64> [[MASKEDOFF]], <vscale x 1 x i64> [[OP1]], i64 [[SHIFT]], <vscale x 1 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vssrl_vx_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, size_t shift, size_t vl) {
- return __riscv_vssrl_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssrl_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i64> @test_vssrl_vv_u64m2_mu
// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vssrl.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[MASKEDOFF]], <vscale x 2 x i64> [[OP1]], <vscale x 2 x i64> [[SHIFT]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vssrl.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[MASKEDOFF]], <vscale x 2 x i64> [[OP1]], <vscale x 2 x i64> [[SHIFT]], <vscale x 2 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vssrl_vv_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t shift, size_t vl) {
- return __riscv_vssrl_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssrl_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i64> @test_vssrl_vx_u64m2_mu
// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vssrl.mask.nxv2i64.i64.i64(<vscale x 2 x i64> [[MASKEDOFF]], <vscale x 2 x i64> [[OP1]], i64 [[SHIFT]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vssrl.mask.nxv2i64.i64.i64(<vscale x 2 x i64> [[MASKEDOFF]], <vscale x 2 x i64> [[OP1]], i64 [[SHIFT]], <vscale x 2 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vssrl_vx_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, size_t shift, size_t vl) {
- return __riscv_vssrl_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssrl_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i64> @test_vssrl_vv_u64m4_mu
// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vssrl.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[MASKEDOFF]], <vscale x 4 x i64> [[OP1]], <vscale x 4 x i64> [[SHIFT]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vssrl.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[MASKEDOFF]], <vscale x 4 x i64> [[OP1]], <vscale x 4 x i64> [[SHIFT]], <vscale x 4 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vssrl_vv_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t shift, size_t vl) {
- return __riscv_vssrl_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssrl_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i64> @test_vssrl_vx_u64m4_mu
// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vssrl.mask.nxv4i64.i64.i64(<vscale x 4 x i64> [[MASKEDOFF]], <vscale x 4 x i64> [[OP1]], i64 [[SHIFT]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vssrl.mask.nxv4i64.i64.i64(<vscale x 4 x i64> [[MASKEDOFF]], <vscale x 4 x i64> [[OP1]], i64 [[SHIFT]], <vscale x 4 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vssrl_vx_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, size_t shift, size_t vl) {
- return __riscv_vssrl_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssrl_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i64> @test_vssrl_vv_u64m8_mu
// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vssrl.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[MASKEDOFF]], <vscale x 8 x i64> [[OP1]], <vscale x 8 x i64> [[SHIFT]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vssrl.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[MASKEDOFF]], <vscale x 8 x i64> [[OP1]], <vscale x 8 x i64> [[SHIFT]], <vscale x 8 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vssrl_vv_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t shift, size_t vl) {
- return __riscv_vssrl_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssrl_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i64> @test_vssrl_vx_u64m8_mu
// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 noundef [[SHIFT:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vssrl.mask.nxv8i64.i64.i64(<vscale x 8 x i64> [[MASKEDOFF]], <vscale x 8 x i64> [[OP1]], i64 [[SHIFT]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vssrl.mask.nxv8i64.i64.i64(<vscale x 8 x i64> [[MASKEDOFF]], <vscale x 8 x i64> [[OP1]], i64 [[SHIFT]], <vscale x 8 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vssrl_vx_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, size_t shift, size_t vl) {
- return __riscv_vssrl_mu(mask, maskedoff, op1, shift, vl);
+ return __riscv_vssrl_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
}
--- /dev/null
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d \
+// RUN: -target-feature +v -target-feature +zfh -target-feature +experimental-zvfh \
+// RUN: -fsyntax-only -verify %s
+
+#include <riscv_vector.h>
+
+vint32m1_t test_vnclip_wv_i32m1(vint64m2_t op1, vuint32m1_t shift, size_t vl) {
+ // expected-error@+1 {{argument value 5 is outside the valid range [0, 3]}}
+ return __riscv_vnclip_wv_i32m1(op1, shift, 5, vl);
+}
+
+vint32m1_t test_vnclip_wx_i32m1(vint64m2_t op1, size_t shift, size_t vl) {
+ // expected-error@+1 {{argument value 5 is outside the valid range [0, 3]}}
+ return __riscv_vnclip_wx_i32m1(op1, shift, 5, vl);
+}
+
+vint32m1_t test_vnclip_wv_i32m1_m(vbool32_t mask, vint64m2_t op1, vuint32m1_t shift, size_t vl) {
+ // expected-error@+1 {{argument value 5 is outside the valid range [0, 3]}}
+ return __riscv_vnclip_wv_i32m1_m(mask, op1, shift, 5, vl);
+}
+
+vint32m1_t test_vnclip_wx_i32m1_m(vbool32_t mask, vint64m2_t op1, size_t shift, size_t vl) {
+ // expected-error@+1 {{argument value 5 is outside the valid range [0, 3]}}
+ return __riscv_vnclip_wx_i32m1_m(mask, op1, shift, 5, vl);
+}
+
+vint32m1_t test_vnclip_wv_i32m1_tu(vint32m1_t maskedoff, vint64m2_t op1, vuint32m1_t shift, size_t vl) {
+ // expected-error@+1 {{argument value 5 is outside the valid range [0, 3]}}
+ return __riscv_vnclip_wv_i32m1_tu(maskedoff, op1, shift, 5, vl);
+}
+
+vint32m1_t test_vnclip_wx_i32m1_tu(vint32m1_t maskedoff, vint64m2_t op1, size_t shift, size_t vl) {
+ // expected-error@+1 {{argument value 5 is outside the valid range [0, 3]}}
+ return __riscv_vnclip_wx_i32m1_tu(maskedoff, op1, shift, 5, vl);
+}
+
+vint32m1_t test_vnclip_wv_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, vint64m2_t op1, vuint32m1_t shift, size_t vl) {
+ // expected-error@+1 {{argument value 5 is outside the valid range [0, 3]}}
+ return __riscv_vnclip_wv_i32m1_tum(mask, maskedoff, op1, shift, 5, vl);
+}
+
+vint32m1_t test_vnclip_wx_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, vint64m2_t op1, size_t shift, size_t vl) {
+ // expected-error@+1 {{argument value 5 is outside the valid range [0, 3]}}
+ return __riscv_vnclip_wx_i32m1_tum(mask, maskedoff, op1, shift, 5, vl);
+}
+
+vint32m1_t test_vnclip_wv_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, vint64m2_t op1, vuint32m1_t shift, size_t vl) {
+ // expected-error@+1 {{argument value 5 is outside the valid range [0, 3]}}
+ return __riscv_vnclip_wv_i32m1_tumu(mask, maskedoff, op1, shift, 5, vl);
+}
+
+vint32m1_t test_vnclip_wx_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, vint64m2_t op1, size_t shift, size_t vl) {
+ // expected-error@+1 {{argument value 5 is outside the valid range [0, 3]}}
+ return __riscv_vnclip_wx_i32m1_tumu(mask, maskedoff, op1, shift, 5, vl);
+}
+
+vint32m1_t test_vnclip_wv_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, vint64m2_t op1, vuint32m1_t shift, size_t vl) {
+ // expected-error@+1 {{argument value 5 is outside the valid range [0, 3]}}
+ return __riscv_vnclip_wv_i32m1_mu(mask, maskedoff, op1, shift, 5, vl);
+}
+
+vint32m1_t test_vnclip_wx_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, vint64m2_t op1, size_t shift, size_t vl) {
+ // expected-error@+1 {{argument value 5 is outside the valid range [0, 3]}}
+ return __riscv_vnclip_wx_i32m1_mu(mask, maskedoff, op1, shift, 5, vl);
+}
--- /dev/null
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d \
+// RUN: -target-feature +v -target-feature +zfh -target-feature +experimental-zvfh \
+// RUN: -fsyntax-only -verify %s
+
+#include <riscv_vector.h>
+
+vuint32m1_t test_vnclipu_wv_u32m1(vuint64m2_t op1, vuint32m1_t shift, size_t vl) {
+ // expected-error@+1 {{argument value 5 is outside the valid range [0, 3]}}
+ return __riscv_vnclipu_wv_u32m1(op1, shift, 5, vl);
+}
+
+vuint32m1_t test_vnclipu_wx_u32m1(vuint64m2_t op1, size_t shift, size_t vl) {
+ // expected-error@+1 {{argument value 5 is outside the valid range [0, 3]}}
+ return __riscv_vnclipu_wx_u32m1(op1, shift, 5, vl);
+}
+
+vuint32m1_t test_vnclipu_wv_u32m1_m(vbool32_t mask, vuint64m2_t op1, vuint32m1_t shift, size_t vl) {
+ // expected-error@+1 {{argument value 5 is outside the valid range [0, 3]}}
+ return __riscv_vnclipu_wv_u32m1_m(mask, op1, shift, 5, vl);
+}
+
+vuint32m1_t test_vnclipu_wx_u32m1_m(vbool32_t mask, vuint64m2_t op1, size_t shift, size_t vl) {
+ // expected-error@+1 {{argument value 5 is outside the valid range [0, 3]}}
+ return __riscv_vnclipu_wx_u32m1_m(mask, op1, shift, 5, vl);
+}
+
+vuint32m1_t test_vnclipu_wv_u32m1_tu(vuint32m1_t maskedoff, vuint64m2_t op1, vuint32m1_t shift, size_t vl) {
+ // expected-error@+1 {{argument value 5 is outside the valid range [0, 3]}}
+ return __riscv_vnclipu_wv_u32m1_tu(maskedoff, op1, shift, 5, vl);
+}
+
+vuint32m1_t test_vnclipu_wx_u32m1_tu(vuint32m1_t maskedoff, vuint64m2_t op1, size_t shift, size_t vl) {
+ // expected-error@+1 {{argument value 5 is outside the valid range [0, 3]}}
+ return __riscv_vnclipu_wx_u32m1_tu(maskedoff, op1, shift, 5, vl);
+}
+
+vuint32m1_t test_vnclipu_wv_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vuint64m2_t op1, vuint32m1_t shift, size_t vl) {
+ // expected-error@+1 {{argument value 5 is outside the valid range [0, 3]}}
+ return __riscv_vnclipu_wv_u32m1_tum(mask, maskedoff, op1, shift, 5, vl);
+}
+
+vuint32m1_t test_vnclipu_wx_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vuint64m2_t op1, size_t shift, size_t vl) {
+ // expected-error@+1 {{argument value 5 is outside the valid range [0, 3]}}
+ return __riscv_vnclipu_wx_u32m1_tum(mask, maskedoff, op1, shift, 5, vl);
+}
+
+vuint32m1_t test_vnclipu_wv_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vuint64m2_t op1, vuint32m1_t shift, size_t vl) {
+ // expected-error@+1 {{argument value 5 is outside the valid range [0, 3]}}
+ return __riscv_vnclipu_wv_u32m1_tumu(mask, maskedoff, op1, shift, 5, vl);
+}
+
+vuint32m1_t test_vnclipu_wx_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vuint64m2_t op1, size_t shift, size_t vl) {
+ // expected-error@+1 {{argument value 5 is outside the valid range [0, 3]}}
+ return __riscv_vnclipu_wx_u32m1_tumu(mask, maskedoff, op1, shift, 5, vl);
+}
+
+vuint32m1_t test_vnclipu_wv_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vuint64m2_t op1, vuint32m1_t shift, size_t vl) {
+ // expected-error@+1 {{argument value 5 is outside the valid range [0, 3]}}
+ return __riscv_vnclipu_wv_u32m1_mu(mask, maskedoff, op1, shift, 5, vl);
+}
+
+vuint32m1_t test_vnclipu_wx_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vuint64m2_t op1, size_t shift, size_t vl) {
+ // expected-error@+1 {{argument value 5 is outside the valid range [0, 3]}}
+ return __riscv_vnclipu_wx_u32m1_mu(mask, maskedoff, op1, shift, 5, vl);
+}
// CHECK-RV64-LABEL: @test_vsmul_vv_i64m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsmul.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> poison, <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsmul.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> poison, <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vsmul_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) {
- return __riscv_vsmul(op1, op2, vl);
+ return __riscv_vsmul(op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: @test_vsmul_vx_i64m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsmul.nxv1i64.i64.i64(<vscale x 1 x i64> poison, <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsmul.nxv1i64.i64.i64(<vscale x 1 x i64> poison, <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vsmul_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) {
- return __riscv_vsmul(op1, op2, vl);
+ return __riscv_vsmul(op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: @test_vsmul_vv_i64m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsmul.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> poison, <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsmul.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> poison, <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vsmul_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) {
- return __riscv_vsmul(op1, op2, vl);
+ return __riscv_vsmul(op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: @test_vsmul_vx_i64m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsmul.nxv2i64.i64.i64(<vscale x 2 x i64> poison, <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsmul.nxv2i64.i64.i64(<vscale x 2 x i64> poison, <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vsmul_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) {
- return __riscv_vsmul(op1, op2, vl);
+ return __riscv_vsmul(op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: @test_vsmul_vv_i64m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsmul.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> poison, <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsmul.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> poison, <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vsmul_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) {
- return __riscv_vsmul(op1, op2, vl);
+ return __riscv_vsmul(op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: @test_vsmul_vx_i64m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsmul.nxv4i64.i64.i64(<vscale x 4 x i64> poison, <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsmul.nxv4i64.i64.i64(<vscale x 4 x i64> poison, <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vsmul_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) {
- return __riscv_vsmul(op1, op2, vl);
+ return __riscv_vsmul(op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: @test_vsmul_vv_i64m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsmul.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> poison, <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsmul.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> poison, <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vsmul_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) {
- return __riscv_vsmul(op1, op2, vl);
+ return __riscv_vsmul(op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: @test_vsmul_vx_i64m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsmul.nxv8i64.i64.i64(<vscale x 8 x i64> poison, <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsmul.nxv8i64.i64.i64(<vscale x 8 x i64> poison, <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vsmul_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) {
- return __riscv_vsmul(op1, op2, vl);
+ return __riscv_vsmul(op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: @test_vsmul_vv_i64m1_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsmul.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> poison, <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsmul.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> poison, <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 0, i64 [[VL:%.*]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vsmul_vv_i64m1_m(vbool64_t mask, vint64m1_t op1, vint64m1_t op2, size_t vl) {
- return __riscv_vsmul(mask, op1, op2, vl);
+ return __riscv_vsmul(mask, op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: @test_vsmul_vx_i64m1_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsmul.mask.nxv1i64.i64.i64(<vscale x 1 x i64> poison, <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsmul.mask.nxv1i64.i64.i64(<vscale x 1 x i64> poison, <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 0, i64 [[VL:%.*]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vsmul_vx_i64m1_m(vbool64_t mask, vint64m1_t op1, int64_t op2, size_t vl) {
- return __riscv_vsmul(mask, op1, op2, vl);
+ return __riscv_vsmul(mask, op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: @test_vsmul_vv_i64m2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsmul.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> poison, <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsmul.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> poison, <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 0, i64 [[VL:%.*]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vsmul_vv_i64m2_m(vbool32_t mask, vint64m2_t op1, vint64m2_t op2, size_t vl) {
- return __riscv_vsmul(mask, op1, op2, vl);
+ return __riscv_vsmul(mask, op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: @test_vsmul_vx_i64m2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsmul.mask.nxv2i64.i64.i64(<vscale x 2 x i64> poison, <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsmul.mask.nxv2i64.i64.i64(<vscale x 2 x i64> poison, <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 0, i64 [[VL:%.*]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vsmul_vx_i64m2_m(vbool32_t mask, vint64m2_t op1, int64_t op2, size_t vl) {
- return __riscv_vsmul(mask, op1, op2, vl);
+ return __riscv_vsmul(mask, op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: @test_vsmul_vv_i64m4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsmul.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> poison, <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsmul.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> poison, <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 0, i64 [[VL:%.*]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vsmul_vv_i64m4_m(vbool16_t mask, vint64m4_t op1, vint64m4_t op2, size_t vl) {
- return __riscv_vsmul(mask, op1, op2, vl);
+ return __riscv_vsmul(mask, op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: @test_vsmul_vx_i64m4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsmul.mask.nxv4i64.i64.i64(<vscale x 4 x i64> poison, <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsmul.mask.nxv4i64.i64.i64(<vscale x 4 x i64> poison, <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 0, i64 [[VL:%.*]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vsmul_vx_i64m4_m(vbool16_t mask, vint64m4_t op1, int64_t op2, size_t vl) {
- return __riscv_vsmul(mask, op1, op2, vl);
+ return __riscv_vsmul(mask, op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: @test_vsmul_vv_i64m8_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsmul.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> poison, <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsmul.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> poison, <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 0, i64 [[VL:%.*]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vsmul_vv_i64m8_m(vbool8_t mask, vint64m8_t op1, vint64m8_t op2, size_t vl) {
- return __riscv_vsmul(mask, op1, op2, vl);
+ return __riscv_vsmul(mask, op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: @test_vsmul_vx_i64m8_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsmul.mask.nxv8i64.i64.i64(<vscale x 8 x i64> poison, <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsmul.mask.nxv8i64.i64.i64(<vscale x 8 x i64> poison, <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 0, i64 [[VL:%.*]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vsmul_vx_i64m8_m(vbool8_t mask, vint64m8_t op1, int64_t op2, size_t vl) {
- return __riscv_vsmul(mask, op1, op2, vl);
+ return __riscv_vsmul(mask, op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: @test_vsmul_vv_i64m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsmul.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> poison, <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsmul.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> poison, <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vsmul_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) {
- return __riscv_vsmul_vv_i64m1(op1, op2, vl);
+ return __riscv_vsmul_vv_i64m1(op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: @test_vsmul_vx_i64m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsmul.nxv1i64.i64.i64(<vscale x 1 x i64> poison, <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsmul.nxv1i64.i64.i64(<vscale x 1 x i64> poison, <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vsmul_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) {
- return __riscv_vsmul_vx_i64m1(op1, op2, vl);
+ return __riscv_vsmul_vx_i64m1(op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: @test_vsmul_vv_i64m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsmul.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> poison, <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsmul.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> poison, <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vsmul_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) {
- return __riscv_vsmul_vv_i64m2(op1, op2, vl);
+ return __riscv_vsmul_vv_i64m2(op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: @test_vsmul_vx_i64m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsmul.nxv2i64.i64.i64(<vscale x 2 x i64> poison, <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsmul.nxv2i64.i64.i64(<vscale x 2 x i64> poison, <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vsmul_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) {
- return __riscv_vsmul_vx_i64m2(op1, op2, vl);
+ return __riscv_vsmul_vx_i64m2(op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: @test_vsmul_vv_i64m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsmul.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> poison, <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsmul.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> poison, <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vsmul_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) {
- return __riscv_vsmul_vv_i64m4(op1, op2, vl);
+ return __riscv_vsmul_vv_i64m4(op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: @test_vsmul_vx_i64m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsmul.nxv4i64.i64.i64(<vscale x 4 x i64> poison, <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsmul.nxv4i64.i64.i64(<vscale x 4 x i64> poison, <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vsmul_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) {
- return __riscv_vsmul_vx_i64m4(op1, op2, vl);
+ return __riscv_vsmul_vx_i64m4(op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: @test_vsmul_vv_i64m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsmul.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> poison, <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsmul.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> poison, <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vsmul_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) {
- return __riscv_vsmul_vv_i64m8(op1, op2, vl);
+ return __riscv_vsmul_vv_i64m8(op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: @test_vsmul_vx_i64m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsmul.nxv8i64.i64.i64(<vscale x 8 x i64> poison, <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsmul.nxv8i64.i64.i64(<vscale x 8 x i64> poison, <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vsmul_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) {
- return __riscv_vsmul_vx_i64m8(op1, op2, vl);
+ return __riscv_vsmul_vx_i64m8(op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: @test_vsmul_vv_i64m1_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsmul.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> poison, <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsmul.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> poison, <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 0, i64 [[VL:%.*]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vsmul_vv_i64m1_m(vbool64_t mask, vint64m1_t op1, vint64m1_t op2, size_t vl) {
- return __riscv_vsmul_vv_i64m1_m(mask, op1, op2, vl);
+ return __riscv_vsmul_vv_i64m1_m(mask, op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: @test_vsmul_vx_i64m1_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsmul.mask.nxv1i64.i64.i64(<vscale x 1 x i64> poison, <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsmul.mask.nxv1i64.i64.i64(<vscale x 1 x i64> poison, <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 0, i64 [[VL:%.*]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vsmul_vx_i64m1_m(vbool64_t mask, vint64m1_t op1, int64_t op2, size_t vl) {
- return __riscv_vsmul_vx_i64m1_m(mask, op1, op2, vl);
+ return __riscv_vsmul_vx_i64m1_m(mask, op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: @test_vsmul_vv_i64m2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsmul.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> poison, <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsmul.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> poison, <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 0, i64 [[VL:%.*]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vsmul_vv_i64m2_m(vbool32_t mask, vint64m2_t op1, vint64m2_t op2, size_t vl) {
- return __riscv_vsmul_vv_i64m2_m(mask, op1, op2, vl);
+ return __riscv_vsmul_vv_i64m2_m(mask, op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: @test_vsmul_vx_i64m2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsmul.mask.nxv2i64.i64.i64(<vscale x 2 x i64> poison, <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsmul.mask.nxv2i64.i64.i64(<vscale x 2 x i64> poison, <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 0, i64 [[VL:%.*]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vsmul_vx_i64m2_m(vbool32_t mask, vint64m2_t op1, int64_t op2, size_t vl) {
- return __riscv_vsmul_vx_i64m2_m(mask, op1, op2, vl);
+ return __riscv_vsmul_vx_i64m2_m(mask, op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: @test_vsmul_vv_i64m4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsmul.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> poison, <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsmul.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> poison, <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 0, i64 [[VL:%.*]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vsmul_vv_i64m4_m(vbool16_t mask, vint64m4_t op1, vint64m4_t op2, size_t vl) {
- return __riscv_vsmul_vv_i64m4_m(mask, op1, op2, vl);
+ return __riscv_vsmul_vv_i64m4_m(mask, op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: @test_vsmul_vx_i64m4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsmul.mask.nxv4i64.i64.i64(<vscale x 4 x i64> poison, <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsmul.mask.nxv4i64.i64.i64(<vscale x 4 x i64> poison, <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 0, i64 [[VL:%.*]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vsmul_vx_i64m4_m(vbool16_t mask, vint64m4_t op1, int64_t op2, size_t vl) {
- return __riscv_vsmul_vx_i64m4_m(mask, op1, op2, vl);
+ return __riscv_vsmul_vx_i64m4_m(mask, op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: @test_vsmul_vv_i64m8_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsmul.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> poison, <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsmul.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> poison, <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 0, i64 [[VL:%.*]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vsmul_vv_i64m8_m(vbool8_t mask, vint64m8_t op1, vint64m8_t op2, size_t vl) {
- return __riscv_vsmul_vv_i64m8_m(mask, op1, op2, vl);
+ return __riscv_vsmul_vv_i64m8_m(mask, op1, op2, __RISCV_VXRM_RNU, vl);
}
// CHECK-RV64-LABEL: @test_vsmul_vx_i64m8_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsmul.mask.nxv8i64.i64.i64(<vscale x 8 x i64> poison, <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsmul.mask.nxv8i64.i64.i64(<vscale x 8 x i64> poison, <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 0, i64 [[VL:%.*]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vsmul_vx_i64m8_m(vbool8_t mask, vint64m8_t op1, int64_t op2, size_t vl) {
- return __riscv_vsmul_vx_i64m8_m(mask, op1, op2, vl);
+ return __riscv_vsmul_vx_i64m8_m(mask, op1, op2, __RISCV_VXRM_RNU, vl);
}
--- /dev/null
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d \
+// RUN: -target-feature +v -target-feature +zfh -target-feature +experimental-zvfh \
+// RUN: -fsyntax-only -verify %s
+
+#include <riscv_vector.h>
+
+vint32m1_t test_vsmul_vv_i32m1(vint32m1_t op1, vint32m1_t op2, size_t vl) {
+ // expected-error@+1 {{argument value 5 is outside the valid range [0, 3]}}
+ return __riscv_vsmul_vv_i32m1(op1, op2, 5, vl);
+}
+
+vint32m1_t test_vsmul_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) {
+ // expected-error@+1 {{argument value 5 is outside the valid range [0, 3]}}
+ return __riscv_vsmul_vx_i32m1(op1, op2, 5, vl);
+}
+
+vint32m1_t test_vsmul_vv_i32m1_m(vbool32_t mask, vint32m1_t op1, vint32m1_t op2, size_t vl) {
+ // expected-error@+1 {{argument value 5 is outside the valid range [0, 3]}}
+ return __riscv_vsmul_vv_i32m1_m(mask, op1, op2, 5, vl);
+}
+
+vint32m1_t test_vsmul_vx_i32m1_m(vbool32_t mask, vint32m1_t op1, int32_t op2, size_t vl) {
+ // expected-error@+1 {{argument value 5 is outside the valid range [0, 3]}}
+ return __riscv_vsmul_vx_i32m1_m(mask, op1, op2, 5, vl);
+}
+
+vint32m1_t test_vsmul_vv_i32m1_tu(vint32m1_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) {
+ // expected-error@+1 {{argument value 5 is outside the valid range [0, 3]}}
+ return __riscv_vsmul_vv_i32m1_tu(maskedoff, op1, op2, 5, vl);
+}
+
+vint32m1_t test_vsmul_vx_i32m1_tu(vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) {
+ // expected-error@+1 {{argument value 5 is outside the valid range [0, 3]}}
+ return __riscv_vsmul_vx_i32m1_tu(maskedoff, op1, op2, 5, vl);
+}
+
+vint32m1_t test_vsmul_vv_i32m1_tum(
+ vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) {
+ // expected-error@+1 {{argument value 5 is outside the valid range [0, 3]}}
+ return __riscv_vsmul_vv_i32m1_tum(mask, maskedoff, op1, op2, 5, vl);
+}
+
+vint32m1_t test_vsmul_vx_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) {
+ // expected-error@+1 {{argument value 5 is outside the valid range [0, 3]}}
+ return __riscv_vsmul_vx_i32m1_tum(mask, maskedoff, op1, op2, 5, vl);
+}
+
+vint32m1_t test_vsmul_vv_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) {
+ // expected-error@+1 {{argument value 5 is outside the valid range [0, 3]}}
+ return __riscv_vsmul_vv_i32m1_tumu(mask, maskedoff, op1, op2, 5, vl);
+}
+
+vint32m1_t test_vsmul_vx_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) {
+ // expected-error@+1 {{argument value 5 is outside the valid range [0, 3]}}
+ return __riscv_vsmul_vx_i32m1_tumu(mask, maskedoff, op1, op2, 5, vl);
+}
+
+vint32m1_t test_vsmul_vv_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) {
+ // expected-error@+1 {{argument value 5 is outside the valid range [0, 3]}}
+ return __riscv_vsmul_vv_i32m1_mu(mask, maskedoff, op1, op2, 5, vl);
+}
+
+vint32m1_t test_vsmul_vx_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) {
+ // expected-error@+1 {{argument value 5 is outside the valid range [0, 3]}}
+ return __riscv_vsmul_vx_i32m1_mu(mask, maskedoff, op1, op2, 5, vl);
+}
--- /dev/null
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d \
+// RUN: -target-feature +v -target-feature +zfh -target-feature +experimental-zvfh \
+// RUN: -fsyntax-only -verify %s
+
+#include <riscv_vector.h>
+
+vuint32m1_t test_vssrl_vv_u32m1(vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
+ // expected-error@+1 {{argument value 5 is outside the valid range [0, 3]}}
+ return __riscv_vssrl_vv_u32m1(op1, op2, 5, vl);
+}
+
+vuint32m1_t test_vssrl_vx_u32m1(vuint32m1_t op1, uint32_t op2, size_t vl) {
+ // expected-error@+1 {{argument value 5 is outside the valid range [0, 3]}}
+ return __riscv_vssrl_vx_u32m1(op1, op2, 5, vl);
+}
+
+vuint32m1_t test_vssrl_vv_u32m1_m(vbool32_t mask, vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
+ // expected-error@+1 {{argument value 5 is outside the valid range [0, 3]}}
+ return __riscv_vssrl_vv_u32m1_m(mask, op1, op2, 5, vl);
+}
+
+vuint32m1_t test_vssrl_vx_u32m1_m(vbool32_t mask, vuint32m1_t op1, uint32_t op2, size_t vl) {
+ // expected-error@+1 {{argument value 5 is outside the valid range [0, 3]}}
+ return __riscv_vssrl_vx_u32m1_m(mask, op1, op2, 5, vl);
+}
+
+vuint32m1_t test_vssrl_vv_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
+ // expected-error@+1 {{argument value 5 is outside the valid range [0, 3]}}
+ return __riscv_vssrl_vv_u32m1_tu(maskedoff, op1, op2, 5, vl);
+}
+
+vuint32m1_t test_vssrl_vx_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) {
+ // expected-error@+1 {{argument value 5 is outside the valid range [0, 3]}}
+ return __riscv_vssrl_vx_u32m1_tu(maskedoff, op1, op2, 5, vl);
+}
+
+vuint32m1_t test_vssrl_vv_u32m1_tum(
+ vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
+ // expected-error@+1 {{argument value 5 is outside the valid range [0, 3]}}
+ return __riscv_vssrl_vv_u32m1_tum(mask, maskedoff, op1, op2, 5, vl);
+}
+
+vuint32m1_t test_vssrl_vx_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) {
+ // expected-error@+1 {{argument value 5 is outside the valid range [0, 3]}}
+ return __riscv_vssrl_vx_u32m1_tum(mask, maskedoff, op1, op2, 5, vl);
+}
+
+vuint32m1_t test_vssrl_vv_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
+ // expected-error@+1 {{argument value 5 is outside the valid range [0, 3]}}
+ return __riscv_vssrl_vv_u32m1_tumu(mask, maskedoff, op1, op2, 5, vl);
+}
+
+vuint32m1_t test_vssrl_vx_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) {
+ // expected-error@+1 {{argument value 5 is outside the valid range [0, 3]}}
+ return __riscv_vssrl_vx_u32m1_tumu(mask, maskedoff, op1, op2, 5, vl);
+}
+
+vuint32m1_t test_vssrl_vv_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
+ // expected-error@+1 {{argument value 5 is outside the valid range [0, 3]}}
+ return __riscv_vssrl_vv_u32m1_mu(mask, maskedoff, op1, op2, 5, vl);
+}
+
+vuint32m1_t test_vssrl_vx_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) {
+ // expected-error@+1 {{argument value 5 is outside the valid range [0, 3]}}
+ return __riscv_vssrl_vx_u32m1_mu(mask, maskedoff, op1, op2, 5, vl);
+}
--- /dev/null
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d \
+// RUN: -target-feature +v -target-feature +zfh -target-feature +experimental-zvfh \
+// RUN: -fsyntax-only -verify %s
+
+#include <riscv_vector.h>
+
+vuint32m1_t test_vssrl_vv_u32m1(vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
+ // expected-error@+1 {{argument value 5 is outside the valid range [0, 3]}}
+ return __riscv_vssrl_vv_u32m1(op1, op2, 5, vl);
+}
+
+vuint32m1_t test_vssrl_vx_u32m1(vuint32m1_t op1, uint32_t op2, size_t vl) {
+ // expected-error@+1 {{argument value 5 is outside the valid range [0, 3]}}
+ return __riscv_vssrl_vx_u32m1(op1, op2, 5, vl);
+}
+
+vuint32m1_t test_vssrl_vv_u32m1_m(vbool32_t mask, vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
+ // expected-error@+1 {{argument value 5 is outside the valid range [0, 3]}}
+ return __riscv_vssrl_vv_u32m1_m(mask, op1, op2, 5, vl);
+}
+
+vuint32m1_t test_vssrl_vx_u32m1_m(vbool32_t mask, vuint32m1_t op1, uint32_t op2, size_t vl) {
+ // expected-error@+1 {{argument value 5 is outside the valid range [0, 3]}}
+ return __riscv_vssrl_vx_u32m1_m(mask, op1, op2, 5, vl);
+}
+
+vuint32m1_t test_vssrl_vv_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
+ // expected-error@+1 {{argument value 5 is outside the valid range [0, 3]}}
+ return __riscv_vssrl_vv_u32m1_tu(maskedoff, op1, op2, 5, vl);
+}
+
+vuint32m1_t test_vssrl_vx_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) {
+ // expected-error@+1 {{argument value 5 is outside the valid range [0, 3]}}
+ return __riscv_vssrl_vx_u32m1_tu(maskedoff, op1, op2, 5, vl);
+}
+
+vuint32m1_t test_vssrl_vv_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
+ // expected-error@+1 {{argument value 5 is outside the valid range [0, 3]}}
+ return __riscv_vssrl_vv_u32m1_tum(mask, maskedoff, op1, op2, 5, vl);
+}
+
+vuint32m1_t test_vssrl_vx_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) {
+ // expected-error@+1 {{argument value 5 is outside the valid range [0, 3]}}
+ return __riscv_vssrl_vx_u32m1_tum(mask, maskedoff, op1, op2, 5, vl);
+}
+
+vuint32m1_t test_vssrl_vv_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
+ // expected-error@+1 {{argument value 5 is outside the valid range [0, 3]}}
+ return __riscv_vssrl_vv_u32m1_tumu(mask, maskedoff, op1, op2, 5, vl);
+}
+
+vuint32m1_t test_vssrl_vx_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) {
+ // expected-error@+1 {{argument value 5 is outside the valid range [0, 3]}}
+ return __riscv_vssrl_vx_u32m1_tumu(mask, maskedoff, op1, op2, 5, vl);
+}
+
+vuint32m1_t test_vssrl_vv_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
+ // expected-error@+1 {{argument value 5 is outside the valid range [0, 3]}}
+ return __riscv_vssrl_vv_u32m1_mu(mask, maskedoff, op1, op2, 5, vl);
+}
+
+vuint32m1_t test_vssrl_vx_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) {
+ // expected-error@+1 {{argument value 5 is outside the valid range [0, 3]}}
+ return __riscv_vssrl_vx_u32m1_mu(mask, maskedoff, op1, op2, 5, vl);
+}
let VLOperand = 4;
}
// For Saturating binary operations.
+ // The destination vector type is the same as first source vector.
+ // The second source operand matches the destination type or is an XLen scalar.
+ // Input: (passthru, vector_in, vector_in/scalar_in, vxrm, vl)
+ class RISCVSaturatingBinaryAAShiftUnMaskedRoundingMode
+ : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
+ [LLVMMatchType<0>, LLVMMatchType<0>, llvm_any_ty,
+ llvm_anyint_ty, LLVMMatchType<2>],
+ [ImmArg<ArgIndex<3>>, IntrNoMem, IntrHasSideEffects]>,
+ RISCVVIntrinsic {
+ let VLOperand = 4;
+ }
+ // For Saturating binary operations with mask.
+ // The destination vector type is the same as first source vector.
+ // The second source operand matches the destination type or is an XLen scalar.
+ // Input: (maskedoff, vector_in, vector_in/scalar_in, mask, vxrm, vl, policy)
+ class RISCVSaturatingBinaryAAShiftMaskedRoundingMode
+ : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
+ [LLVMMatchType<0>, LLVMMatchType<0>, llvm_any_ty,
+ LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, llvm_anyint_ty,
+ LLVMMatchType<2>, LLVMMatchType<2>],
+ [ImmArg<ArgIndex<4>>, ImmArg<ArgIndex<6>>, IntrNoMem, IntrHasSideEffects]>,
+ RISCVVIntrinsic {
+ let VLOperand = 5;
+ }
+ // For Saturating binary operations.
// The destination vector type is NOT the same as first source vector.
// The second source operand matches the destination type or is an XLen scalar.
// Input: (passthru, vector_in, vector_in/scalar_in, vl)
[ImmArg<ArgIndex<5>>, IntrNoMem, IntrHasSideEffects]>, RISCVVIntrinsic {
let VLOperand = 4;
}
+ // For Saturating binary operations.
+ // The destination vector type is NOT the same as first source vector.
+ // The second source operand matches the destination type or is an XLen scalar.
+ // Input: (passthru, vector_in, vector_in/scalar_in, vxrm, vl)
+ class RISCVSaturatingBinaryABShiftUnMaskedRoundingMode
+ : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
+ [LLVMMatchType<0>, llvm_anyvector_ty, llvm_any_ty,
+ llvm_anyint_ty, LLVMMatchType<3>],
+ [ImmArg<ArgIndex<3>>, IntrNoMem, IntrHasSideEffects]>,
+ RISCVVIntrinsic {
+ let VLOperand = 4;
+ }
+ // For Saturating binary operations with mask.
+ // The destination vector type is NOT the same as first source vector.
+ // The second source operand matches the destination type or is an XLen scalar.
+ // Input: (maskedoff, vector_in, vector_in/scalar_in, mask, vxrm, vl, policy)
+ class RISCVSaturatingBinaryABShiftMaskedRoundingMode
+ : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
+ [LLVMMatchType<0>, llvm_anyvector_ty, llvm_any_ty,
+ LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, llvm_anyint_ty,
+ LLVMMatchType<3>, LLVMMatchType<3>],
+ [ImmArg<ArgIndex<4>>, ImmArg<ArgIndex<6>>, IntrNoMem,
+ IntrHasSideEffects]>, RISCVVIntrinsic {
+ let VLOperand = 5;
+ }
// Input: (vector_in, vector_in, scalar_in, vl, policy)
class RVVSlideUnMasked
: DefaultAttrsIntrinsic<[llvm_anyvector_ty],
def "int_riscv_" # NAME : RISCVSaturatingBinaryAAXUnMaskedRoundingMode;
def "int_riscv_" # NAME # "_mask" : RISCVSaturatingBinaryAAXMaskedRoundingMode;
}
- multiclass RISCVSaturatingBinaryAAShift {
- def "int_riscv_" # NAME : RISCVSaturatingBinaryAAShiftUnMasked;
- def "int_riscv_" # NAME # "_mask" : RISCVSaturatingBinaryAAShiftMasked;
+ multiclass RISCVSaturatingBinaryAAShiftRoundingMode {
+ def "int_riscv_" # NAME : RISCVSaturatingBinaryAAShiftUnMaskedRoundingMode;
+ def "int_riscv_" # NAME # "_mask" : RISCVSaturatingBinaryAAShiftMaskedRoundingMode;
}
- multiclass RISCVSaturatingBinaryABShift {
- def "int_riscv_" # NAME : RISCVSaturatingBinaryABShiftUnMasked;
- def "int_riscv_" # NAME # "_mask" : RISCVSaturatingBinaryABShiftMasked;
+ multiclass RISCVSaturatingBinaryABShiftRoundingMode {
+ def "int_riscv_" # NAME : RISCVSaturatingBinaryABShiftUnMaskedRoundingMode;
+ def "int_riscv_" # NAME # "_mask" : RISCVSaturatingBinaryABShiftMaskedRoundingMode;
}
multiclass RVVSlide {
def "int_riscv_" # NAME : RVVSlideUnMasked;
defm vasubu : RISCVSaturatingBinaryAAXRoundingMode;
defm vasub : RISCVSaturatingBinaryAAXRoundingMode;
- defm vsmul : RISCVSaturatingBinaryAAX;
+ defm vsmul : RISCVSaturatingBinaryAAXRoundingMode;
- defm vssrl : RISCVSaturatingBinaryAAShift;
- defm vssra : RISCVSaturatingBinaryAAShift;
+ defm vssrl : RISCVSaturatingBinaryAAShiftRoundingMode;
+ defm vssra : RISCVSaturatingBinaryAAShiftRoundingMode;
- defm vnclipu : RISCVSaturatingBinaryABShift;
- defm vnclip : RISCVSaturatingBinaryABShift;
+ defm vnclipu : RISCVSaturatingBinaryABShiftRoundingMode;
+ defm vnclip : RISCVSaturatingBinaryABShiftRoundingMode;
defm vmfeq : RISCVCompare;
defm vmfne : RISCVCompare;
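
For reference, every updated intrinsic now carries one extra integer operand for vxrm, placed immediately before vl (and before the policy operand in the masked form). A minimal sketch of one vssra instantiation; the unmasked declaration matches the one exercised in the CodeGen tests further down, and the masked declaration is shown analogously with illustrative type mangling:

  ; Unmasked: (passthru, op0, op1, vxrm, vl); vxrm is an immediate in [0, 3]
  declare <vscale x 32 x i8> @llvm.riscv.vssra.nxv32i8.nxv32i8.i64(
    <vscale x 32 x i8>, <vscale x 32 x i8>, <vscale x 32 x i8>, i64, i64)

  ; Masked: (maskedoff, op0, op1, mask, vxrm, vl, policy); vxrm and policy are immediates
  declare <vscale x 32 x i8> @llvm.riscv.vssra.mask.nxv32i8.nxv32i8.i64(
    <vscale x 32 x i8>, <vscale x 32 x i8>, <vscale x 32 x i8>,
    <vscale x 32 x i1>, i64, i64, i64)
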
defm _VI : VPseudoBinary<m.vrclass, m.vrclass, ImmType, m, Constraint>;
}
+multiclass VPseudoBinaryV_VI_RM<Operand ImmType = simm5, LMULInfo m, string Constraint = ""> {
+ defm _VI : VPseudoBinaryRoundingMode<m.vrclass, m.vrclass, ImmType, m, Constraint>;
+}
+
multiclass VPseudoVALU_MM {
foreach m = MxList in {
defvar mx = m.MX;
!if(!ge(m.octuple, 8), "@earlyclobber $rd", "")>;
}
+multiclass VPseudoBinaryV_WV_RM<LMULInfo m> {
+ defm _WV : VPseudoBinaryRoundingMode<m.vrclass, m.wvrclass, m.vrclass, m,
+ !if(!ge(m.octuple, 8),
+ "@earlyclobber $rd", "")>;
+}
+
multiclass VPseudoBinaryV_WX<LMULInfo m> {
defm _WX : VPseudoBinary<m.vrclass, m.wvrclass, GPR, m,
!if(!ge(m.octuple, 8), "@earlyclobber $rd", "")>;
}
+multiclass VPseudoBinaryV_WX_RM<LMULInfo m> {
+ defm _WX : VPseudoBinaryRoundingMode<m.vrclass, m.wvrclass, GPR, m,
+ !if(!ge(m.octuple, 8),
+ "@earlyclobber $rd", "")>;
+}
+
multiclass VPseudoBinaryV_WI<LMULInfo m> {
defm _WI : VPseudoBinary<m.vrclass, m.wvrclass, uimm5, m,
!if(!ge(m.octuple, 8), "@earlyclobber $rd", "")>;
}
+multiclass VPseudoBinaryV_WI_RM<LMULInfo m> {
+ defm _WI : VPseudoBinaryRoundingMode<m.vrclass, m.wvrclass, uimm5, m,
+ !if(!ge(m.octuple, 8),
+ "@earlyclobber $rd", "")>;
+}
+
// For vadc and vsbc, the instruction encoding is reserved if the destination
// vector register is v0.
// For vadc and vsbc, CarryIn == 1 and CarryOut == 0
}
}
-multiclass VPseudoVSSHT_VV_VX_VI<Operand ImmType = simm5, string Constraint = ""> {
+multiclass VPseudoVSSHT_VV_VX_VI_RM<Operand ImmType = simm5, string Constraint = ""> {
foreach m = MxList in {
defvar mx = m.MX;
defvar WriteVSShiftV_MX = !cast<SchedWrite>("WriteVSShiftV_" # mx);
defvar ReadVSShiftV_MX = !cast<SchedRead>("ReadVSShiftV_" # mx);
defvar ReadVSShiftX_MX = !cast<SchedRead>("ReadVSShiftX_" # mx);
- defm "" : VPseudoBinaryV_VV<m, Constraint>,
+ defm "" : VPseudoBinaryV_VV_RM<m, Constraint>,
Sched<[WriteVSShiftV_MX, ReadVSShiftV_MX, ReadVSShiftV_MX, ReadVMask]>;
- defm "" : VPseudoBinaryV_VX<m, Constraint>,
+ defm "" : VPseudoBinaryV_VX_RM<m, Constraint>,
Sched<[WriteVSShiftX_MX, ReadVSShiftV_MX, ReadVSShiftX_MX, ReadVMask]>;
- defm "" : VPseudoBinaryV_VI<ImmType, m, Constraint>,
+ defm "" : VPseudoBinaryV_VI_RM<ImmType, m, Constraint>,
Sched<[WriteVSShiftI_MX, ReadVSShiftV_MX, ReadVMask]>;
}
}
}
}
-multiclass VPseudoVSMUL_VV_VX {
+multiclass VPseudoVSMUL_VV_VX_RM {
foreach m = MxList in {
defvar mx = m.MX;
defvar WriteVSMulV_MX = !cast<SchedWrite>("WriteVSMulV_" # mx);
defvar ReadVSMulV_MX = !cast<SchedRead>("ReadVSMulV_" # mx);
defvar ReadVSMulX_MX = !cast<SchedRead>("ReadVSMulX_" # mx);
- defm "" : VPseudoBinaryV_VV<m>,
+ defm "" : VPseudoBinaryV_VV_RM<m>,
Sched<[WriteVSMulV_MX, ReadVSMulV_MX, ReadVSMulV_MX, ReadVMask]>;
- defm "" : VPseudoBinaryV_VX<m>,
+ defm "" : VPseudoBinaryV_VX_RM<m>,
Sched<[WriteVSMulX_MX, ReadVSMulV_MX, ReadVSMulX_MX, ReadVMask]>;
}
}
}
}
-multiclass VPseudoVNCLP_WV_WX_WI {
+multiclass VPseudoVNCLP_WV_WX_WI_RM {
foreach m = MxListW in {
defvar mx = m.MX;
defvar WriteVNClipV_MX = !cast<SchedWrite>("WriteVNClipV_" # mx);
defvar ReadVNClipV_MX = !cast<SchedRead>("ReadVNClipV_" # mx);
defvar ReadVNClipX_MX = !cast<SchedRead>("ReadVNClipX_" # mx);
- defm "" : VPseudoBinaryV_WV<m>,
+ defm "" : VPseudoBinaryV_WV_RM<m>,
Sched<[WriteVNClipV_MX, ReadVNClipV_MX, ReadVNClipV_MX, ReadVMask]>;
- defm "" : VPseudoBinaryV_WX<m>,
+ defm "" : VPseudoBinaryV_WX_RM<m>,
Sched<[WriteVNClipX_MX, ReadVNClipV_MX, ReadVNClipX_MX, ReadVMask]>;
- defm "" : VPseudoBinaryV_WI<m>,
+ defm "" : VPseudoBinaryV_WI_RM<m>,
Sched<[WriteVNClipI_MX, ReadVNClipV_MX, ReadVMask]>;
}
}
vti.RegClass, imm_type>;
}
+multiclass VPatBinaryV_VI_RM<string intrinsic, string instruction,
+ list<VTypeInfo> vtilist,
+ Operand imm_type> {
+ foreach vti = vtilist in
+ let Predicates = GetVTypePredicates<vti>.Predicates in
+ defm : VPatBinaryTARoundingMode<intrinsic,
+ instruction # "_VI_" # vti.LMul.MX,
+ vti.Vector, vti.Vector, XLenVT, vti.Mask,
+ vti.Log2SEW, vti.RegClass,
+ vti.RegClass, imm_type>;
+}
+
multiclass VPatBinaryM_MM<string intrinsic, string instruction> {
foreach mti = AllMasks in
let Predicates = [HasVInstructions] in
}
}
+multiclass VPatBinaryV_WV_RM<string intrinsic, string instruction,
+ list<VTypeInfoToWide> vtilist> {
+ foreach VtiToWti = vtilist in {
+ defvar Vti = VtiToWti.Vti;
+ defvar Wti = VtiToWti.Wti;
+ let Predicates = !listconcat(GetVTypePredicates<Vti>.Predicates,
+ GetVTypePredicates<Wti>.Predicates) in
+ defm : VPatBinaryTARoundingMode<intrinsic,
+ instruction # "_WV_" # Vti.LMul.MX,
+ Vti.Vector, Wti.Vector, Vti.Vector, Vti.Mask,
+ Vti.Log2SEW, Vti.RegClass,
+ Wti.RegClass, Vti.RegClass>;
+ }
+}
+
multiclass VPatBinaryV_WX<string intrinsic, string instruction,
list<VTypeInfoToWide> vtilist> {
foreach VtiToWti = vtilist in {
}
}
+multiclass VPatBinaryV_WX_RM<string intrinsic, string instruction,
+ list<VTypeInfoToWide> vtilist> {
+ foreach VtiToWti = vtilist in {
+ defvar Vti = VtiToWti.Vti;
+ defvar Wti = VtiToWti.Wti;
+ defvar kind = "W"#Vti.ScalarSuffix;
+ let Predicates = !listconcat(GetVTypePredicates<Vti>.Predicates,
+ GetVTypePredicates<Wti>.Predicates) in
+ defm : VPatBinaryTARoundingMode<intrinsic,
+ instruction#"_"#kind#"_"#Vti.LMul.MX,
+ Vti.Vector, Wti.Vector, Vti.Scalar, Vti.Mask,
+ Vti.Log2SEW, Vti.RegClass,
+ Wti.RegClass, Vti.ScalarRegClass>;
+ }
+}
+
multiclass VPatBinaryV_WI<string intrinsic, string instruction,
list<VTypeInfoToWide> vtilist> {
foreach VtiToWti = vtilist in {
}
}
+multiclass VPatBinaryV_WI_RM<string intrinsic, string instruction,
+ list<VTypeInfoToWide> vtilist> {
+ foreach VtiToWti = vtilist in {
+ defvar Vti = VtiToWti.Vti;
+ defvar Wti = VtiToWti.Wti;
+ let Predicates = !listconcat(GetVTypePredicates<Vti>.Predicates,
+ GetVTypePredicates<Wti>.Predicates) in
+ defm : VPatBinaryTARoundingMode<intrinsic,
+ instruction # "_WI_" # Vti.LMul.MX,
+ Vti.Vector, Wti.Vector, XLenVT, Vti.Mask,
+ Vti.Log2SEW, Vti.RegClass,
+ Wti.RegClass, uimm5>;
+ }
+}
+
multiclass VPatBinaryV_VM<string intrinsic, string instruction,
bit CarryOut = 0,
list<VTypeInfo> vtilist = AllIntegerVectors> {
VPatBinaryV_VX<intrinsic, instruction, vtilist>,
VPatBinaryV_VI<intrinsic, instruction, vtilist, ImmType>;
+multiclass VPatBinaryV_VV_VX_VI_RM<string intrinsic, string instruction,
+ list<VTypeInfo> vtilist, Operand ImmType = simm5>
+ : VPatBinaryV_VV_RM<intrinsic, instruction, vtilist>,
+ VPatBinaryV_VX_RM<intrinsic, instruction, vtilist>,
+ VPatBinaryV_VI_RM<intrinsic, instruction, vtilist, ImmType>;
+
multiclass VPatBinaryV_VV_VX<string intrinsic, string instruction,
list<VTypeInfo> vtilist, bit isSEWAware = 0>
: VPatBinaryV_VV<intrinsic, instruction, vtilist, isSEWAware>,
VPatBinaryV_WX<intrinsic, instruction, vtilist>,
VPatBinaryV_WI<intrinsic, instruction, vtilist>;
+multiclass VPatBinaryV_WV_WX_WI_RM<string intrinsic, string instruction,
+ list<VTypeInfoToWide> vtilist>
+ : VPatBinaryV_WV_RM<intrinsic, instruction, vtilist>,
+ VPatBinaryV_WX_RM<intrinsic, instruction, vtilist>,
+ VPatBinaryV_WI_RM<intrinsic, instruction, vtilist>;
+
multiclass VPatBinaryV_VM_XM_IM<string intrinsic, string instruction>
: VPatBinaryV_VM_TAIL<intrinsic, instruction>,
VPatBinaryV_XM_TAIL<intrinsic, instruction>,
//===----------------------------------------------------------------------===//
// 12.3. Vector Single-Width Fractional Multiply with Rounding and Saturation
//===----------------------------------------------------------------------===//
-let Uses = [VXRM], Defs = [VXSAT], hasSideEffects = 1 in {
- defm PseudoVSMUL : VPseudoVSMUL_VV_VX;
+let Defs = [VXSAT], hasSideEffects = 1 in {
+ defm PseudoVSMUL : VPseudoVSMUL_VV_VX_RM;
}
//===----------------------------------------------------------------------===//
// 12.4. Vector Single-Width Scaling Shift Instructions
//===----------------------------------------------------------------------===//
-let Uses = [VXRM], hasSideEffects = 1 in {
- defm PseudoVSSRL : VPseudoVSSHT_VV_VX_VI<uimm5>;
- defm PseudoVSSRA : VPseudoVSSHT_VV_VX_VI<uimm5>;
-}
+defm PseudoVSSRL : VPseudoVSSHT_VV_VX_VI_RM<uimm5>;
+defm PseudoVSSRA : VPseudoVSSHT_VV_VX_VI_RM<uimm5>;
//===----------------------------------------------------------------------===//
// 12.5. Vector Narrowing Fixed-Point Clip Instructions
//===----------------------------------------------------------------------===//
-let Uses = [VXRM], Defs = [VXSAT], hasSideEffects = 1 in {
- defm PseudoVNCLIP : VPseudoVNCLP_WV_WX_WI;
- defm PseudoVNCLIPU : VPseudoVNCLP_WV_WX_WI;
+let Defs = [VXSAT], hasSideEffects = 1 in {
+ defm PseudoVNCLIP : VPseudoVNCLP_WV_WX_WI_RM;
+ defm PseudoVNCLIPU : VPseudoVNCLP_WV_WX_WI_RM;
}
} // Predicates = [HasVInstructions]
//===----------------------------------------------------------------------===//
// 12.3. Vector Single-Width Fractional Multiply with Rounding and Saturation
//===----------------------------------------------------------------------===//
-defm : VPatBinaryV_VV_VX<"int_riscv_vsmul", "PseudoVSMUL", IntegerVectorsExceptI64>;
+defm : VPatBinaryV_VV_VX_RM<"int_riscv_vsmul", "PseudoVSMUL",
+ IntegerVectorsExceptI64>;
// vsmul.vv and vsmul.vx are not included in EEW=64 in Zve64*.
let Predicates = [HasVInstructionsFullMultiply] in
-defm : VPatBinaryV_VV_VX<"int_riscv_vsmul", "PseudoVSMUL", I64IntegerVectors>;
+defm : VPatBinaryV_VV_VX_RM<"int_riscv_vsmul", "PseudoVSMUL",
+ I64IntegerVectors>;
//===----------------------------------------------------------------------===//
// 12.4. Vector Single-Width Scaling Shift Instructions
//===----------------------------------------------------------------------===//
-defm : VPatBinaryV_VV_VX_VI<"int_riscv_vssrl", "PseudoVSSRL", AllIntegerVectors,
- uimm5>;
-defm : VPatBinaryV_VV_VX_VI<"int_riscv_vssra", "PseudoVSSRA", AllIntegerVectors,
- uimm5>;
+defm : VPatBinaryV_VV_VX_VI_RM<"int_riscv_vssrl", "PseudoVSSRL",
+ AllIntegerVectors, uimm5>;
+defm : VPatBinaryV_VV_VX_VI_RM<"int_riscv_vssra", "PseudoVSSRA",
+ AllIntegerVectors, uimm5>;
//===----------------------------------------------------------------------===//
// 12.5. Vector Narrowing Fixed-Point Clip Instructions
//===----------------------------------------------------------------------===//
-defm : VPatBinaryV_WV_WX_WI<"int_riscv_vnclipu", "PseudoVNCLIPU", AllWidenableIntVectors>;
-defm : VPatBinaryV_WV_WX_WI<"int_riscv_vnclip", "PseudoVNCLIP", AllWidenableIntVectors>;
+defm : VPatBinaryV_WV_WX_WI_RM<"int_riscv_vnclipu", "PseudoVNCLIPU",
+ AllWidenableIntVectors>;
+defm : VPatBinaryV_WV_WX_WI_RM<"int_riscv_vnclip", "PseudoVNCLIP",
+ AllWidenableIntVectors>;
//===----------------------------------------------------------------------===//
// 13. Vector Floating-Point Instructions
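
The CodeGen tests below are updated mechanically: every call gains the new vxrm operand (iXLen 0, i.e. round-to-nearest-up, throughout these tests), and the expected assembly now checks an explicit csrwi vxrm, 0 ahead of each fixed-point instruction instead of modelling VXRM as an implicit use. A condensed sketch of one masked vsmul call after this change (operand comments added for readability; the type-mangled callee name follows the declare in each test):

  ; CHECK: csrwi vxrm, 0
  ; CHECK: vsmul.vv v8, v9, v10, v0.t
  %a = call <vscale x 1 x i8> @llvm.riscv.vsmul.mask.nxv1i8.nxv1i8(
         <vscale x 1 x i8> %0,  ; maskedoff
         <vscale x 1 x i8> %1,  ; op0
         <vscale x 1 x i8> %2,  ; op1
         <vscale x 1 x i1> %3,  ; mask
         iXLen 0,               ; vxrm (0 = rnu)
         iXLen %4,              ; vl
         iXLen 1)               ; policy (tail agnostic, mask undisturbed)
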
<vscale x 1 x i8>,
<vscale x 1 x i1>,
iXLen,
+ iXLen,
iXLen);
define <vscale x 1 x i8> @intrinsic_vsmul_mask_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vsmul_mask_vv_nxv1i8_nxv1i8_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
+; CHECK-NEXT: csrwi vxrm, 0
; CHECK-NEXT: vsmul.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
entry:
<vscale x 1 x i8> %0,
<vscale x 1 x i8> %1,
<vscale x 1 x i1> %2,
- iXLen %3, iXLen 3)
+ iXLen 0, iXLen %3, iXLen 3)
ret <vscale x 1 x i8> %a
}
<vscale x 1 x i8>,
<vscale x 1 x i1>,
iXLen,
+ iXLen,
iXLen);
define <vscale x 1 x i8> @intrinsic_vssrl_mask_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vssrl_mask_vv_nxv1i8_nxv1i8_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
+; CHECK-NEXT: csrwi vxrm, 0
; CHECK-NEXT: vssrl.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
entry:
<vscale x 1 x i8> %0,
<vscale x 1 x i8> %1,
<vscale x 1 x i1> %2,
- iXLen %3, iXLen 3)
+ iXLen 0, iXLen %3, iXLen 3)
ret <vscale x 1 x i8> %a
}
<vscale x 1 x i8>,
<vscale x 1 x i1>,
iXLen,
+ iXLen,
iXLen);
define <vscale x 1 x i8> @intrinsic_vnclip_mask_wv_nxv1i8_nxv1i16_nxv1i8(<vscale x 1 x i16> %0, <vscale x 1 x i8> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vnclip_mask_wv_nxv1i8_nxv1i16_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
+; CHECK-NEXT: csrwi vxrm, 0
; CHECK-NEXT: vnclip.wv v8, v8, v9, v0.t
; CHECK-NEXT: ret
entry:
<vscale x 1 x i16> %0,
<vscale x 1 x i8> %1,
<vscale x 1 x i1> %2,
- iXLen %3, iXLen 3)
+ iXLen 0, iXLen %3, iXLen 3)
ret <vscale x 1 x i8> %a
}
<vscale x 1 x i8>,
<vscale x 1 x i1>,
iXLen,
+ iXLen,
iXLen)
define <vscale x 1 x i8> @intrinsic_vsmul_mask_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vsmul_mask_vv_nxv1i8_nxv1i8_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu
+; CHECK-NEXT: csrwi vxrm, 0
; CHECK-NEXT: vsmul.vv v8, v9, v10, v0.t
; CHECK-NEXT: ret
entry:
<vscale x 1 x i8> %1,
<vscale x 1 x i8> %2,
<vscale x 1 x i1> %3,
- iXLen %4, iXLen 1)
+ iXLen 0, iXLen %4, iXLen 1)
ret <vscale x 1 x i8> %a
}
<vscale x 1 x i8>,
<vscale x 1 x i1>,
iXLen,
+ iXLen,
iXLen)
define <vscale x 1 x i8> @intrinsic_vssrl_mask_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vssrl_mask_vv_nxv1i8_nxv1i8_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu
+; CHECK-NEXT: csrwi vxrm, 0
; CHECK-NEXT: vssrl.vv v8, v9, v10, v0.t
; CHECK-NEXT: ret
entry:
<vscale x 1 x i8> %1,
<vscale x 1 x i8> %2,
<vscale x 1 x i1> %3,
- iXLen %4, iXLen 1)
+ iXLen 0, iXLen %4, iXLen 1)
ret <vscale x 1 x i8> %a
}
<vscale x 1 x i8>,
<vscale x 1 x i1>,
iXLen,
+ iXLen,
iXLen)
define <vscale x 1 x i8> @intrinsic_vnclip_mask_wv_nxv1i8_nxv1i16_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i16> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vnclip_mask_wv_nxv1i8_nxv1i16_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu
+; CHECK-NEXT: csrwi vxrm, 0
; CHECK-NEXT: vnclip.wv v8, v9, v10, v0.t
; CHECK-NEXT: ret
entry:
<vscale x 1 x i16> %1,
<vscale x 1 x i8> %2,
<vscale x 1 x i1> %3,
- iXLen %4, iXLen 1)
+ iXLen 0, iXLen %4, iXLen 1)
ret <vscale x 1 x i8> %a
}
<vscale x 1 x i8>,
<vscale x 1 x i1>,
iXLen,
+ iXLen,
iXLen)
define <vscale x 1 x i8> @intrinsic_vsmul_mask_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vsmul_mask_vv_nxv1i8_nxv1i8_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, ma
+; CHECK-NEXT: csrwi vxrm, 0
; CHECK-NEXT: vsmul.vv v8, v9, v10, v0.t
; CHECK-NEXT: ret
entry:
<vscale x 1 x i8> %1,
<vscale x 1 x i8> %2,
<vscale x 1 x i1> %3,
- iXLen %4, iXLen 2)
+ iXLen 0, iXLen %4, iXLen 2)
ret <vscale x 1 x i8> %a
}
<vscale x 1 x i8>,
<vscale x 1 x i1>,
iXLen,
+ iXLen,
iXLen)
define <vscale x 1 x i8> @intrinsic_vssrl_mask_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vssrl_mask_vv_nxv1i8_nxv1i8_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, ma
+; CHECK-NEXT: csrwi vxrm, 0
; CHECK-NEXT: vssrl.vv v8, v9, v10, v0.t
; CHECK-NEXT: ret
entry:
<vscale x 1 x i8> %1,
<vscale x 1 x i8> %2,
<vscale x 1 x i1> %3,
- iXLen %4, iXLen 2)
+ iXLen 0, iXLen %4, iXLen 2)
ret <vscale x 1 x i8> %a
}
<vscale x 1 x i8>,
<vscale x 1 x i1>,
iXLen,
+ iXLen,
iXLen)
define <vscale x 1 x i8> @intrinsic_vnclip_mask_wv_nxv1i8_nxv1i16_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i16> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vnclip_mask_wv_nxv1i8_nxv1i16_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, ma
+; CHECK-NEXT: csrwi vxrm, 0
; CHECK-NEXT: vnclip.wv v8, v9, v10, v0.t
; CHECK-NEXT: ret
entry:
<vscale x 1 x i16> %1,
<vscale x 1 x i8> %2,
<vscale x 1 x i1> %3,
- iXLen %4, iXLen 2)
+ iXLen 0, iXLen %4, iXLen 2)
ret <vscale x 1 x i8> %a
}
<vscale x 1 x i8>,
<vscale x 1 x i1>,
iXLen,
+ iXLen,
iXLen)
define <vscale x 1 x i8> @intrinsic_vsmul_mask_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vsmul_mask_vv_nxv1i8_nxv1i8_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, mu
+; CHECK-NEXT: csrwi vxrm, 0
; CHECK-NEXT: vsmul.vv v8, v9, v10, v0.t
; CHECK-NEXT: ret
entry:
<vscale x 1 x i8> %1,
<vscale x 1 x i8> %2,
<vscale x 1 x i1> %3,
- iXLen %4, iXLen 0)
+ iXLen 0, iXLen %4, iXLen 0)
ret <vscale x 1 x i8> %a
}
<vscale x 1 x i8>,
<vscale x 1 x i1>,
iXLen,
+ iXLen,
iXLen)
define <vscale x 1 x i8> @intrinsic_vssrl_mask_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vssrl_mask_vv_nxv1i8_nxv1i8_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, mu
+; CHECK-NEXT: csrwi vxrm, 0
; CHECK-NEXT: vssrl.vv v8, v9, v10, v0.t
; CHECK-NEXT: ret
entry:
<vscale x 1 x i8> %1,
<vscale x 1 x i8> %2,
<vscale x 1 x i1> %3,
- iXLen %4, iXLen 0)
+ iXLen 0, iXLen %4, iXLen 0)
ret <vscale x 1 x i8> %a
}
<vscale x 1 x i8>,
<vscale x 1 x i1>,
iXLen,
+ iXLen,
iXLen)
define <vscale x 1 x i8> @intrinsic_vnclip_mask_wv_nxv1i8_nxv1i16_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i16> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vnclip_mask_wv_nxv1i8_nxv1i16_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, mu
+; CHECK-NEXT: csrwi vxrm, 0
; CHECK-NEXT: vnclip.wv v8, v9, v10, v0.t
; CHECK-NEXT: ret
entry:
<vscale x 1 x i16> %1,
<vscale x 1 x i8> %2,
<vscale x 1 x i1> %3,
- iXLen %4, iXLen 0)
+ iXLen 0, iXLen %4, iXLen 0)
ret <vscale x 1 x i8> %a
}
; CHECK-NEXT: addi a1, a1, %lo(.L__const.test.var_45)
; CHECK-NEXT: vsetivli zero, 2, e8, m4, ta, ma
; CHECK-NEXT: vle8.v v8, (a1)
+; CHECK-NEXT: li a1, 1
+; CHECK-NEXT: vmul.vx v12, v8, a1
; CHECK-NEXT: lui a1, %hi(.L__const.test.var_101)
; CHECK-NEXT: addi a1, a1, %lo(.L__const.test.var_101)
-; CHECK-NEXT: vle8.v v12, (a1)
-; CHECK-NEXT: li a1, 1
-; CHECK-NEXT: vmul.vx v16, v8, a1
-; CHECK-NEXT: vmv.x.s a1, v16
+; CHECK-NEXT: vle8.v v16, (a1)
+; CHECK-NEXT: vmv.x.s a1, v12
+; CHECK-NEXT: csrwi vxrm, 0
; CHECK-NEXT: vmsleu.vx v0, v8, a1
-; CHECK-NEXT: vssra.vv v8, v12, v8
+; CHECK-NEXT: vssra.vv v8, v16, v8
; CHECK-NEXT: vmerge.vvm v8, v8, v8, v0
; CHECK-NEXT: vse8.v v8, (a0)
; CHECK-NEXT: ret
%2 = tail call <vscale x 32 x i8> @llvm.riscv.vle.nxv32i8.i64(<vscale x 32 x i8> undef, ptr nonnull @__const.test.var_101, i64 2)
%3 = tail call i64 @llvm.riscv.vsetvli.i64(i64 32, i64 0, i64 2)
%4 = tail call i8 @llvm.riscv.vmv.x.s.nxv32i8(<vscale x 32 x i8> %1)
- %5 = tail call <vscale x 32 x i8> @llvm.riscv.vssra.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> undef, <vscale x 32 x i8> %2, <vscale x 32 x i8> %0, i64 2)
+ %5 = tail call <vscale x 32 x i8> @llvm.riscv.vssra.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> undef, <vscale x 32 x i8> %2, <vscale x 32 x i8> %0, i64 0, i64 2)
%6 = tail call <vscale x 32 x i1> @llvm.riscv.vmsleu.nxv32i8.i8.i64(<vscale x 32 x i8> %0, i8 %4, i64 2)
%7 = tail call <vscale x 32 x i8> @llvm.riscv.vmerge.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> poison, <vscale x 32 x i8> %5, <vscale x 32 x i8> %5, <vscale x 32 x i1> %6, i64 2)
tail call void @llvm.riscv.vse.nxv32i8.i64(<vscale x 32 x i8> %7, ptr %var_99, i64 2)
declare <vscale x 32 x i8> @llvm.riscv.vmul.nxv32i8.i8.i64(<vscale x 32 x i8>, <vscale x 32 x i8>, i8, i64) #2
declare i64 @llvm.riscv.vsetvli.i64(i64, i64 immarg, i64 immarg) #3
declare i8 @llvm.riscv.vmv.x.s.nxv32i8(<vscale x 32 x i8>) #2
-declare <vscale x 32 x i8> @llvm.riscv.vssra.nxv32i8.nxv32i8.i64(<vscale x 32 x i8>, <vscale x 32 x i8>, <vscale x 32 x i8>, i64) #3
+declare <vscale x 32 x i8> @llvm.riscv.vssra.nxv32i8.nxv32i8.i64(<vscale x 32 x i8>, <vscale x 32 x i8>, <vscale x 32 x i8>, i64, i64) #3
declare <vscale x 32 x i1> @llvm.riscv.vmsleu.nxv32i8.i8.i64(<vscale x 32 x i8>, i8, i64) #2
declare <vscale x 32 x i8> @llvm.riscv.vmerge.nxv32i8.nxv32i8.i64(<vscale x 32 x i8>, <vscale x 32 x i8>, <vscale x 32 x i8>, <vscale x 32 x i1>, i64) #2
declare void @llvm.riscv.vse.nxv32i8.i64(<vscale x 32 x i8>, ptr nocapture, i64) #4
<vscale x 1 x i8>,
<vscale x 1 x i16>,
<vscale x 1 x i8>,
+ iXLen,
iXLen);
define <vscale x 1 x i8> @intrinsic_vnclip_wv_nxv1i8_nxv1i16_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i16> %1, <vscale x 1 x i8> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vnclip_wv_nxv1i8_nxv1i16_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, ma
+; CHECK-NEXT: csrwi vxrm, 0
; CHECK-NEXT: vnclip.wv v8, v9, v10
; CHECK-NEXT: ret
entry:
<vscale x 1 x i8> %0,
<vscale x 1 x i16> %1,
<vscale x 1 x i8> %2,
- iXLen %3)
+ iXLen 0, iXLen %3)
ret <vscale x 1 x i8> %a
}
<vscale x 1 x i8>,
<vscale x 1 x i16>,
<vscale x 1 x i8>,
+ iXLen,
iXLen);
define <vscale x 1 x i8> @intrinsic_vnclipu_wv_nxv1i8_nxv1i16_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i16> %1, <vscale x 1 x i8> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vnclipu_wv_nxv1i8_nxv1i16_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, ma
+; CHECK-NEXT: csrwi vxrm, 0
; CHECK-NEXT: vnclipu.wv v8, v9, v10
; CHECK-NEXT: ret
entry:
<vscale x 1 x i8> %0,
<vscale x 1 x i16> %1,
<vscale x 1 x i8> %2,
- iXLen %3)
+ iXLen 0, iXLen %3)
ret <vscale x 1 x i8> %a
}
<vscale x 1 x i8>,
<vscale x 1 x i8>,
<vscale x 1 x i8>,
+ iXLen,
iXLen);
define <vscale x 1 x i8> @intrinsic_vsmul_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vsmul_vv_nxv1i8_nxv1i8_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, ma
+; CHECK-NEXT: csrwi vxrm, 0
; CHECK-NEXT: vsmul.vv v8, v9, v10
; CHECK-NEXT: ret
entry:
<vscale x 1 x i8> %0,
<vscale x 1 x i8> %1,
<vscale x 1 x i8> %2,
- iXLen %3)
+ iXLen 0, iXLen %3)
ret <vscale x 1 x i8> %a
}
<vscale x 1 x i64>,
<vscale x 1 x i64>,
i64,
+ iXLen,
iXLen);
define <vscale x 1 x i64> @intrinsic_vsmul_vx_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i64 %2, iXLen %3) nounwind {
; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, ma
; RV32-NEXT: vlse64.v v10, (a0), zero
; RV32-NEXT: vsetvli zero, zero, e64, m1, tu, ma
+; RV32-NEXT: csrwi vxrm, 0
; RV32-NEXT: vsmul.vv v8, v9, v10
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: ret
; RV64-LABEL: intrinsic_vsmul_vx_nxv1i64_nxv1i64_i64:
; RV64: # %bb.0: # %entry
; RV64-NEXT: vsetvli zero, a1, e64, m1, tu, ma
+; RV64-NEXT: csrwi vxrm, 0
; RV64-NEXT: vsmul.vx v8, v9, a0
; RV64-NEXT: ret
entry:
<vscale x 1 x i64> %0,
<vscale x 1 x i64> %1,
i64 %2,
- iXLen %3)
+ iXLen 0, iXLen %3)
ret <vscale x 1 x i64> %a
}
<vscale x 1 x i8>,
<vscale x 1 x i8>,
<vscale x 1 x i8>,
+ iXLen,
iXLen);
define <vscale x 1 x i8> @intrinsic_vssra_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vssra_vv_nxv1i8_nxv1i8_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, ma
+; CHECK-NEXT: csrwi vxrm, 0
; CHECK-NEXT: vssra.vv v8, v9, v10
; CHECK-NEXT: ret
entry:
<vscale x 1 x i8> %0,
<vscale x 1 x i8> %1,
<vscale x 1 x i8> %2,
- iXLen %3)
+ iXLen 0, iXLen %3)
ret <vscale x 1 x i8> %a
}
<vscale x 1 x i8>,
<vscale x 1 x i8>,
<vscale x 1 x i8>,
+ iXLen,
iXLen);
define <vscale x 1 x i8> @intrinsic_vssrl_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vssrl_vv_nxv1i8_nxv1i8_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, ma
+; CHECK-NEXT: csrwi vxrm, 0
; CHECK-NEXT: vssrl.vv v8, v9, v10
; CHECK-NEXT: ret
entry:
<vscale x 1 x i8> %0,
<vscale x 1 x i8> %1,
<vscale x 1 x i8> %2,
- iXLen %3)
+ iXLen 0, iXLen %3)
ret <vscale x 1 x i8> %a
}
<vscale x 1 x i8>,
<vscale x 1 x i16>,
<vscale x 1 x i8>,
- iXLen);
+ iXLen, iXLen);
define <vscale x 1 x i8> @intrinsic_vnclip_wv_nxv1i8_nxv1i16_nxv1i8(<vscale x 1 x i16> %0, <vscale x 1 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vnclip_wv_nxv1i8_nxv1i16_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
+; CHECK-NEXT: csrwi vxrm, 0
; CHECK-NEXT: vnclip.wv v8, v8, v9
; CHECK-NEXT: ret
entry:
<vscale x 1 x i8> undef,
<vscale x 1 x i16> %0,
<vscale x 1 x i8> %1,
- iXLen %2)
+ iXLen 0, iXLen %2)
ret <vscale x 1 x i8> %a
}
<vscale x 1 x i16>,
<vscale x 1 x i8>,
<vscale x 1 x i1>,
- iXLen,
- iXLen);
+ iXLen, iXLen, iXLen);
define <vscale x 1 x i8> @intrinsic_vnclip_mask_wv_nxv1i8_nxv1i16_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i16> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vnclip_mask_wv_nxv1i8_nxv1i16_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu
+; CHECK-NEXT: csrwi vxrm, 0
; CHECK-NEXT: vnclip.wv v8, v9, v10, v0.t
; CHECK-NEXT: ret
entry:
<vscale x 1 x i16> %1,
<vscale x 1 x i8> %2,
<vscale x 1 x i1> %3,
- iXLen %4, iXLen 1)
+ iXLen 0, iXLen %4, iXLen 1)
ret <vscale x 1 x i8> %a
}
<vscale x 2 x i8>,
<vscale x 2 x i16>,
<vscale x 2 x i8>,
- iXLen);
+ iXLen, iXLen);
define <vscale x 2 x i8> @intrinsic_vnclip_wv_nxv2i8_nxv2i16_nxv2i8(<vscale x 2 x i16> %0, <vscale x 2 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vnclip_wv_nxv2i8_nxv2i16_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma
+; CHECK-NEXT: csrwi vxrm, 0
; CHECK-NEXT: vnclip.wv v8, v8, v9
; CHECK-NEXT: ret
entry:
<vscale x 2 x i8> undef,
<vscale x 2 x i16> %0,
<vscale x 2 x i8> %1,
- iXLen %2)
+ iXLen 0, iXLen %2)
ret <vscale x 2 x i8> %a
}
<vscale x 2 x i16>,
<vscale x 2 x i8>,
<vscale x 2 x i1>,
- iXLen,
- iXLen);
+ iXLen, iXLen, iXLen);
define <vscale x 2 x i8> @intrinsic_vnclip_mask_wv_nxv2i8_nxv2i16_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i16> %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vnclip_mask_wv_nxv2i8_nxv2i16_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu
+; CHECK-NEXT: csrwi vxrm, 0
; CHECK-NEXT: vnclip.wv v8, v9, v10, v0.t
; CHECK-NEXT: ret
entry:
<vscale x 2 x i16> %1,
<vscale x 2 x i8> %2,
<vscale x 2 x i1> %3,
- iXLen %4, iXLen 1)
+ iXLen 0, iXLen %4, iXLen 1)
ret <vscale x 2 x i8> %a
}
<vscale x 4 x i8>,
<vscale x 4 x i16>,
<vscale x 4 x i8>,
- iXLen);
+ iXLen, iXLen);
define <vscale x 4 x i8> @intrinsic_vnclip_wv_nxv4i8_nxv4i16_nxv4i8(<vscale x 4 x i16> %0, <vscale x 4 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vnclip_wv_nxv4i8_nxv4i16_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
+; CHECK-NEXT: csrwi vxrm, 0
; CHECK-NEXT: vnclip.wv v8, v8, v9
; CHECK-NEXT: ret
entry:
<vscale x 4 x i8> undef,
<vscale x 4 x i16> %0,
<vscale x 4 x i8> %1,
- iXLen %2)
+ iXLen 0, iXLen %2)
ret <vscale x 4 x i8> %a
}
<vscale x 4 x i16>,
<vscale x 4 x i8>,
<vscale x 4 x i1>,
- iXLen,
- iXLen);
+ iXLen, iXLen, iXLen);
define <vscale x 4 x i8> @intrinsic_vnclip_mask_wv_nxv4i8_nxv4i16_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i16> %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vnclip_mask_wv_nxv4i8_nxv4i16_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu
+; CHECK-NEXT: csrwi vxrm, 0
; CHECK-NEXT: vnclip.wv v8, v9, v10, v0.t
; CHECK-NEXT: ret
entry:
<vscale x 4 x i16> %1,
<vscale x 4 x i8> %2,
<vscale x 4 x i1> %3,
- iXLen %4, iXLen 1)
+ iXLen 0, iXLen %4, iXLen 1)
ret <vscale x 4 x i8> %a
}
<vscale x 8 x i8>,
<vscale x 8 x i16>,
<vscale x 8 x i8>,
- iXLen);
+ iXLen, iXLen);
define <vscale x 8 x i8> @intrinsic_vnclip_wv_nxv8i8_nxv8i16_nxv8i8(<vscale x 8 x i16> %0, <vscale x 8 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vnclip_wv_nxv8i8_nxv8i16_nxv8i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma
+; CHECK-NEXT: csrwi vxrm, 0
; CHECK-NEXT: vnclip.wv v11, v8, v10
; CHECK-NEXT: vmv.v.v v8, v11
; CHECK-NEXT: ret
<vscale x 8 x i8> undef,
<vscale x 8 x i16> %0,
<vscale x 8 x i8> %1,
- iXLen %2)
+ iXLen 0, iXLen %2)
ret <vscale x 8 x i8> %a
}
<vscale x 8 x i16>,
<vscale x 8 x i8>,
<vscale x 8 x i1>,
- iXLen,
- iXLen);
+ iXLen, iXLen, iXLen);
define <vscale x 8 x i8> @intrinsic_vnclip_mask_wv_nxv8i8_nxv8i16_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i16> %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vnclip_mask_wv_nxv8i8_nxv8i16_nxv8i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu
+; CHECK-NEXT: csrwi vxrm, 0
; CHECK-NEXT: vnclip.wv v8, v10, v9, v0.t
; CHECK-NEXT: ret
entry:
<vscale x 8 x i16> %1,
<vscale x 8 x i8> %2,
<vscale x 8 x i1> %3,
- iXLen %4, iXLen 1)
+ iXLen 0, iXLen %4, iXLen 1)
ret <vscale x 8 x i8> %a
}
<vscale x 16 x i8>,
<vscale x 16 x i16>,
<vscale x 16 x i8>,
- iXLen);
+ iXLen, iXLen);
define <vscale x 16 x i8> @intrinsic_vnclip_wv_nxv16i8_nxv16i16_nxv16i8(<vscale x 16 x i16> %0, <vscale x 16 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vnclip_wv_nxv16i8_nxv16i16_nxv16i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma
+; CHECK-NEXT: csrwi vxrm, 0
; CHECK-NEXT: vnclip.wv v14, v8, v12
; CHECK-NEXT: vmv.v.v v8, v14
; CHECK-NEXT: ret
<vscale x 16 x i8> undef,
<vscale x 16 x i16> %0,
<vscale x 16 x i8> %1,
- iXLen %2)
+ iXLen 0, iXLen %2)
ret <vscale x 16 x i8> %a
}
<vscale x 16 x i16>,
<vscale x 16 x i8>,
<vscale x 16 x i1>,
- iXLen,
- iXLen);
+ iXLen, iXLen, iXLen);
define <vscale x 16 x i8> @intrinsic_vnclip_mask_wv_nxv16i8_nxv16i16_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i16> %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vnclip_mask_wv_nxv16i8_nxv16i16_nxv16i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu
+; CHECK-NEXT: csrwi vxrm, 0
; CHECK-NEXT: vnclip.wv v8, v12, v10, v0.t
; CHECK-NEXT: ret
entry:
<vscale x 16 x i16> %1,
<vscale x 16 x i8> %2,
<vscale x 16 x i1> %3,
- iXLen %4, iXLen 1)
+ iXLen 0, iXLen %4, iXLen 1)
ret <vscale x 16 x i8> %a
}
<vscale x 32 x i8>,
<vscale x 32 x i16>,
<vscale x 32 x i8>,
- iXLen);
+ iXLen, iXLen);
define <vscale x 32 x i8> @intrinsic_vnclip_wv_nxv32i8_nxv32i16_nxv32i8(<vscale x 32 x i16> %0, <vscale x 32 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vnclip_wv_nxv32i8_nxv32i16_nxv32i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma
+; CHECK-NEXT: csrwi vxrm, 0
; CHECK-NEXT: vnclip.wv v20, v8, v16
; CHECK-NEXT: vmv.v.v v8, v20
; CHECK-NEXT: ret
<vscale x 32 x i8> undef,
<vscale x 32 x i16> %0,
<vscale x 32 x i8> %1,
- iXLen %2)
+ iXLen 0, iXLen %2)
ret <vscale x 32 x i8> %a
}
<vscale x 32 x i16>,
<vscale x 32 x i8>,
<vscale x 32 x i1>,
- iXLen,
- iXLen);
+ iXLen, iXLen, iXLen);
define <vscale x 32 x i8> @intrinsic_vnclip_mask_wv_nxv32i8_nxv32i16_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i16> %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vnclip_mask_wv_nxv32i8_nxv32i16_nxv32i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu
+; CHECK-NEXT: csrwi vxrm, 0
; CHECK-NEXT: vnclip.wv v8, v16, v12, v0.t
; CHECK-NEXT: ret
entry:
<vscale x 32 x i16> %1,
<vscale x 32 x i8> %2,
<vscale x 32 x i1> %3,
- iXLen %4, iXLen 1)
+ iXLen 0, iXLen %4, iXLen 1)
ret <vscale x 32 x i8> %a
}
<vscale x 1 x i16>,
<vscale x 1 x i32>,
<vscale x 1 x i16>,
- iXLen);
+ iXLen, iXLen);
define <vscale x 1 x i16> @intrinsic_vnclip_wv_nxv1i16_nxv1i32_nxv1i16(<vscale x 1 x i32> %0, <vscale x 1 x i16> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vnclip_wv_nxv1i16_nxv1i32_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
+; CHECK-NEXT: csrwi vxrm, 0
; CHECK-NEXT: vnclip.wv v8, v8, v9
; CHECK-NEXT: ret
entry:
<vscale x 1 x i16> undef,
<vscale x 1 x i32> %0,
<vscale x 1 x i16> %1,
- iXLen %2)
+ iXLen 0, iXLen %2)
ret <vscale x 1 x i16> %a
}
<vscale x 1 x i32>,
<vscale x 1 x i16>,
<vscale x 1 x i1>,
- iXLen,
- iXLen);
+ iXLen, iXLen, iXLen);
define <vscale x 1 x i16> @intrinsic_vnclip_mask_wv_nxv1i16_nxv1i32_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i32> %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vnclip_mask_wv_nxv1i16_nxv1i32_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
+; CHECK-NEXT: csrwi vxrm, 0
; CHECK-NEXT: vnclip.wv v8, v9, v10, v0.t
; CHECK-NEXT: ret
entry:
<vscale x 1 x i32> %1,
<vscale x 1 x i16> %2,
<vscale x 1 x i1> %3,
- iXLen %4, iXLen 1)
+ iXLen 0, iXLen %4, iXLen 1)
ret <vscale x 1 x i16> %a
}
<vscale x 2 x i16>,
<vscale x 2 x i32>,
<vscale x 2 x i16>,
- iXLen);
+ iXLen, iXLen);
define <vscale x 2 x i16> @intrinsic_vnclip_wv_nxv2i16_nxv2i32_nxv2i16(<vscale x 2 x i32> %0, <vscale x 2 x i16> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vnclip_wv_nxv2i16_nxv2i32_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
+; CHECK-NEXT: csrwi vxrm, 0
; CHECK-NEXT: vnclip.wv v8, v8, v9
; CHECK-NEXT: ret
entry:
<vscale x 2 x i16> undef,
<vscale x 2 x i32> %0,
<vscale x 2 x i16> %1,
- iXLen %2)
+ iXLen 0, iXLen %2)
ret <vscale x 2 x i16> %a
}
<vscale x 2 x i32>,
<vscale x 2 x i16>,
<vscale x 2 x i1>,
- iXLen,
- iXLen);
+ iXLen, iXLen, iXLen);
define <vscale x 2 x i16> @intrinsic_vnclip_mask_wv_nxv2i16_nxv2i32_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i32> %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vnclip_mask_wv_nxv2i16_nxv2i32_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu
+; CHECK-NEXT: csrwi vxrm, 0
; CHECK-NEXT: vnclip.wv v8, v9, v10, v0.t
; CHECK-NEXT: ret
entry:
<vscale x 2 x i32> %1,
<vscale x 2 x i16> %2,
<vscale x 2 x i1> %3,
- iXLen %4, iXLen 1)
+ iXLen 0, iXLen %4, iXLen 1)
ret <vscale x 2 x i16> %a
}
<vscale x 4 x i16>,
<vscale x 4 x i32>,
<vscale x 4 x i16>,
- iXLen);
+ iXLen, iXLen);
define <vscale x 4 x i16> @intrinsic_vnclip_wv_nxv4i16_nxv4i32_nxv4i16(<vscale x 4 x i32> %0, <vscale x 4 x i16> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vnclip_wv_nxv4i16_nxv4i32_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
+; CHECK-NEXT: csrwi vxrm, 0
; CHECK-NEXT: vnclip.wv v11, v8, v10
; CHECK-NEXT: vmv.v.v v8, v11
; CHECK-NEXT: ret
<vscale x 4 x i16> undef,
<vscale x 4 x i32> %0,
<vscale x 4 x i16> %1,
- iXLen %2)
+ iXLen 0, iXLen %2)
ret <vscale x 4 x i16> %a
}
<vscale x 4 x i32>,
<vscale x 4 x i16>,
<vscale x 4 x i1>,
- iXLen,
- iXLen);
+ iXLen, iXLen, iXLen);
define <vscale x 4 x i16> @intrinsic_vnclip_mask_wv_nxv4i16_nxv4i32_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i32> %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vnclip_mask_wv_nxv4i16_nxv4i32_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu
+; CHECK-NEXT: csrwi vxrm, 0
; CHECK-NEXT: vnclip.wv v8, v10, v9, v0.t
; CHECK-NEXT: ret
entry:
<vscale x 4 x i32> %1,
<vscale x 4 x i16> %2,
<vscale x 4 x i1> %3,
- iXLen %4, iXLen 1)
+ iXLen 0, iXLen %4, iXLen 1)
ret <vscale x 4 x i16> %a
}
<vscale x 8 x i16>,
<vscale x 8 x i32>,
<vscale x 8 x i16>,
- iXLen);
+ iXLen, iXLen);
define <vscale x 8 x i16> @intrinsic_vnclip_wv_nxv8i16_nxv8i32_nxv8i16(<vscale x 8 x i32> %0, <vscale x 8 x i16> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vnclip_wv_nxv8i16_nxv8i32_nxv8i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
+; CHECK-NEXT: csrwi vxrm, 0
; CHECK-NEXT: vnclip.wv v14, v8, v12
; CHECK-NEXT: vmv.v.v v8, v14
; CHECK-NEXT: ret
<vscale x 8 x i16> undef,
<vscale x 8 x i32> %0,
<vscale x 8 x i16> %1,
- iXLen %2)
+ iXLen 0, iXLen %2)
ret <vscale x 8 x i16> %a
}
<vscale x 8 x i32>,
<vscale x 8 x i16>,
<vscale x 8 x i1>,
- iXLen,
- iXLen);
+ iXLen, iXLen, iXLen);
define <vscale x 8 x i16> @intrinsic_vnclip_mask_wv_nxv8i16_nxv8i32_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i32> %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vnclip_mask_wv_nxv8i16_nxv8i32_nxv8i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu
+; CHECK-NEXT: csrwi vxrm, 0
; CHECK-NEXT: vnclip.wv v8, v12, v10, v0.t
; CHECK-NEXT: ret
entry:
<vscale x 8 x i32> %1,
<vscale x 8 x i16> %2,
<vscale x 8 x i1> %3,
- iXLen %4, iXLen 1)
+ iXLen 0, iXLen %4, iXLen 1)
ret <vscale x 8 x i16> %a
}
<vscale x 16 x i16>,
<vscale x 16 x i32>,
<vscale x 16 x i16>,
- iXLen);
+ iXLen, iXLen);
define <vscale x 16 x i16> @intrinsic_vnclip_wv_nxv16i16_nxv16i32_nxv16i16(<vscale x 16 x i32> %0, <vscale x 16 x i16> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vnclip_wv_nxv16i16_nxv16i32_nxv16i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma
+; CHECK-NEXT: csrwi vxrm, 0
; CHECK-NEXT: vnclip.wv v20, v8, v16
; CHECK-NEXT: vmv.v.v v8, v20
; CHECK-NEXT: ret
<vscale x 16 x i16> undef,
<vscale x 16 x i32> %0,
<vscale x 16 x i16> %1,
- iXLen %2)
+ iXLen 0, iXLen %2)
ret <vscale x 16 x i16> %a
}
<vscale x 16 x i32>,
<vscale x 16 x i16>,
<vscale x 16 x i1>,
- iXLen,
- iXLen);
+ iXLen, iXLen, iXLen);
define <vscale x 16 x i16> @intrinsic_vnclip_mask_wv_nxv16i16_nxv16i32_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i32> %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vnclip_mask_wv_nxv16i16_nxv16i32_nxv16i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu
+; CHECK-NEXT: csrwi vxrm, 0
; CHECK-NEXT: vnclip.wv v8, v16, v12, v0.t
; CHECK-NEXT: ret
entry:
<vscale x 16 x i32> %1,
<vscale x 16 x i16> %2,
<vscale x 16 x i1> %3,
- iXLen %4, iXLen 1)
+ iXLen 0, iXLen %4, iXLen 1)
ret <vscale x 16 x i16> %a
}
<vscale x 1 x i32>,
<vscale x 1 x i64>,
<vscale x 1 x i32>,
- iXLen);
+ iXLen, iXLen);
define <vscale x 1 x i32> @intrinsic_vnclip_wv_nxv1i32_nxv1i64_nxv1i32(<vscale x 1 x i64> %0, <vscale x 1 x i32> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vnclip_wv_nxv1i32_nxv1i64_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
+; CHECK-NEXT: csrwi vxrm, 0
; CHECK-NEXT: vnclip.wv v8, v8, v9
; CHECK-NEXT: ret
entry:
<vscale x 1 x i32> undef,
<vscale x 1 x i64> %0,
<vscale x 1 x i32> %1,
- iXLen %2)
+ iXLen 0, iXLen %2)
ret <vscale x 1 x i32> %a
}
<vscale x 1 x i64>,
<vscale x 1 x i32>,
<vscale x 1 x i1>,
- iXLen,
- iXLen);
+ iXLen, iXLen, iXLen);
define <vscale x 1 x i32> @intrinsic_vnclip_mask_wv_nxv1i32_nxv1i64_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i64> %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vnclip_mask_wv_nxv1i32_nxv1i64_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu
+; CHECK-NEXT: csrwi vxrm, 0
; CHECK-NEXT: vnclip.wv v8, v9, v10, v0.t
; CHECK-NEXT: ret
entry:
<vscale x 1 x i64> %1,
<vscale x 1 x i32> %2,
<vscale x 1 x i1> %3,
- iXLen %4, iXLen 1)
+ iXLen 0, iXLen %4, iXLen 1)
ret <vscale x 1 x i32> %a
}
<vscale x 2 x i32>,
<vscale x 2 x i64>,
<vscale x 2 x i32>,
- iXLen);
+ iXLen, iXLen);
define <vscale x 2 x i32> @intrinsic_vnclip_wv_nxv2i32_nxv2i64_nxv2i32(<vscale x 2 x i64> %0, <vscale x 2 x i32> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vnclip_wv_nxv2i32_nxv2i64_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
+; CHECK-NEXT: csrwi vxrm, 0
; CHECK-NEXT: vnclip.wv v11, v8, v10
; CHECK-NEXT: vmv.v.v v8, v11
; CHECK-NEXT: ret
<vscale x 2 x i32> undef,
<vscale x 2 x i64> %0,
<vscale x 2 x i32> %1,
- iXLen %2)
+ iXLen 0, iXLen %2)
ret <vscale x 2 x i32> %a
}
<vscale x 2 x i64>,
<vscale x 2 x i32>,
<vscale x 2 x i1>,
- iXLen,
- iXLen);
+ iXLen, iXLen, iXLen);
define <vscale x 2 x i32> @intrinsic_vnclip_mask_wv_nxv2i32_nxv2i64_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i64> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vnclip_mask_wv_nxv2i32_nxv2i64_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
+; CHECK-NEXT: csrwi vxrm, 0
; CHECK-NEXT: vnclip.wv v8, v10, v9, v0.t
; CHECK-NEXT: ret
entry:
<vscale x 2 x i64> %1,
<vscale x 2 x i32> %2,
<vscale x 2 x i1> %3,
- iXLen %4, iXLen 1)
+ iXLen 0, iXLen %4, iXLen 1)
ret <vscale x 2 x i32> %a
}
<vscale x 4 x i32>,
<vscale x 4 x i64>,
<vscale x 4 x i32>,
- iXLen);
+ iXLen, iXLen);
define <vscale x 4 x i32> @intrinsic_vnclip_wv_nxv4i32_nxv4i64_nxv4i32(<vscale x 4 x i64> %0, <vscale x 4 x i32> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vnclip_wv_nxv4i32_nxv4i64_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
+; CHECK-NEXT: csrwi vxrm, 0
; CHECK-NEXT: vnclip.wv v14, v8, v12
; CHECK-NEXT: vmv.v.v v8, v14
; CHECK-NEXT: ret
<vscale x 4 x i32> undef,
<vscale x 4 x i64> %0,
<vscale x 4 x i32> %1,
- iXLen %2)
+ iXLen 0, iXLen %2)
ret <vscale x 4 x i32> %a
}
<vscale x 4 x i64>,
<vscale x 4 x i32>,
<vscale x 4 x i1>,
- iXLen,
- iXLen);
+ iXLen, iXLen, iXLen);
define <vscale x 4 x i32> @intrinsic_vnclip_mask_wv_nxv4i32_nxv4i64_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i64> %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vnclip_mask_wv_nxv4i32_nxv4i64_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu
+; CHECK-NEXT: csrwi vxrm, 0
; CHECK-NEXT: vnclip.wv v8, v12, v10, v0.t
; CHECK-NEXT: ret
entry:
<vscale x 4 x i64> %1,
<vscale x 4 x i32> %2,
<vscale x 4 x i1> %3,
- iXLen %4, iXLen 1)
+ iXLen 0, iXLen %4, iXLen 1)
ret <vscale x 4 x i32> %a
}
<vscale x 8 x i32>,
<vscale x 8 x i64>,
<vscale x 8 x i32>,
- iXLen);
+ iXLen, iXLen);
define <vscale x 8 x i32> @intrinsic_vnclip_wv_nxv8i32_nxv8i64_nxv8i32(<vscale x 8 x i64> %0, <vscale x 8 x i32> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vnclip_wv_nxv8i32_nxv8i64_nxv8i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
+; CHECK-NEXT: csrwi vxrm, 0
; CHECK-NEXT: vnclip.wv v20, v8, v16
; CHECK-NEXT: vmv.v.v v8, v20
; CHECK-NEXT: ret
<vscale x 8 x i32> undef,
<vscale x 8 x i64> %0,
<vscale x 8 x i32> %1,
- iXLen %2)
+ iXLen 0, iXLen %2)
ret <vscale x 8 x i32> %a
}
<vscale x 8 x i64>,
<vscale x 8 x i32>,
<vscale x 8 x i1>,
- iXLen,
- iXLen);
+ iXLen, iXLen, iXLen);
define <vscale x 8 x i32> @intrinsic_vnclip_mask_wv_nxv8i32_nxv8i64_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i64> %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vnclip_mask_wv_nxv8i32_nxv8i64_nxv8i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu
+; CHECK-NEXT: csrwi vxrm, 0
; CHECK-NEXT: vnclip.wv v8, v16, v12, v0.t
; CHECK-NEXT: ret
entry:
<vscale x 8 x i64> %1,
<vscale x 8 x i32> %2,
<vscale x 8 x i1> %3,
- iXLen %4, iXLen 1)
+ iXLen 0, iXLen %4, iXLen 1)
ret <vscale x 8 x i32> %a
}
declare <vscale x 1 x i8> @llvm.riscv.vnclip.nxv1i8.nxv1i16(
<vscale x 1 x i8>,
<vscale x 1 x i16>,
- iXLen,
- iXLen);
+ iXLen, iXLen, iXLen);
define <vscale x 1 x i8> @intrinsic_vnclip_vx_nxv1i8_nxv1i16(<vscale x 1 x i16> %0, iXLen %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vnclip_vx_nxv1i8_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
+; CHECK-NEXT: csrwi vxrm, 0
; CHECK-NEXT: vnclip.wx v8, v8, a0
; CHECK-NEXT: ret
entry:
<vscale x 1 x i8> undef,
<vscale x 1 x i16> %0,
iXLen %1,
- iXLen %2)
+ iXLen 0, iXLen %2)
ret <vscale x 1 x i8> %a
}
<vscale x 1 x i16>,
iXLen,
<vscale x 1 x i1>,
- iXLen,
- iXLen);
+ iXLen, iXLen, iXLen);
define <vscale x 1 x i8> @intrinsic_vnclip_mask_vx_nxv1i8_nxv1i16(<vscale x 1 x i8> %0, <vscale x 1 x i16> %1, iXLen %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vnclip_mask_vx_nxv1i8_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
+; CHECK-NEXT: csrwi vxrm, 0
; CHECK-NEXT: vnclip.wx v8, v9, a0, v0.t
; CHECK-NEXT: ret
entry:
<vscale x 1 x i16> %1,
iXLen %2,
<vscale x 1 x i1> %3,
- iXLen %4, iXLen 1)
+ iXLen 0, iXLen %4, iXLen 1)
ret <vscale x 1 x i8> %a
}
declare <vscale x 2 x i8> @llvm.riscv.vnclip.nxv2i8.nxv2i16(
<vscale x 2 x i8>,
<vscale x 2 x i16>,
- iXLen,
- iXLen);
+ iXLen, iXLen, iXLen);
define <vscale x 2 x i8> @intrinsic_vnclip_vx_nxv2i8_nxv2i16(<vscale x 2 x i16> %0, iXLen %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vnclip_vx_nxv2i8_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
+; CHECK-NEXT: csrwi vxrm, 0
; CHECK-NEXT: vnclip.wx v8, v8, a0
; CHECK-NEXT: ret
entry:
<vscale x 2 x i8> undef,
<vscale x 2 x i16> %0,
iXLen %1,
- iXLen %2)
+ iXLen 0, iXLen %2)
ret <vscale x 2 x i8> %a
}
<vscale x 2 x i16>,
iXLen,
<vscale x 2 x i1>,
- iXLen,
- iXLen);
+ iXLen, iXLen, iXLen);
define <vscale x 2 x i8> @intrinsic_vnclip_mask_vx_nxv2i8_nxv2i16(<vscale x 2 x i8> %0, <vscale x 2 x i16> %1, iXLen %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vnclip_mask_vx_nxv2i8_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
+; CHECK-NEXT: csrwi vxrm, 0
; CHECK-NEXT: vnclip.wx v8, v9, a0, v0.t
; CHECK-NEXT: ret
entry:
<vscale x 2 x i16> %1,
iXLen %2,
<vscale x 2 x i1> %3,
- iXLen %4, iXLen 1)
+ iXLen 0, iXLen %4, iXLen 1)
ret <vscale x 2 x i8> %a
}
declare <vscale x 4 x i8> @llvm.riscv.vnclip.nxv4i8.nxv4i16(
<vscale x 4 x i8>,
<vscale x 4 x i16>,
- iXLen,
- iXLen);
+ iXLen, iXLen, iXLen);
define <vscale x 4 x i8> @intrinsic_vnclip_vx_nxv4i8_nxv4i16(<vscale x 4 x i16> %0, iXLen %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vnclip_vx_nxv4i8_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
+; CHECK-NEXT: csrwi vxrm, 0
; CHECK-NEXT: vnclip.wx v8, v8, a0
; CHECK-NEXT: ret
entry:
<vscale x 4 x i8> undef,
<vscale x 4 x i16> %0,
iXLen %1,
- iXLen %2)
+ iXLen 0, iXLen %2)
ret <vscale x 4 x i8> %a
}
<vscale x 4 x i16>,
iXLen,
<vscale x 4 x i1>,
- iXLen,
- iXLen);
+ iXLen, iXLen, iXLen);
define <vscale x 4 x i8> @intrinsic_vnclip_mask_vx_nxv4i8_nxv4i16(<vscale x 4 x i8> %0, <vscale x 4 x i16> %1, iXLen %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vnclip_mask_vx_nxv4i8_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
+; CHECK-NEXT: csrwi vxrm, 0
; CHECK-NEXT: vnclip.wx v8, v9, a0, v0.t
; CHECK-NEXT: ret
entry:
<vscale x 4 x i16> %1,
iXLen %2,
<vscale x 4 x i1> %3,
- iXLen %4, iXLen 1)
+ iXLen 0, iXLen %4, iXLen 1)
ret <vscale x 4 x i8> %a
}
declare <vscale x 8 x i8> @llvm.riscv.vnclip.nxv8i8.nxv8i16(
<vscale x 8 x i8>,
<vscale x 8 x i16>,
- iXLen,
- iXLen);
+ iXLen, iXLen, iXLen);
define <vscale x 8 x i8> @intrinsic_vnclip_vx_nxv8i8_nxv8i16(<vscale x 8 x i16> %0, iXLen %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vnclip_vx_nxv8i8_nxv8i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
+; CHECK-NEXT: csrwi vxrm, 0
; CHECK-NEXT: vnclip.wx v10, v8, a0
; CHECK-NEXT: vmv.v.v v8, v10
; CHECK-NEXT: ret
<vscale x 8 x i8> undef,
<vscale x 8 x i16> %0,
iXLen %1,
- iXLen %2)
+ iXLen 0, iXLen %2)
ret <vscale x 8 x i8> %a
}
<vscale x 8 x i16>,
iXLen,
<vscale x 8 x i1>,
- iXLen,
- iXLen);
+ iXLen, iXLen, iXLen);
define <vscale x 8 x i8> @intrinsic_vnclip_mask_vx_nxv8i8_nxv8i16(<vscale x 8 x i8> %0, <vscale x 8 x i16> %1, iXLen %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vnclip_mask_vx_nxv8i8_nxv8i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
+; CHECK-NEXT: csrwi vxrm, 0
; CHECK-NEXT: vnclip.wx v8, v10, a0, v0.t
; CHECK-NEXT: ret
entry:
<vscale x 8 x i16> %1,
iXLen %2,
<vscale x 8 x i1> %3,
- iXLen %4, iXLen 1)
+ iXLen 0, iXLen %4, iXLen 1)
ret <vscale x 8 x i8> %a
}
declare <vscale x 16 x i8> @llvm.riscv.vnclip.nxv16i8.nxv16i16(
<vscale x 16 x i8>,
<vscale x 16 x i16>,
- iXLen,
- iXLen);
+ iXLen, iXLen, iXLen);
define <vscale x 16 x i8> @intrinsic_vnclip_vx_nxv16i8_nxv16i16(<vscale x 16 x i16> %0, iXLen %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vnclip_vx_nxv16i8_nxv16i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma
+; CHECK-NEXT: csrwi vxrm, 0
; CHECK-NEXT: vnclip.wx v12, v8, a0
; CHECK-NEXT: vmv.v.v v8, v12
; CHECK-NEXT: ret
<vscale x 16 x i8> undef,
<vscale x 16 x i16> %0,
iXLen %1,
- iXLen %2)
+ iXLen 0, iXLen %2)
ret <vscale x 16 x i8> %a
}
<vscale x 16 x i16>,
iXLen,
<vscale x 16 x i1>,
- iXLen,
- iXLen);
+ iXLen, iXLen, iXLen);
define <vscale x 16 x i8> @intrinsic_vnclip_mask_vx_nxv16i8_nxv16i16(<vscale x 16 x i8> %0, <vscale x 16 x i16> %1, iXLen %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vnclip_mask_vx_nxv16i8_nxv16i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu
+; CHECK-NEXT: csrwi vxrm, 0
; CHECK-NEXT: vnclip.wx v8, v12, a0, v0.t
; CHECK-NEXT: ret
entry:
<vscale x 16 x i16> %1,
iXLen %2,
<vscale x 16 x i1> %3,
- iXLen %4, iXLen 1)
+ iXLen 0, iXLen %4, iXLen 1)
ret <vscale x 16 x i8> %a
}
declare <vscale x 32 x i8> @llvm.riscv.vnclip.nxv32i8.nxv32i16(
<vscale x 32 x i8>,
<vscale x 32 x i16>,
- iXLen,
- iXLen);
+ iXLen, iXLen, iXLen);
define <vscale x 32 x i8> @intrinsic_vnclip_vx_nxv32i8_nxv32i16(<vscale x 32 x i16> %0, iXLen %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vnclip_vx_nxv32i8_nxv32i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma
+; CHECK-NEXT: csrwi vxrm, 0
; CHECK-NEXT: vnclip.wx v16, v8, a0
; CHECK-NEXT: vmv.v.v v8, v16
; CHECK-NEXT: ret
<vscale x 32 x i8> undef,
<vscale x 32 x i16> %0,
iXLen %1,
- iXLen %2)
+ iXLen 0, iXLen %2)
ret <vscale x 32 x i8> %a
}
<vscale x 32 x i16>,
iXLen,
<vscale x 32 x i1>,
- iXLen,
- iXLen);
+ iXLen, iXLen, iXLen);
define <vscale x 32 x i8> @intrinsic_vnclip_mask_vx_nxv32i8_nxv32i16(<vscale x 32 x i8> %0, <vscale x 32 x i16> %1, iXLen %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vnclip_mask_vx_nxv32i8_nxv32i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu
+; CHECK-NEXT: csrwi vxrm, 0
; CHECK-NEXT: vnclip.wx v8, v16, a0, v0.t
; CHECK-NEXT: ret
entry:
<vscale x 32 x i16> %1,
iXLen %2,
<vscale x 32 x i1> %3,
- iXLen %4, iXLen 1)
+ iXLen 0, iXLen %4, iXLen 1)
ret <vscale x 32 x i8> %a
}
declare <vscale x 1 x i16> @llvm.riscv.vnclip.nxv1i16.nxv1i32(
<vscale x 1 x i16>,
<vscale x 1 x i32>,
- iXLen,
- iXLen);
+ iXLen, iXLen, iXLen);
define <vscale x 1 x i16> @intrinsic_vnclip_vx_nxv1i16_nxv1i32(<vscale x 1 x i32> %0, iXLen %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vnclip_vx_nxv1i16_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
+; CHECK-NEXT: csrwi vxrm, 0
; CHECK-NEXT: vnclip.wx v8, v8, a0
; CHECK-NEXT: ret
entry:
<vscale x 1 x i16> undef,
<vscale x 1 x i32> %0,
iXLen %1,
- iXLen %2)
+ iXLen 0, iXLen %2)
ret <vscale x 1 x i16> %a
}
<vscale x 1 x i32>,
iXLen,
<vscale x 1 x i1>,
- iXLen,
- iXLen);
+ iXLen, iXLen, iXLen);
define <vscale x 1 x i16> @intrinsic_vnclip_mask_vx_nxv1i16_nxv1i32(<vscale x 1 x i16> %0, <vscale x 1 x i32> %1, iXLen %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vnclip_mask_vx_nxv1i16_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
+; CHECK-NEXT: csrwi vxrm, 0
; CHECK-NEXT: vnclip.wx v8, v9, a0, v0.t
; CHECK-NEXT: ret
entry:
<vscale x 1 x i32> %1,
iXLen %2,
<vscale x 1 x i1> %3,
- iXLen %4, iXLen 1)
+ iXLen 0, iXLen %4, iXLen 1)
ret <vscale x 1 x i16> %a
}
declare <vscale x 2 x i16> @llvm.riscv.vnclip.nxv2i16.nxv2i32(
<vscale x 2 x i16>,
<vscale x 2 x i32>,
- iXLen,
- iXLen);
+ iXLen, iXLen, iXLen);
define <vscale x 2 x i16> @intrinsic_vnclip_vx_nxv2i16_nxv2i32(<vscale x 2 x i32> %0, iXLen %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vnclip_vx_nxv2i16_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
+; CHECK-NEXT: csrwi vxrm, 0
; CHECK-NEXT: vnclip.wx v8, v8, a0
; CHECK-NEXT: ret
entry:
<vscale x 2 x i16> undef,
<vscale x 2 x i32> %0,
iXLen %1,
- iXLen %2)
+ iXLen 0, iXLen %2)
ret <vscale x 2 x i16> %a
}
<vscale x 2 x i32>,
iXLen,
<vscale x 2 x i1>,
- iXLen,
- iXLen);
+ iXLen, iXLen, iXLen);
define <vscale x 2 x i16> @intrinsic_vnclip_mask_vx_nxv2i16_nxv2i32(<vscale x 2 x i16> %0, <vscale x 2 x i32> %1, iXLen %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vnclip_mask_vx_nxv2i16_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
+; CHECK-NEXT: csrwi vxrm, 0
; CHECK-NEXT: vnclip.wx v8, v9, a0, v0.t
; CHECK-NEXT: ret
entry:
<vscale x 2 x i32> %1,
iXLen %2,
<vscale x 2 x i1> %3,
- iXLen %4, iXLen 1)
+ iXLen 0, iXLen %4, iXLen 1)
ret <vscale x 2 x i16> %a
}
declare <vscale x 4 x i16> @llvm.riscv.vnclip.nxv4i16.nxv4i32(
<vscale x 4 x i16>,
<vscale x 4 x i32>,
- iXLen,
- iXLen);
+ iXLen, iXLen, iXLen);
define <vscale x 4 x i16> @intrinsic_vnclip_vx_nxv4i16_nxv4i32(<vscale x 4 x i32> %0, iXLen %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vnclip_vx_nxv4i16_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
+; CHECK-NEXT: csrwi vxrm, 0
; CHECK-NEXT: vnclip.wx v10, v8, a0
; CHECK-NEXT: vmv.v.v v8, v10
; CHECK-NEXT: ret
<vscale x 4 x i16> undef,
<vscale x 4 x i32> %0,
iXLen %1,
- iXLen %2)
+ iXLen 0, iXLen %2)
ret <vscale x 4 x i16> %a
}
<vscale x 4 x i32>,
iXLen,
<vscale x 4 x i1>,
- iXLen,
- iXLen);
+ iXLen, iXLen, iXLen);
define <vscale x 4 x i16> @intrinsic_vnclip_mask_vx_nxv4i16_nxv4i32(<vscale x 4 x i16> %0, <vscale x 4 x i32> %1, iXLen %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vnclip_mask_vx_nxv4i16_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
+; CHECK-NEXT: csrwi vxrm, 0
; CHECK-NEXT: vnclip.wx v8, v10, a0, v0.t
; CHECK-NEXT: ret
entry:
<vscale x 4 x i32> %1,
iXLen %2,
<vscale x 4 x i1> %3,
- iXLen %4, iXLen 1)
+ iXLen 0, iXLen %4, iXLen 1)
ret <vscale x 4 x i16> %a
}
declare <vscale x 8 x i16> @llvm.riscv.vnclip.nxv8i16.nxv8i32(
<vscale x 8 x i16>,
<vscale x 8 x i32>,
- iXLen,
- iXLen);
+ iXLen, iXLen, iXLen);
define <vscale x 8 x i16> @intrinsic_vnclip_vx_nxv8i16_nxv8i32(<vscale x 8 x i32> %0, iXLen %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vnclip_vx_nxv8i16_nxv8i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
+; CHECK-NEXT: csrwi vxrm, 0
; CHECK-NEXT: vnclip.wx v12, v8, a0
; CHECK-NEXT: vmv.v.v v8, v12
; CHECK-NEXT: ret
<vscale x 8 x i16> undef,
<vscale x 8 x i32> %0,
iXLen %1,
- iXLen %2)
+ iXLen 0, iXLen %2)
ret <vscale x 8 x i16> %a
}
<vscale x 8 x i32>,
iXLen,
<vscale x 8 x i1>,
- iXLen,
- iXLen);
+ iXLen, iXLen, iXLen);
define <vscale x 8 x i16> @intrinsic_vnclip_mask_vx_nxv8i16_nxv8i32(<vscale x 8 x i16> %0, <vscale x 8 x i32> %1, iXLen %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vnclip_mask_vx_nxv8i16_nxv8i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
+; CHECK-NEXT: csrwi vxrm, 0
; CHECK-NEXT: vnclip.wx v8, v12, a0, v0.t
; CHECK-NEXT: ret
entry:
<vscale x 8 x i32> %1,
iXLen %2,
<vscale x 8 x i1> %3,
- iXLen %4, iXLen 1)
+ iXLen 0, iXLen %4, iXLen 1)
ret <vscale x 8 x i16> %a
}
declare <vscale x 16 x i16> @llvm.riscv.vnclip.nxv16i16.nxv16i32(
<vscale x 16 x i16>,
<vscale x 16 x i32>,
- iXLen,
- iXLen);
+ iXLen, iXLen, iXLen);
define <vscale x 16 x i16> @intrinsic_vnclip_vx_nxv16i16_nxv16i32(<vscale x 16 x i32> %0, iXLen %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vnclip_vx_nxv16i16_nxv16i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma
+; CHECK-NEXT: csrwi vxrm, 0
; CHECK-NEXT: vnclip.wx v16, v8, a0
; CHECK-NEXT: vmv.v.v v8, v16
; CHECK-NEXT: ret
<vscale x 16 x i16> undef,
<vscale x 16 x i32> %0,
iXLen %1,
- iXLen %2)
+ iXLen 0, iXLen %2)
ret <vscale x 16 x i16> %a
}
<vscale x 16 x i32>,
iXLen,
<vscale x 16 x i1>,
- iXLen,
- iXLen);
+ iXLen, iXLen, iXLen);
define <vscale x 16 x i16> @intrinsic_vnclip_mask_vx_nxv16i16_nxv16i32(<vscale x 16 x i16> %0, <vscale x 16 x i32> %1, iXLen %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vnclip_mask_vx_nxv16i16_nxv16i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu
+; CHECK-NEXT: csrwi vxrm, 0
; CHECK-NEXT: vnclip.wx v8, v16, a0, v0.t
; CHECK-NEXT: ret
entry:
<vscale x 16 x i32> %1,
iXLen %2,
<vscale x 16 x i1> %3,
- iXLen %4, iXLen 1)
+ iXLen 0, iXLen %4, iXLen 1)
ret <vscale x 16 x i16> %a
}
declare <vscale x 1 x i32> @llvm.riscv.vnclip.nxv1i32.nxv1i64(
<vscale x 1 x i32>,
<vscale x 1 x i64>,
- iXLen,
- iXLen);
+ iXLen, iXLen, iXLen);
define <vscale x 1 x i32> @intrinsic_vnclip_vx_nxv1i32_nxv1i64(<vscale x 1 x i64> %0, iXLen %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vnclip_vx_nxv1i32_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
+; CHECK-NEXT: csrwi vxrm, 0
; CHECK-NEXT: vnclip.wx v8, v8, a0
; CHECK-NEXT: ret
entry:
<vscale x 1 x i32> undef,
<vscale x 1 x i64> %0,
iXLen %1,
- iXLen %2)
+ iXLen 0, iXLen %2)
ret <vscale x 1 x i32> %a
}
<vscale x 1 x i64>,
iXLen,
<vscale x 1 x i1>,
- iXLen,
- iXLen);
+ iXLen, iXLen, iXLen);
define <vscale x 1 x i32> @intrinsic_vnclip_mask_vx_nxv1i32_nxv1i64(<vscale x 1 x i32> %0, <vscale x 1 x i64> %1, iXLen %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vnclip_mask_vx_nxv1i32_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
+; CHECK-NEXT: csrwi vxrm, 0
; CHECK-NEXT: vnclip.wx v8, v9, a0, v0.t
; CHECK-NEXT: ret
entry:
<vscale x 1 x i64> %1,
iXLen %2,
<vscale x 1 x i1> %3,
- iXLen %4, iXLen 1)
+ iXLen 0, iXLen %4, iXLen 1)
ret <vscale x 1 x i32> %a
}
declare <vscale x 2 x i32> @llvm.riscv.vnclip.nxv2i32.nxv2i64(
<vscale x 2 x i32>,
<vscale x 2 x i64>,
- iXLen,
- iXLen);
+ iXLen, iXLen, iXLen);
define <vscale x 2 x i32> @intrinsic_vnclip_vx_nxv2i32_nxv2i64(<vscale x 2 x i64> %0, iXLen %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vnclip_vx_nxv2i32_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
+; CHECK-NEXT: csrwi vxrm, 0
; CHECK-NEXT: vnclip.wx v10, v8, a0
; CHECK-NEXT: vmv.v.v v8, v10
; CHECK-NEXT: ret
<vscale x 2 x i32> undef,
<vscale x 2 x i64> %0,
iXLen %1,
- iXLen %2)
+ iXLen 0, iXLen %2)
ret <vscale x 2 x i32> %a
}
<vscale x 2 x i64>,
iXLen,
<vscale x 2 x i1>,
- iXLen,
- iXLen);
+ iXLen, iXLen, iXLen);
define <vscale x 2 x i32> @intrinsic_vnclip_mask_vx_nxv2i32_nxv2i64(<vscale x 2 x i32> %0, <vscale x 2 x i64> %1, iXLen %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vnclip_mask_vx_nxv2i32_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
+; CHECK-NEXT: csrwi vxrm, 0
; CHECK-NEXT: vnclip.wx v8, v10, a0, v0.t
; CHECK-NEXT: ret
entry:
<vscale x 2 x i64> %1,
iXLen %2,
<vscale x 2 x i1> %3,
- iXLen %4, iXLen 1)
+ iXLen 0, iXLen %4, iXLen 1)
ret <vscale x 2 x i32> %a
}
declare <vscale x 4 x i32> @llvm.riscv.vnclip.nxv4i32.nxv4i64(
<vscale x 4 x i32>,
<vscale x 4 x i64>,
- iXLen,
- iXLen);
+ iXLen, iXLen, iXLen);
define <vscale x 4 x i32> @intrinsic_vnclip_vx_nxv4i32_nxv4i64(<vscale x 4 x i64> %0, iXLen %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vnclip_vx_nxv4i32_nxv4i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
+; CHECK-NEXT: csrwi vxrm, 0
; CHECK-NEXT: vnclip.wx v12, v8, a0
; CHECK-NEXT: vmv.v.v v8, v12
; CHECK-NEXT: ret
<vscale x 4 x i32> undef,
<vscale x 4 x i64> %0,
iXLen %1,
- iXLen %2)
+ iXLen 0, iXLen %2)
ret <vscale x 4 x i32> %a
}
<vscale x 4 x i64>,
iXLen,
<vscale x 4 x i1>,
- iXLen,
- iXLen);
+ iXLen, iXLen, iXLen);
define <vscale x 4 x i32> @intrinsic_vnclip_mask_vx_nxv4i32_nxv4i64(<vscale x 4 x i32> %0, <vscale x 4 x i64> %1, iXLen %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vnclip_mask_vx_nxv4i32_nxv4i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
+; CHECK-NEXT: csrwi vxrm, 0
; CHECK-NEXT: vnclip.wx v8, v12, a0, v0.t
; CHECK-NEXT: ret
entry:
<vscale x 4 x i64> %1,
iXLen %2,
<vscale x 4 x i1> %3,
- iXLen %4, iXLen 1)
+ iXLen 0, iXLen %4, iXLen 1)
ret <vscale x 4 x i32> %a
}
declare <vscale x 8 x i32> @llvm.riscv.vnclip.nxv8i32.nxv8i64(
<vscale x 8 x i32>,
<vscale x 8 x i64>,
- iXLen,
- iXLen);
+ iXLen, iXLen, iXLen);
define <vscale x 8 x i32> @intrinsic_vnclip_vx_nxv8i32_nxv8i64(<vscale x 8 x i64> %0, iXLen %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vnclip_vx_nxv8i32_nxv8i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma
+; CHECK-NEXT: csrwi vxrm, 0
; CHECK-NEXT: vnclip.wx v16, v8, a0
; CHECK-NEXT: vmv.v.v v8, v16
; CHECK-NEXT: ret
<vscale x 8 x i32> undef,
<vscale x 8 x i64> %0,
iXLen %1,
- iXLen %2)
+ iXLen 0, iXLen %2)
ret <vscale x 8 x i32> %a
}
<vscale x 8 x i64>,
iXLen,
<vscale x 8 x i1>,
- iXLen,
- iXLen);
+ iXLen, iXLen, iXLen);
define <vscale x 8 x i32> @intrinsic_vnclip_mask_vx_nxv8i32_nxv8i64(<vscale x 8 x i32> %0, <vscale x 8 x i64> %1, iXLen %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vnclip_mask_vx_nxv8i32_nxv8i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu
+; CHECK-NEXT: csrwi vxrm, 0
; CHECK-NEXT: vnclip.wx v8, v16, a0, v0.t
; CHECK-NEXT: ret
entry:
<vscale x 8 x i64> %1,
iXLen %2,
<vscale x 8 x i1> %3,
- iXLen %4, iXLen 1)
+ iXLen 0, iXLen %4, iXLen 1)
ret <vscale x 8 x i32> %a
}
; CHECK-LABEL: intrinsic_vnclip_vi_nxv1i8_nxv1i16_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
+; CHECK-NEXT: csrwi vxrm, 0
; CHECK-NEXT: vnclip.wi v8, v8, 9
; CHECK-NEXT: ret
entry:
<vscale x 1 x i8> undef,
<vscale x 1 x i16> %0,
iXLen 9,
- iXLen %1)
+ iXLen 0, iXLen %1)
ret <vscale x 1 x i8> %a
}
; CHECK-LABEL: intrinsic_vnclip_mask_vi_nxv1i8_nxv1i16_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu
+; CHECK-NEXT: csrwi vxrm, 0
; CHECK-NEXT: vnclip.wi v8, v9, 9, v0.t
; CHECK-NEXT: ret
entry:
<vscale x 1 x i16> %1,
iXLen 9,
<vscale x 1 x i1> %2,
- iXLen %3, iXLen 1)
+ iXLen 0, iXLen %3, iXLen 1)
ret <vscale x 1 x i8> %a
}
; CHECK-LABEL: intrinsic_vnclip_vi_nxv2i8_nxv2i16_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma
+; CHECK-NEXT: csrwi vxrm, 0
; CHECK-NEXT: vnclip.wi v8, v8, 9
; CHECK-NEXT: ret
entry:
<vscale x 2 x i8> undef,
<vscale x 2 x i16> %0,
iXLen 9,
- iXLen %1)
+ iXLen 0, iXLen %1)
ret <vscale x 2 x i8> %a
}
; CHECK-LABEL: intrinsic_vnclip_mask_vi_nxv2i8_nxv2i16_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu
+; CHECK-NEXT: csrwi vxrm, 0
; CHECK-NEXT: vnclip.wi v8, v9, 9, v0.t
; CHECK-NEXT: ret
entry:
<vscale x 2 x i16> %1,
iXLen 9,
<vscale x 2 x i1> %2,
- iXLen %3, iXLen 1)
+ iXLen 0, iXLen %3, iXLen 1)
ret <vscale x 2 x i8> %a
}
; CHECK-LABEL: intrinsic_vnclip_vi_nxv4i8_nxv4i16_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
+; CHECK-NEXT: csrwi vxrm, 0
; CHECK-NEXT: vnclip.wi v8, v8, 9
; CHECK-NEXT: ret
entry:
<vscale x 4 x i8> undef,
<vscale x 4 x i16> %0,
iXLen 9,
- iXLen %1)
+ iXLen 0, iXLen %1)
ret <vscale x 4 x i8> %a
}
; CHECK-LABEL: intrinsic_vnclip_mask_vi_nxv4i8_nxv4i16_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu
+; CHECK-NEXT: csrwi vxrm, 0
; CHECK-NEXT: vnclip.wi v8, v9, 9, v0.t
; CHECK-NEXT: ret
entry:
<vscale x 4 x i16> %1,
iXLen 9,
<vscale x 4 x i1> %2,
- iXLen %3, iXLen 1)
+ iXLen 0, iXLen %3, iXLen 1)
ret <vscale x 4 x i8> %a
}
; CHECK-LABEL: intrinsic_vnclip_vi_nxv8i8_nxv8i16_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma
+; CHECK-NEXT: csrwi vxrm, 0
; CHECK-NEXT: vnclip.wi v10, v8, 9
; CHECK-NEXT: vmv.v.v v8, v10
; CHECK-NEXT: ret
<vscale x 8 x i8> undef,
<vscale x 8 x i16> %0,
iXLen 9,
- iXLen %1)
+ iXLen 0, iXLen %1)
ret <vscale x 8 x i8> %a
}
; CHECK-LABEL: intrinsic_vnclip_mask_vi_nxv8i8_nxv8i16_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu
+; CHECK-NEXT: csrwi vxrm, 0
; CHECK-NEXT: vnclip.wi v8, v10, 9, v0.t
; CHECK-NEXT: ret
entry:
<vscale x 8 x i16> %1,
iXLen 9,
<vscale x 8 x i1> %2,
- iXLen %3, iXLen 1)
+ iXLen 0, iXLen %3, iXLen 1)
ret <vscale x 8 x i8> %a
}
; CHECK-LABEL: intrinsic_vnclip_vi_nxv16i8_nxv16i16_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma
+; CHECK-NEXT: csrwi vxrm, 0
; CHECK-NEXT: vnclip.wi v12, v8, 9
; CHECK-NEXT: vmv.v.v v8, v12
; CHECK-NEXT: ret
<vscale x 16 x i8> undef,
<vscale x 16 x i16> %0,
iXLen 9,
- iXLen %1)
+ iXLen 0, iXLen %1)
ret <vscale x 16 x i8> %a
}
; CHECK-LABEL: intrinsic_vnclip_mask_vi_nxv16i8_nxv16i16_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu
+; CHECK-NEXT: csrwi vxrm, 0
; CHECK-NEXT: vnclip.wi v8, v12, 9, v0.t
; CHECK-NEXT: ret
entry:
<vscale x 16 x i16> %1,
iXLen 9,
<vscale x 16 x i1> %2,
- iXLen %3, iXLen 1)
+ iXLen 0, iXLen %3, iXLen 1)
ret <vscale x 16 x i8> %a
}
; CHECK-LABEL: intrinsic_vnclip_vi_nxv32i8_nxv32i16_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma
+; CHECK-NEXT: csrwi vxrm, 0
; CHECK-NEXT: vnclip.wi v16, v8, 9
; CHECK-NEXT: vmv.v.v v8, v16
; CHECK-NEXT: ret
<vscale x 32 x i8> undef,
<vscale x 32 x i16> %0,
iXLen 9,
- iXLen %1)
+ iXLen 0, iXLen %1)
ret <vscale x 32 x i8> %a
}
; CHECK-LABEL: intrinsic_vnclip_mask_vi_nxv32i8_nxv32i16_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu
+; CHECK-NEXT: csrwi vxrm, 0
; CHECK-NEXT: vnclip.wi v8, v16, 9, v0.t
; CHECK-NEXT: ret
entry:
<vscale x 32 x i16> %1,
iXLen 9,
<vscale x 32 x i1> %2,
- iXLen %3, iXLen 1)
+ iXLen 0, iXLen %3, iXLen 1)
ret <vscale x 32 x i8> %a
}
; CHECK-LABEL: intrinsic_vnclip_vi_nxv1i16_nxv1i32_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
+; CHECK-NEXT: csrwi vxrm, 0
; CHECK-NEXT: vnclip.wi v8, v8, 9
; CHECK-NEXT: ret
entry:
<vscale x 1 x i16> undef,
<vscale x 1 x i32> %0,
iXLen 9,
- iXLen %1)
+ iXLen 0, iXLen %1)
ret <vscale x 1 x i16> %a
}
; CHECK-LABEL: intrinsic_vnclip_mask_vi_nxv1i16_nxv1i32_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
+; CHECK-NEXT: csrwi vxrm, 0
; CHECK-NEXT: vnclip.wi v8, v9, 9, v0.t
; CHECK-NEXT: ret
entry:
<vscale x 1 x i32> %1,
iXLen 9,
<vscale x 1 x i1> %2,
- iXLen %3, iXLen 1)
+ iXLen 0, iXLen %3, iXLen 1)
ret <vscale x 1 x i16> %a
}
; CHECK-LABEL: intrinsic_vnclip_vi_nxv2i16_nxv2i32_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
+; CHECK-NEXT: csrwi vxrm, 0
; CHECK-NEXT: vnclip.wi v8, v8, 9
; CHECK-NEXT: ret
entry:
<vscale x 2 x i16> undef,
<vscale x 2 x i32> %0,
iXLen 9,
- iXLen %1)
+ iXLen 0, iXLen %1)
ret <vscale x 2 x i16> %a
}
; CHECK-LABEL: intrinsic_vnclip_mask_vi_nxv2i16_nxv2i32_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu
+; CHECK-NEXT: csrwi vxrm, 0
; CHECK-NEXT: vnclip.wi v8, v9, 9, v0.t
; CHECK-NEXT: ret
entry:
<vscale x 2 x i32> %1,
iXLen 9,
<vscale x 2 x i1> %2,
- iXLen %3, iXLen 1)
+ iXLen 0, iXLen %3, iXLen 1)
ret <vscale x 2 x i16> %a
}
; CHECK-LABEL: intrinsic_vnclip_vi_nxv4i16_nxv4i32_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
+; CHECK-NEXT: csrwi vxrm, 0
; CHECK-NEXT: vnclip.wi v10, v8, 9
; CHECK-NEXT: vmv.v.v v8, v10
; CHECK-NEXT: ret
<vscale x 4 x i16> undef,
<vscale x 4 x i32> %0,
iXLen 9,
- iXLen %1)
+ iXLen 0, iXLen %1)
ret <vscale x 4 x i16> %a
}
; CHECK-LABEL: intrinsic_vnclip_mask_vi_nxv4i16_nxv4i32_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu
+; CHECK-NEXT: csrwi vxrm, 0
; CHECK-NEXT: vnclip.wi v8, v10, 9, v0.t
; CHECK-NEXT: ret
entry:
<vscale x 4 x i32> %1,
iXLen 9,
<vscale x 4 x i1> %2,
- iXLen %3, iXLen 1)
+ iXLen 0, iXLen %3, iXLen 1)
ret <vscale x 4 x i16> %a
}
; CHECK-LABEL: intrinsic_vnclip_vi_nxv8i16_nxv8i32_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
+; CHECK-NEXT: csrwi vxrm, 0
; CHECK-NEXT: vnclip.wi v12, v8, 9
; CHECK-NEXT: vmv.v.v v8, v12
; CHECK-NEXT: ret
<vscale x 8 x i16> undef,
<vscale x 8 x i32> %0,
iXLen 9,
- iXLen %1)
+ iXLen 0, iXLen %1)
ret <vscale x 8 x i16> %a
}
; CHECK-LABEL: intrinsic_vnclip_mask_vi_nxv8i16_nxv8i32_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu
+; CHECK-NEXT: csrwi vxrm, 0
; CHECK-NEXT: vnclip.wi v8, v12, 9, v0.t
; CHECK-NEXT: ret
entry:
<vscale x 8 x i32> %1,
iXLen 9,
<vscale x 8 x i1> %2,
- iXLen %3, iXLen 1)
+ iXLen 0, iXLen %3, iXLen 1)
ret <vscale x 8 x i16> %a
}
; CHECK-LABEL: intrinsic_vnclip_vi_nxv16i16_nxv16i32_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma
+; CHECK-NEXT: csrwi vxrm, 0
; CHECK-NEXT: vnclip.wi v16, v8, 9
; CHECK-NEXT: vmv.v.v v8, v16
; CHECK-NEXT: ret
<vscale x 16 x i16> undef,
<vscale x 16 x i32> %0,
iXLen 9,
- iXLen %1)
+ iXLen 0, iXLen %1)
ret <vscale x 16 x i16> %a
}
; CHECK-LABEL: intrinsic_vnclip_mask_vi_nxv16i16_nxv16i32_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu
+; CHECK-NEXT: csrwi vxrm, 0
; CHECK-NEXT: vnclip.wi v8, v16, 9, v0.t
; CHECK-NEXT: ret
entry:
<vscale x 16 x i32> %1,
iXLen 9,
<vscale x 16 x i1> %2,
- iXLen %3, iXLen 1)
+ iXLen 0, iXLen %3, iXLen 1)
ret <vscale x 16 x i16> %a
}
; CHECK-LABEL: intrinsic_vnclip_vi_nxv1i32_nxv1i64_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
+; CHECK-NEXT: csrwi vxrm, 0
; CHECK-NEXT: vnclip.wi v8, v8, 9
; CHECK-NEXT: ret
entry:
<vscale x 1 x i32> undef,
<vscale x 1 x i64> %0,
iXLen 9,
- iXLen %1)
+ iXLen 0, iXLen %1)
ret <vscale x 1 x i32> %a
}
; CHECK-LABEL: intrinsic_vnclip_mask_vi_nxv1i32_nxv1i64_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu
+; CHECK-NEXT: csrwi vxrm, 0
; CHECK-NEXT: vnclip.wi v8, v9, 9, v0.t
; CHECK-NEXT: ret
entry:
<vscale x 1 x i64> %1,
iXLen 9,
<vscale x 1 x i1> %2,
- iXLen %3, iXLen 1)
+ iXLen 0, iXLen %3, iXLen 1)
ret <vscale x 1 x i32> %a
}
; CHECK-LABEL: intrinsic_vnclip_vi_nxv2i32_nxv2i64_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
+; CHECK-NEXT: csrwi vxrm, 0
; CHECK-NEXT: vnclip.wi v10, v8, 9
; CHECK-NEXT: vmv.v.v v8, v10
; CHECK-NEXT: ret
<vscale x 2 x i32> undef,
<vscale x 2 x i64> %0,
iXLen 9,
- iXLen %1)
+ iXLen 0, iXLen %1)
ret <vscale x 2 x i32> %a
}
; CHECK-LABEL: intrinsic_vnclip_mask_vi_nxv2i32_nxv2i64_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
+; CHECK-NEXT: csrwi vxrm, 0
; CHECK-NEXT: vnclip.wi v8, v10, 9, v0.t
; CHECK-NEXT: ret
entry:
<vscale x 2 x i64> %1,
iXLen 9,
<vscale x 2 x i1> %2,
- iXLen %3, iXLen 1)
+ iXLen 0, iXLen %3, iXLen 1)
ret <vscale x 2 x i32> %a
}
; CHECK-LABEL: intrinsic_vnclip_vi_nxv4i32_nxv4i64_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
+; CHECK-NEXT: csrwi vxrm, 0
; CHECK-NEXT: vnclip.wi v12, v8, 9
; CHECK-NEXT: vmv.v.v v8, v12
; CHECK-NEXT: ret
<vscale x 4 x i32> undef,
<vscale x 4 x i64> %0,
iXLen 9,
- iXLen %1)
+ iXLen 0, iXLen %1)
ret <vscale x 4 x i32> %a
}
; CHECK-LABEL: intrinsic_vnclip_mask_vi_nxv4i32_nxv4i64_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu
+; CHECK-NEXT: csrwi vxrm, 0
; CHECK-NEXT: vnclip.wi v8, v12, 9, v0.t
; CHECK-NEXT: ret
entry:
<vscale x 4 x i64> %1,
iXLen 9,
<vscale x 4 x i1> %2,
- iXLen %3, iXLen 1)
+ iXLen 0, iXLen %3, iXLen 1)
ret <vscale x 4 x i32> %a
}
; CHECK-LABEL: intrinsic_vnclip_vi_nxv8i32_nxv8i64_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
+; CHECK-NEXT: csrwi vxrm, 0
; CHECK-NEXT: vnclip.wi v16, v8, 9
; CHECK-NEXT: vmv.v.v v8, v16
; CHECK-NEXT: ret
<vscale x 8 x i32> undef,
<vscale x 8 x i64> %0,
iXLen 9,
- iXLen %1)
+ iXLen 0, iXLen %1)
ret <vscale x 8 x i32> %a
}
; CHECK-LABEL: intrinsic_vnclip_mask_vi_nxv8i32_nxv8i64_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu
+; CHECK-NEXT: csrwi vxrm, 0
; CHECK-NEXT: vnclip.wi v8, v16, 9, v0.t
; CHECK-NEXT: ret
entry:
<vscale x 8 x i64> %1,
iXLen 9,
<vscale x 8 x i1> %2,
- iXLen %3, iXLen 1)
+ iXLen 0, iXLen %3, iXLen 1)
ret <vscale x 8 x i32> %a
}
<vscale x 1 x i8>,
<vscale x 1 x i16>,
<vscale x 1 x i8>,
- iXLen);
+ iXLen, iXLen);
define <vscale x 1 x i8> @intrinsic_vnclipu_wv_nxv1i8_nxv1i16_nxv1i8(<vscale x 1 x i16> %0, <vscale x 1 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vnclipu_wv_nxv1i8_nxv1i16_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
+; CHECK-NEXT: csrwi vxrm, 0
; CHECK-NEXT: vnclipu.wv v8, v8, v9
; CHECK-NEXT: ret
entry:
<vscale x 1 x i8> undef,
<vscale x 1 x i16> %0,
<vscale x 1 x i8> %1,
- iXLen %2)
+ iXLen 0, iXLen %2)
ret <vscale x 1 x i8> %a
}
<vscale x 1 x i16>,
<vscale x 1 x i8>,
<vscale x 1 x i1>,
- iXLen,
- iXLen);
+ iXLen, iXLen, iXLen);
define <vscale x 1 x i8> @intrinsic_vnclipu_mask_wv_nxv1i8_nxv1i16_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i16> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vnclipu_mask_wv_nxv1i8_nxv1i16_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu
+; CHECK-NEXT: csrwi vxrm, 0
; CHECK-NEXT: vnclipu.wv v8, v9, v10, v0.t
; CHECK-NEXT: ret
entry:
<vscale x 1 x i16> %1,
<vscale x 1 x i8> %2,
<vscale x 1 x i1> %3,
- iXLen %4, iXLen 1)
+ iXLen 0, iXLen %4, iXLen 1)
ret <vscale x 1 x i8> %a
}
<vscale x 2 x i8>,
<vscale x 2 x i16>,
<vscale x 2 x i8>,
- iXLen);
+ iXLen, iXLen);
define <vscale x 2 x i8> @intrinsic_vnclipu_wv_nxv2i8_nxv2i16_nxv2i8(<vscale x 2 x i16> %0, <vscale x 2 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vnclipu_wv_nxv2i8_nxv2i16_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma
+; CHECK-NEXT: csrwi vxrm, 0
; CHECK-NEXT: vnclipu.wv v8, v8, v9
; CHECK-NEXT: ret
entry:
<vscale x 2 x i8> undef,
<vscale x 2 x i16> %0,
<vscale x 2 x i8> %1,
- iXLen %2)
+ iXLen 0, iXLen %2)
ret <vscale x 2 x i8> %a
}
<vscale x 2 x i16>,
<vscale x 2 x i8>,
<vscale x 2 x i1>,
- iXLen,
- iXLen);
+ iXLen, iXLen, iXLen);
define <vscale x 2 x i8> @intrinsic_vnclipu_mask_wv_nxv2i8_nxv2i16_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i16> %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vnclipu_mask_wv_nxv2i8_nxv2i16_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu
+; CHECK-NEXT: csrwi vxrm, 0
; CHECK-NEXT: vnclipu.wv v8, v9, v10, v0.t
; CHECK-NEXT: ret
entry:
<vscale x 2 x i16> %1,
<vscale x 2 x i8> %2,
<vscale x 2 x i1> %3,
- iXLen %4, iXLen 1)
+ iXLen 0, iXLen %4, iXLen 1)
ret <vscale x 2 x i8> %a
}
<vscale x 4 x i8>,
<vscale x 4 x i16>,
<vscale x 4 x i8>,
- iXLen);
+ iXLen, iXLen);
define <vscale x 4 x i8> @intrinsic_vnclipu_wv_nxv4i8_nxv4i16_nxv4i8(<vscale x 4 x i16> %0, <vscale x 4 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vnclipu_wv_nxv4i8_nxv4i16_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
+; CHECK-NEXT: csrwi vxrm, 0
; CHECK-NEXT: vnclipu.wv v8, v8, v9
; CHECK-NEXT: ret
entry:
<vscale x 4 x i8> undef,
<vscale x 4 x i16> %0,
<vscale x 4 x i8> %1,
- iXLen %2)
+ iXLen 0, iXLen %2)
ret <vscale x 4 x i8> %a
}
<vscale x 4 x i16>,
<vscale x 4 x i8>,
<vscale x 4 x i1>,
- iXLen,
- iXLen);
+ iXLen, iXLen, iXLen);
define <vscale x 4 x i8> @intrinsic_vnclipu_mask_wv_nxv4i8_nxv4i16_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i16> %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vnclipu_mask_wv_nxv4i8_nxv4i16_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu
+; CHECK-NEXT: csrwi vxrm, 0
; CHECK-NEXT: vnclipu.wv v8, v9, v10, v0.t
; CHECK-NEXT: ret
entry:
<vscale x 4 x i16> %1,
<vscale x 4 x i8> %2,
<vscale x 4 x i1> %3,
- iXLen %4, iXLen 1)
+ iXLen 0, iXLen %4, iXLen 1)
ret <vscale x 4 x i8> %a
}
<vscale x 8 x i8>,
<vscale x 8 x i16>,
<vscale x 8 x i8>,
- iXLen);
+ iXLen, iXLen);
define <vscale x 8 x i8> @intrinsic_vnclipu_wv_nxv8i8_nxv8i16_nxv8i8(<vscale x 8 x i16> %0, <vscale x 8 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vnclipu_wv_nxv8i8_nxv8i16_nxv8i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma
+; CHECK-NEXT: csrwi vxrm, 0
; CHECK-NEXT: vnclipu.wv v11, v8, v10
; CHECK-NEXT: vmv.v.v v8, v11
; CHECK-NEXT: ret
<vscale x 8 x i8> undef,
<vscale x 8 x i16> %0,
<vscale x 8 x i8> %1,
- iXLen %2)
+ iXLen 0, iXLen %2)
ret <vscale x 8 x i8> %a
}
<vscale x 8 x i16>,
<vscale x 8 x i8>,
<vscale x 8 x i1>,
- iXLen,
- iXLen);
+ iXLen, iXLen, iXLen);
define <vscale x 8 x i8> @intrinsic_vnclipu_mask_wv_nxv8i8_nxv8i16_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i16> %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vnclipu_mask_wv_nxv8i8_nxv8i16_nxv8i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu
+; CHECK-NEXT: csrwi vxrm, 0
; CHECK-NEXT: vnclipu.wv v8, v10, v9, v0.t
; CHECK-NEXT: ret
entry:
<vscale x 8 x i16> %1,
<vscale x 8 x i8> %2,
<vscale x 8 x i1> %3,
- iXLen %4, iXLen 1)
+ iXLen 0, iXLen %4, iXLen 1)
ret <vscale x 8 x i8> %a
}
<vscale x 16 x i8>,
<vscale x 16 x i16>,
<vscale x 16 x i8>,
- iXLen);
+ iXLen, iXLen);
define <vscale x 16 x i8> @intrinsic_vnclipu_wv_nxv16i8_nxv16i16_nxv16i8(<vscale x 16 x i16> %0, <vscale x 16 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vnclipu_wv_nxv16i8_nxv16i16_nxv16i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma
+; CHECK-NEXT: csrwi vxrm, 0
; CHECK-NEXT: vnclipu.wv v14, v8, v12
; CHECK-NEXT: vmv.v.v v8, v14
; CHECK-NEXT: ret
<vscale x 16 x i8> undef,
<vscale x 16 x i16> %0,
<vscale x 16 x i8> %1,
- iXLen %2)
+ iXLen 0, iXLen %2)
ret <vscale x 16 x i8> %a
}
<vscale x 16 x i16>,
<vscale x 16 x i8>,
<vscale x 16 x i1>,
- iXLen,
- iXLen);
+ iXLen, iXLen, iXLen);
define <vscale x 16 x i8> @intrinsic_vnclipu_mask_wv_nxv16i8_nxv16i16_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i16> %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vnclipu_mask_wv_nxv16i8_nxv16i16_nxv16i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu
+; CHECK-NEXT: csrwi vxrm, 0
; CHECK-NEXT: vnclipu.wv v8, v12, v10, v0.t
; CHECK-NEXT: ret
entry:
<vscale x 16 x i16> %1,
<vscale x 16 x i8> %2,
<vscale x 16 x i1> %3,
- iXLen %4, iXLen 1)
+ iXLen 0, iXLen %4, iXLen 1)
ret <vscale x 16 x i8> %a
}
<vscale x 32 x i8>,
<vscale x 32 x i16>,
<vscale x 32 x i8>,
- iXLen);
+ iXLen, iXLen);
define <vscale x 32 x i8> @intrinsic_vnclipu_wv_nxv32i8_nxv32i16_nxv32i8(<vscale x 32 x i16> %0, <vscale x 32 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vnclipu_wv_nxv32i8_nxv32i16_nxv32i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma
+; CHECK-NEXT: csrwi vxrm, 0
; CHECK-NEXT: vnclipu.wv v20, v8, v16
; CHECK-NEXT: vmv.v.v v8, v20
; CHECK-NEXT: ret
<vscale x 32 x i8> undef,
<vscale x 32 x i16> %0,
<vscale x 32 x i8> %1,
- iXLen %2)
+ iXLen 0, iXLen %2)
ret <vscale x 32 x i8> %a
}
<vscale x 32 x i16>,
<vscale x 32 x i8>,
<vscale x 32 x i1>,
- iXLen,
- iXLen);
+ iXLen, iXLen, iXLen);
define <vscale x 32 x i8> @intrinsic_vnclipu_mask_wv_nxv32i8_nxv32i16_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i16> %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vnclipu_mask_wv_nxv32i8_nxv32i16_nxv32i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu
+; CHECK-NEXT: csrwi vxrm, 0
; CHECK-NEXT: vnclipu.wv v8, v16, v12, v0.t
; CHECK-NEXT: ret
entry:
<vscale x 32 x i16> %1,
<vscale x 32 x i8> %2,
<vscale x 32 x i1> %3,
- iXLen %4, iXLen 1)
+ iXLen 0, iXLen %4, iXLen 1)
ret <vscale x 32 x i8> %a
}
<vscale x 1 x i16>,
<vscale x 1 x i32>,
<vscale x 1 x i16>,
- iXLen);
+ iXLen, iXLen);
define <vscale x 1 x i16> @intrinsic_vnclipu_wv_nxv1i16_nxv1i32_nxv1i16(<vscale x 1 x i32> %0, <vscale x 1 x i16> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vnclipu_wv_nxv1i16_nxv1i32_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
+; CHECK-NEXT: csrwi vxrm, 0
; CHECK-NEXT: vnclipu.wv v8, v8, v9
; CHECK-NEXT: ret
entry:
<vscale x 1 x i16> undef,
<vscale x 1 x i32> %0,
<vscale x 1 x i16> %1,
- iXLen %2)
+ iXLen 0, iXLen %2)
ret <vscale x 1 x i16> %a
}
<vscale x 1 x i32>,
<vscale x 1 x i16>,
<vscale x 1 x i1>,
- iXLen,
- iXLen);
+ iXLen, iXLen, iXLen);
define <vscale x 1 x i16> @intrinsic_vnclipu_mask_wv_nxv1i16_nxv1i32_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i32> %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vnclipu_mask_wv_nxv1i16_nxv1i32_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
+; CHECK-NEXT: csrwi vxrm, 0
; CHECK-NEXT: vnclipu.wv v8, v9, v10, v0.t
; CHECK-NEXT: ret
entry:
<vscale x 1 x i32> %1,
<vscale x 1 x i16> %2,
<vscale x 1 x i1> %3,
- iXLen %4, iXLen 1)
+ iXLen 0, iXLen %4, iXLen 1)
ret <vscale x 1 x i16> %a
}
<vscale x 2 x i16>,
<vscale x 2 x i32>,
<vscale x 2 x i16>,
- iXLen);
+ iXLen, iXLen);
define <vscale x 2 x i16> @intrinsic_vnclipu_wv_nxv2i16_nxv2i32_nxv2i16(<vscale x 2 x i32> %0, <vscale x 2 x i16> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vnclipu_wv_nxv2i16_nxv2i32_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
+; CHECK-NEXT: csrwi vxrm, 0
; CHECK-NEXT: vnclipu.wv v8, v8, v9
; CHECK-NEXT: ret
entry:
<vscale x 2 x i16> undef,
<vscale x 2 x i32> %0,
<vscale x 2 x i16> %1,
- iXLen %2)
+ iXLen 0, iXLen %2)
ret <vscale x 2 x i16> %a
}
<vscale x 2 x i32>,
<vscale x 2 x i16>,
<vscale x 2 x i1>,
- iXLen,
- iXLen);
+ iXLen, iXLen, iXLen);
define <vscale x 2 x i16> @intrinsic_vnclipu_mask_wv_nxv2i16_nxv2i32_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i32> %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vnclipu_mask_wv_nxv2i16_nxv2i32_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu
+; CHECK-NEXT: csrwi vxrm, 0
; CHECK-NEXT: vnclipu.wv v8, v9, v10, v0.t
; CHECK-NEXT: ret
entry:
<vscale x 2 x i32> %1,
<vscale x 2 x i16> %2,
<vscale x 2 x i1> %3,
- iXLen %4, iXLen 1)
+ iXLen 0, iXLen %4, iXLen 1)
ret <vscale x 2 x i16> %a
}
<vscale x 4 x i16>,
<vscale x 4 x i32>,
<vscale x 4 x i16>,
- iXLen);
+ iXLen, iXLen);
define <vscale x 4 x i16> @intrinsic_vnclipu_wv_nxv4i16_nxv4i32_nxv4i16(<vscale x 4 x i32> %0, <vscale x 4 x i16> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vnclipu_wv_nxv4i16_nxv4i32_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
+; CHECK-NEXT: csrwi vxrm, 0
; CHECK-NEXT: vnclipu.wv v11, v8, v10
; CHECK-NEXT: vmv.v.v v8, v11
; CHECK-NEXT: ret
<vscale x 4 x i16> undef,
<vscale x 4 x i32> %0,
<vscale x 4 x i16> %1,
- iXLen %2)
+ iXLen 0, iXLen %2)
ret <vscale x 4 x i16> %a
}
<vscale x 4 x i32>,
<vscale x 4 x i16>,
<vscale x 4 x i1>,
- iXLen,
- iXLen);
+ iXLen, iXLen, iXLen);
define <vscale x 4 x i16> @intrinsic_vnclipu_mask_wv_nxv4i16_nxv4i32_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i32> %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vnclipu_mask_wv_nxv4i16_nxv4i32_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu
+; CHECK-NEXT: csrwi vxrm, 0
; CHECK-NEXT: vnclipu.wv v8, v10, v9, v0.t
; CHECK-NEXT: ret
entry:
<vscale x 4 x i32> %1,
<vscale x 4 x i16> %2,
<vscale x 4 x i1> %3,
- iXLen %4, iXLen 1)
+ iXLen 0, iXLen %4, iXLen 1)
ret <vscale x 4 x i16> %a
}
<vscale x 8 x i16>,
<vscale x 8 x i32>,
<vscale x 8 x i16>,
- iXLen);
+ iXLen, iXLen);
define <vscale x 8 x i16> @intrinsic_vnclipu_wv_nxv8i16_nxv8i32_nxv8i16(<vscale x 8 x i32> %0, <vscale x 8 x i16> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vnclipu_wv_nxv8i16_nxv8i32_nxv8i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
+; CHECK-NEXT: csrwi vxrm, 0
; CHECK-NEXT: vnclipu.wv v14, v8, v12
; CHECK-NEXT: vmv.v.v v8, v14
; CHECK-NEXT: ret
<vscale x 8 x i16> undef,
<vscale x 8 x i32> %0,
<vscale x 8 x i16> %1,
- iXLen %2)
+ iXLen 0, iXLen %2)
ret <vscale x 8 x i16> %a
}
<vscale x 8 x i32>,
<vscale x 8 x i16>,
<vscale x 8 x i1>,
- iXLen,
- iXLen);
+ iXLen, iXLen, iXLen);
define <vscale x 8 x i16> @intrinsic_vnclipu_mask_wv_nxv8i16_nxv8i32_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i32> %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vnclipu_mask_wv_nxv8i16_nxv8i32_nxv8i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu
+; CHECK-NEXT: csrwi vxrm, 0
; CHECK-NEXT: vnclipu.wv v8, v12, v10, v0.t
; CHECK-NEXT: ret
entry:
<vscale x 8 x i32> %1,
<vscale x 8 x i16> %2,
<vscale x 8 x i1> %3,
- iXLen %4, iXLen 1)
+ iXLen 0, iXLen %4, iXLen 1)
ret <vscale x 8 x i16> %a
}
<vscale x 16 x i16>,
<vscale x 16 x i32>,
<vscale x 16 x i16>,
- iXLen);
+ iXLen, iXLen);
define <vscale x 16 x i16> @intrinsic_vnclipu_wv_nxv16i16_nxv16i32_nxv16i16(<vscale x 16 x i32> %0, <vscale x 16 x i16> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vnclipu_wv_nxv16i16_nxv16i32_nxv16i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma
+; CHECK-NEXT: csrwi vxrm, 0
; CHECK-NEXT: vnclipu.wv v20, v8, v16
; CHECK-NEXT: vmv.v.v v8, v20
; CHECK-NEXT: ret
<vscale x 16 x i16> undef,
<vscale x 16 x i32> %0,
<vscale x 16 x i16> %1,
- iXLen %2)
+ iXLen 0, iXLen %2)
ret <vscale x 16 x i16> %a
}
<vscale x 16 x i32>,
<vscale x 16 x i16>,
<vscale x 16 x i1>,
- iXLen,
- iXLen);
+ iXLen, iXLen, iXLen);
define <vscale x 16 x i16> @intrinsic_vnclipu_mask_wv_nxv16i16_nxv16i32_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i32> %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vnclipu_mask_wv_nxv16i16_nxv16i32_nxv16i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu
+; CHECK-NEXT: csrwi vxrm, 0
; CHECK-NEXT: vnclipu.wv v8, v16, v12, v0.t
; CHECK-NEXT: ret
entry:
<vscale x 16 x i32> %1,
<vscale x 16 x i16> %2,
<vscale x 16 x i1> %3,
- iXLen %4, iXLen 1)
+ iXLen 0, iXLen %4, iXLen 1)
ret <vscale x 16 x i16> %a
}
<vscale x 1 x i32>,
<vscale x 1 x i64>,
<vscale x 1 x i32>,
- iXLen);
+ iXLen, iXLen);
define <vscale x 1 x i32> @intrinsic_vnclipu_wv_nxv1i32_nxv1i64_nxv1i32(<vscale x 1 x i64> %0, <vscale x 1 x i32> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vnclipu_wv_nxv1i32_nxv1i64_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
+; CHECK-NEXT: csrwi vxrm, 0
; CHECK-NEXT: vnclipu.wv v8, v8, v9
; CHECK-NEXT: ret
entry:
<vscale x 1 x i32> undef,
<vscale x 1 x i64> %0,
<vscale x 1 x i32> %1,
- iXLen %2)
+ iXLen 0, iXLen %2)
ret <vscale x 1 x i32> %a
}
<vscale x 1 x i64>,
<vscale x 1 x i32>,
<vscale x 1 x i1>,
- iXLen,
- iXLen);
+ iXLen, iXLen, iXLen);
define <vscale x 1 x i32> @intrinsic_vnclipu_mask_wv_nxv1i32_nxv1i64_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i64> %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vnclipu_mask_wv_nxv1i32_nxv1i64_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu
+; CHECK-NEXT: csrwi vxrm, 0
; CHECK-NEXT: vnclipu.wv v8, v9, v10, v0.t
; CHECK-NEXT: ret
entry:
<vscale x 1 x i64> %1,
<vscale x 1 x i32> %2,
<vscale x 1 x i1> %3,
- iXLen %4, iXLen 1)
+ iXLen 0, iXLen %4, iXLen 1)
ret <vscale x 1 x i32> %a
}
<vscale x 2 x i32>,
<vscale x 2 x i64>,
<vscale x 2 x i32>,
- iXLen);
+ iXLen, iXLen);
define <vscale x 2 x i32> @intrinsic_vnclipu_wv_nxv2i32_nxv2i64_nxv2i32(<vscale x 2 x i64> %0, <vscale x 2 x i32> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vnclipu_wv_nxv2i32_nxv2i64_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
+; CHECK-NEXT: csrwi vxrm, 0
; CHECK-NEXT: vnclipu.wv v11, v8, v10
; CHECK-NEXT: vmv.v.v v8, v11
; CHECK-NEXT: ret
<vscale x 2 x i32> undef,
<vscale x 2 x i64> %0,
<vscale x 2 x i32> %1,
- iXLen %2)
+ iXLen 0, iXLen %2)
ret <vscale x 2 x i32> %a
}
<vscale x 2 x i64>,
<vscale x 2 x i32>,
<vscale x 2 x i1>,
- iXLen,
- iXLen);
+ iXLen, iXLen, iXLen);
define <vscale x 2 x i32> @intrinsic_vnclipu_mask_wv_nxv2i32_nxv2i64_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i64> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vnclipu_mask_wv_nxv2i32_nxv2i64_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
+; CHECK-NEXT: csrwi vxrm, 0
; CHECK-NEXT: vnclipu.wv v8, v10, v9, v0.t
; CHECK-NEXT: ret
entry:
<vscale x 2 x i64> %1,
<vscale x 2 x i32> %2,
<vscale x 2 x i1> %3,
- iXLen %4, iXLen 1)
+ iXLen 0, iXLen %4, iXLen 1)
ret <vscale x 2 x i32> %a
}
<vscale x 4 x i32>,
<vscale x 4 x i64>,
<vscale x 4 x i32>,
- iXLen);
+ iXLen, iXLen);
define <vscale x 4 x i32> @intrinsic_vnclipu_wv_nxv4i32_nxv4i64_nxv4i32(<vscale x 4 x i64> %0, <vscale x 4 x i32> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vnclipu_wv_nxv4i32_nxv4i64_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
+; CHECK-NEXT: csrwi vxrm, 0
; CHECK-NEXT: vnclipu.wv v14, v8, v12
; CHECK-NEXT: vmv.v.v v8, v14
; CHECK-NEXT: ret
<vscale x 4 x i32> undef,
<vscale x 4 x i64> %0,
<vscale x 4 x i32> %1,
- iXLen %2)
+ iXLen 0, iXLen %2)
ret <vscale x 4 x i32> %a
}
<vscale x 4 x i64>,
<vscale x 4 x i32>,
<vscale x 4 x i1>,
- iXLen,
- iXLen);
+ iXLen, iXLen, iXLen);
define <vscale x 4 x i32> @intrinsic_vnclipu_mask_wv_nxv4i32_nxv4i64_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i64> %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vnclipu_mask_wv_nxv4i32_nxv4i64_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu
+; CHECK-NEXT: csrwi vxrm, 0
; CHECK-NEXT: vnclipu.wv v8, v12, v10, v0.t
; CHECK-NEXT: ret
entry:
<vscale x 4 x i64> %1,
<vscale x 4 x i32> %2,
<vscale x 4 x i1> %3,
- iXLen %4, iXLen 1)
+ iXLen 0, iXLen %4, iXLen 1)
ret <vscale x 4 x i32> %a
}
<vscale x 8 x i32>,
<vscale x 8 x i64>,
<vscale x 8 x i32>,
- iXLen);
+ iXLen, iXLen);
define <vscale x 8 x i32> @intrinsic_vnclipu_wv_nxv8i32_nxv8i64_nxv8i32(<vscale x 8 x i64> %0, <vscale x 8 x i32> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vnclipu_wv_nxv8i32_nxv8i64_nxv8i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
+; CHECK-NEXT: csrwi vxrm, 0
; CHECK-NEXT: vnclipu.wv v20, v8, v16
; CHECK-NEXT: vmv.v.v v8, v20
; CHECK-NEXT: ret
<vscale x 8 x i32> undef,
<vscale x 8 x i64> %0,
<vscale x 8 x i32> %1,
- iXLen %2)
+ iXLen 0, iXLen %2)
ret <vscale x 8 x i32> %a
}
<vscale x 8 x i64>,
<vscale x 8 x i32>,
<vscale x 8 x i1>,
- iXLen,
- iXLen);
+ iXLen, iXLen, iXLen);
define <vscale x 8 x i32> @intrinsic_vnclipu_mask_wv_nxv8i32_nxv8i64_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i64> %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vnclipu_mask_wv_nxv8i32_nxv8i64_nxv8i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu
+; CHECK-NEXT: csrwi vxrm, 0
; CHECK-NEXT: vnclipu.wv v8, v16, v12, v0.t
; CHECK-NEXT: ret
entry:
<vscale x 8 x i64> %1,
<vscale x 8 x i32> %2,
<vscale x 8 x i1> %3,
- iXLen %4, iXLen 1)
+ iXLen 0, iXLen %4, iXLen 1)
ret <vscale x 8 x i32> %a
}
declare <vscale x 1 x i8> @llvm.riscv.vnclipu.nxv1i8.nxv1i16(
<vscale x 1 x i8>,
<vscale x 1 x i16>,
- iXLen,
- iXLen);
+ iXLen, iXLen, iXLen);
define <vscale x 1 x i8> @intrinsic_vnclipu_vx_nxv1i8_nxv1i16(<vscale x 1 x i16> %0, iXLen %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vnclipu_vx_nxv1i8_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
+; CHECK-NEXT: csrwi vxrm, 0
; CHECK-NEXT: vnclipu.wx v8, v8, a0
; CHECK-NEXT: ret
entry:
<vscale x 1 x i8> undef,
<vscale x 1 x i16> %0,
iXLen %1,
- iXLen %2)
+ iXLen 0, iXLen %2)
ret <vscale x 1 x i8> %a
}
<vscale x 1 x i16>,
iXLen,
<vscale x 1 x i1>,
- iXLen,
- iXLen);
+ iXLen, iXLen, iXLen);
define <vscale x 1 x i8> @intrinsic_vnclipu_mask_vx_nxv1i8_nxv1i16(<vscale x 1 x i8> %0, <vscale x 1 x i16> %1, iXLen %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vnclipu_mask_vx_nxv1i8_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
+; CHECK-NEXT: csrwi vxrm, 0
; CHECK-NEXT: vnclipu.wx v8, v9, a0, v0.t
; CHECK-NEXT: ret
entry:
<vscale x 1 x i16> %1,
iXLen %2,
<vscale x 1 x i1> %3,
- iXLen %4, iXLen 1)
+ iXLen 0, iXLen %4, iXLen 1)
ret <vscale x 1 x i8> %a
}
declare <vscale x 2 x i8> @llvm.riscv.vnclipu.nxv2i8.nxv2i16(
<vscale x 2 x i8>,
<vscale x 2 x i16>,
- iXLen,
- iXLen);
+ iXLen, iXLen, iXLen);
define <vscale x 2 x i8> @intrinsic_vnclipu_vx_nxv2i8_nxv2i16(<vscale x 2 x i16> %0, iXLen %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vnclipu_vx_nxv2i8_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
+; CHECK-NEXT: csrwi vxrm, 0
; CHECK-NEXT: vnclipu.wx v8, v8, a0
; CHECK-NEXT: ret
entry:
<vscale x 2 x i8> undef,
<vscale x 2 x i16> %0,
iXLen %1,
- iXLen %2)
+ iXLen 0, iXLen %2)
ret <vscale x 2 x i8> %a
}
<vscale x 2 x i16>,
iXLen,
<vscale x 2 x i1>,
- iXLen,
- iXLen);
+ iXLen, iXLen, iXLen);
define <vscale x 2 x i8> @intrinsic_vnclipu_mask_vx_nxv2i8_nxv2i16(<vscale x 2 x i8> %0, <vscale x 2 x i16> %1, iXLen %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vnclipu_mask_vx_nxv2i8_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
+; CHECK-NEXT: csrwi vxrm, 0
; CHECK-NEXT: vnclipu.wx v8, v9, a0, v0.t
; CHECK-NEXT: ret
entry:
<vscale x 2 x i16> %1,
iXLen %2,
<vscale x 2 x i1> %3,
- iXLen %4, iXLen 1)
+ iXLen 0, iXLen %4, iXLen 1)
ret <vscale x 2 x i8> %a
}
declare <vscale x 4 x i8> @llvm.riscv.vnclipu.nxv4i8.nxv4i16(
<vscale x 4 x i8>,
<vscale x 4 x i16>,
- iXLen,
- iXLen);
+ iXLen, iXLen, iXLen);
define <vscale x 4 x i8> @intrinsic_vnclipu_vx_nxv4i8_nxv4i16(<vscale x 4 x i16> %0, iXLen %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vnclipu_vx_nxv4i8_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
+; CHECK-NEXT: csrwi vxrm, 0
; CHECK-NEXT: vnclipu.wx v8, v8, a0
; CHECK-NEXT: ret
entry:
<vscale x 4 x i8> undef,
<vscale x 4 x i16> %0,
iXLen %1,
- iXLen %2)
+ iXLen 0, iXLen %2)
ret <vscale x 4 x i8> %a
}
<vscale x 4 x i16>,
iXLen,
<vscale x 4 x i1>,
- iXLen,
- iXLen);
+ iXLen, iXLen, iXLen);
define <vscale x 4 x i8> @intrinsic_vnclipu_mask_vx_nxv4i8_nxv4i16(<vscale x 4 x i8> %0, <vscale x 4 x i16> %1, iXLen %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vnclipu_mask_vx_nxv4i8_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
+; CHECK-NEXT: csrwi vxrm, 0
; CHECK-NEXT: vnclipu.wx v8, v9, a0, v0.t
; CHECK-NEXT: ret
entry:
<vscale x 4 x i16> %1,
iXLen %2,
<vscale x 4 x i1> %3,
- iXLen %4, iXLen 1)
+ iXLen 0, iXLen %4, iXLen 1)
ret <vscale x 4 x i8> %a
}
declare <vscale x 8 x i8> @llvm.riscv.vnclipu.nxv8i8.nxv8i16(
<vscale x 8 x i8>,
<vscale x 8 x i16>,
- iXLen,
- iXLen);
+ iXLen, iXLen, iXLen);
define <vscale x 8 x i8> @intrinsic_vnclipu_vx_nxv8i8_nxv8i16(<vscale x 8 x i16> %0, iXLen %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vnclipu_vx_nxv8i8_nxv8i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
+; CHECK-NEXT: csrwi vxrm, 0
; CHECK-NEXT: vnclipu.wx v10, v8, a0
; CHECK-NEXT: vmv.v.v v8, v10
; CHECK-NEXT: ret
<vscale x 8 x i8> undef,
<vscale x 8 x i16> %0,
iXLen %1,
- iXLen %2)
+ iXLen 0, iXLen %2)
ret <vscale x 8 x i8> %a
}
<vscale x 8 x i16>,
iXLen,
<vscale x 8 x i1>,
- iXLen,
- iXLen);
+ iXLen, iXLen, iXLen);
define <vscale x 8 x i8> @intrinsic_vnclipu_mask_vx_nxv8i8_nxv8i16(<vscale x 8 x i8> %0, <vscale x 8 x i16> %1, iXLen %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vnclipu_mask_vx_nxv8i8_nxv8i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
+; CHECK-NEXT: csrwi vxrm, 0
; CHECK-NEXT: vnclipu.wx v8, v10, a0, v0.t
; CHECK-NEXT: ret
entry:
<vscale x 8 x i16> %1,
iXLen %2,
<vscale x 8 x i1> %3,
- iXLen %4, iXLen 1)
+ iXLen 0, iXLen %4, iXLen 1)
ret <vscale x 8 x i8> %a
}
declare <vscale x 16 x i8> @llvm.riscv.vnclipu.nxv16i8.nxv16i16(
<vscale x 16 x i8>,
<vscale x 16 x i16>,
- iXLen,
- iXLen);
+ iXLen, iXLen, iXLen);
define <vscale x 16 x i8> @intrinsic_vnclipu_vx_nxv16i8_nxv16i16(<vscale x 16 x i16> %0, iXLen %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vnclipu_vx_nxv16i8_nxv16i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma
+; CHECK-NEXT: csrwi vxrm, 0
; CHECK-NEXT: vnclipu.wx v12, v8, a0
; CHECK-NEXT: vmv.v.v v8, v12
; CHECK-NEXT: ret
<vscale x 16 x i8> undef,
<vscale x 16 x i16> %0,
iXLen %1,
- iXLen %2)
+ iXLen 0, iXLen %2)
ret <vscale x 16 x i8> %a
}
<vscale x 16 x i16>,
iXLen,
<vscale x 16 x i1>,
- iXLen,
- iXLen);
+ iXLen, iXLen, iXLen);
define <vscale x 16 x i8> @intrinsic_vnclipu_mask_vx_nxv16i8_nxv16i16(<vscale x 16 x i8> %0, <vscale x 16 x i16> %1, iXLen %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vnclipu_mask_vx_nxv16i8_nxv16i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu
+; CHECK-NEXT: csrwi vxrm, 0
; CHECK-NEXT: vnclipu.wx v8, v12, a0, v0.t
; CHECK-NEXT: ret
entry:
<vscale x 16 x i16> %1,
iXLen %2,
<vscale x 16 x i1> %3,
- iXLen %4, iXLen 1)
+ iXLen 0, iXLen %4, iXLen 1)
ret <vscale x 16 x i8> %a
}
declare <vscale x 32 x i8> @llvm.riscv.vnclipu.nxv32i8.nxv32i16(
<vscale x 32 x i8>,
<vscale x 32 x i16>,
- iXLen,
- iXLen);
+ iXLen, iXLen, iXLen);
define <vscale x 32 x i8> @intrinsic_vnclipu_vx_nxv32i8_nxv32i16(<vscale x 32 x i16> %0, iXLen %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vnclipu_vx_nxv32i8_nxv32i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma
+; CHECK-NEXT: csrwi vxrm, 0
; CHECK-NEXT: vnclipu.wx v16, v8, a0
; CHECK-NEXT: vmv.v.v v8, v16
; CHECK-NEXT: ret
<vscale x 32 x i8> undef,
<vscale x 32 x i16> %0,
iXLen %1,
- iXLen %2)
+ iXLen 0, iXLen %2)
ret <vscale x 32 x i8> %a
}
<vscale x 32 x i16>,
iXLen,
<vscale x 32 x i1>,
- iXLen,
- iXLen);
+ iXLen, iXLen, iXLen);
define <vscale x 32 x i8> @intrinsic_vnclipu_mask_vx_nxv32i8_nxv32i16(<vscale x 32 x i8> %0, <vscale x 32 x i16> %1, iXLen %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vnclipu_mask_vx_nxv32i8_nxv32i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu
+; CHECK-NEXT: csrwi vxrm, 0
; CHECK-NEXT: vnclipu.wx v8, v16, a0, v0.t
; CHECK-NEXT: ret
entry:
<vscale x 32 x i16> %1,
iXLen %2,
<vscale x 32 x i1> %3,
- iXLen %4, iXLen 1)
+ iXLen 0, iXLen %4, iXLen 1)
ret <vscale x 32 x i8> %a
}
declare <vscale x 1 x i16> @llvm.riscv.vnclipu.nxv1i16.nxv1i32(
<vscale x 1 x i16>,
<vscale x 1 x i32>,
- iXLen,
- iXLen);
+ iXLen, iXLen, iXLen);
define <vscale x 1 x i16> @intrinsic_vnclipu_vx_nxv1i16_nxv1i32(<vscale x 1 x i32> %0, iXLen %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vnclipu_vx_nxv1i16_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
+; CHECK-NEXT: csrwi vxrm, 0
; CHECK-NEXT: vnclipu.wx v8, v8, a0
; CHECK-NEXT: ret
entry:
<vscale x 1 x i16> undef,
<vscale x 1 x i32> %0,
iXLen %1,
- iXLen %2)
+ iXLen 0, iXLen %2)
ret <vscale x 1 x i16> %a
}
<vscale x 1 x i32>,
iXLen,
<vscale x 1 x i1>,
- iXLen,
- iXLen);
+ iXLen, iXLen, iXLen);
define <vscale x 1 x i16> @intrinsic_vnclipu_mask_vx_nxv1i16_nxv1i32(<vscale x 1 x i16> %0, <vscale x 1 x i32> %1, iXLen %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vnclipu_mask_vx_nxv1i16_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
+; CHECK-NEXT: csrwi vxrm, 0
; CHECK-NEXT: vnclipu.wx v8, v9, a0, v0.t
; CHECK-NEXT: ret
entry:
<vscale x 1 x i32> %1,
iXLen %2,
<vscale x 1 x i1> %3,
- iXLen %4, iXLen 1)
+ iXLen 0, iXLen %4, iXLen 1)
ret <vscale x 1 x i16> %a
}
declare <vscale x 2 x i16> @llvm.riscv.vnclipu.nxv2i16.nxv2i32(
<vscale x 2 x i16>,
<vscale x 2 x i32>,
- iXLen,
- iXLen);
+ iXLen, iXLen, iXLen);
define <vscale x 2 x i16> @intrinsic_vnclipu_vx_nxv2i16_nxv2i32(<vscale x 2 x i32> %0, iXLen %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vnclipu_vx_nxv2i16_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
+; CHECK-NEXT: csrwi vxrm, 0
; CHECK-NEXT: vnclipu.wx v8, v8, a0
; CHECK-NEXT: ret
entry:
<vscale x 2 x i16> undef,
<vscale x 2 x i32> %0,
iXLen %1,
- iXLen %2)
+ iXLen 0, iXLen %2)
ret <vscale x 2 x i16> %a
}
<vscale x 2 x i32>,
iXLen,
<vscale x 2 x i1>,
- iXLen,
- iXLen);
+ iXLen, iXLen, iXLen);
define <vscale x 2 x i16> @intrinsic_vnclipu_mask_vx_nxv2i16_nxv2i32(<vscale x 2 x i16> %0, <vscale x 2 x i32> %1, iXLen %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vnclipu_mask_vx_nxv2i16_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
+; CHECK-NEXT: csrwi vxrm, 0
; CHECK-NEXT: vnclipu.wx v8, v9, a0, v0.t
; CHECK-NEXT: ret
entry:
<vscale x 2 x i32> %1,
iXLen %2,
<vscale x 2 x i1> %3,
- iXLen %4, iXLen 1)
+ iXLen 0, iXLen %4, iXLen 1)
ret <vscale x 2 x i16> %a
}
declare <vscale x 4 x i16> @llvm.riscv.vnclipu.nxv4i16.nxv4i32(
<vscale x 4 x i16>,
<vscale x 4 x i32>,
- iXLen,
- iXLen);
+ iXLen, iXLen, iXLen);
define <vscale x 4 x i16> @intrinsic_vnclipu_vx_nxv4i16_nxv4i32(<vscale x 4 x i32> %0, iXLen %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vnclipu_vx_nxv4i16_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
+; CHECK-NEXT: csrwi vxrm, 0
; CHECK-NEXT: vnclipu.wx v10, v8, a0
; CHECK-NEXT: vmv.v.v v8, v10
; CHECK-NEXT: ret
<vscale x 4 x i16> undef,
<vscale x 4 x i32> %0,
iXLen %1,
- iXLen %2)
+ iXLen 0, iXLen %2)
ret <vscale x 4 x i16> %a
}
<vscale x 4 x i32>,
iXLen,
<vscale x 4 x i1>,
- iXLen,
- iXLen);
+ iXLen, iXLen, iXLen);
define <vscale x 4 x i16> @intrinsic_vnclipu_mask_vx_nxv4i16_nxv4i32(<vscale x 4 x i16> %0, <vscale x 4 x i32> %1, iXLen %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vnclipu_mask_vx_nxv4i16_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
+; CHECK-NEXT: csrwi vxrm, 0
; CHECK-NEXT: vnclipu.wx v8, v10, a0, v0.t
; CHECK-NEXT: ret
entry:
<vscale x 4 x i32> %1,
iXLen %2,
<vscale x 4 x i1> %3,
- iXLen %4, iXLen 1)
+ iXLen 0, iXLen %4, iXLen 1)
ret <vscale x 4 x i16> %a
}
declare <vscale x 8 x i16> @llvm.riscv.vnclipu.nxv8i16.nxv8i32(
<vscale x 8 x i16>,
<vscale x 8 x i32>,
- iXLen,
- iXLen);
+ iXLen, iXLen, iXLen);
define <vscale x 8 x i16> @intrinsic_vnclipu_vx_nxv8i16_nxv8i32(<vscale x 8 x i32> %0, iXLen %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vnclipu_vx_nxv8i16_nxv8i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
+; CHECK-NEXT: csrwi vxrm, 0
; CHECK-NEXT: vnclipu.wx v12, v8, a0
; CHECK-NEXT: vmv.v.v v8, v12
; CHECK-NEXT: ret
<vscale x 8 x i16> undef,
<vscale x 8 x i32> %0,
iXLen %1,
- iXLen %2)
+ iXLen 0, iXLen %2)
ret <vscale x 8 x i16> %a
}
<vscale x 8 x i32>,
iXLen,
<vscale x 8 x i1>,
- iXLen,
- iXLen);
+ iXLen, iXLen, iXLen);
define <vscale x 8 x i16> @intrinsic_vnclipu_mask_vx_nxv8i16_nxv8i32(<vscale x 8 x i16> %0, <vscale x 8 x i32> %1, iXLen %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vnclipu_mask_vx_nxv8i16_nxv8i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
+; CHECK-NEXT: csrwi vxrm, 0
; CHECK-NEXT: vnclipu.wx v8, v12, a0, v0.t
; CHECK-NEXT: ret
entry:
<vscale x 8 x i32> %1,
iXLen %2,
<vscale x 8 x i1> %3,
- iXLen %4, iXLen 1)
+ iXLen 0, iXLen %4, iXLen 1)
ret <vscale x 8 x i16> %a
}
declare <vscale x 16 x i16> @llvm.riscv.vnclipu.nxv16i16.nxv16i32(
<vscale x 16 x i16>,
<vscale x 16 x i32>,
- iXLen,
- iXLen);
+ iXLen, iXLen, iXLen);
define <vscale x 16 x i16> @intrinsic_vnclipu_vx_nxv16i16_nxv16i32(<vscale x 16 x i32> %0, iXLen %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vnclipu_vx_nxv16i16_nxv16i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma
+; CHECK-NEXT: csrwi vxrm, 0
; CHECK-NEXT: vnclipu.wx v16, v8, a0
; CHECK-NEXT: vmv.v.v v8, v16
; CHECK-NEXT: ret
<vscale x 16 x i16> undef,
<vscale x 16 x i32> %0,
iXLen %1,
- iXLen %2)
+ iXLen 0, iXLen %2)
ret <vscale x 16 x i16> %a
}
<vscale x 16 x i32>,
iXLen,
<vscale x 16 x i1>,
- iXLen,
- iXLen);
+ iXLen, iXLen, iXLen);
define <vscale x 16 x i16> @intrinsic_vnclipu_mask_vx_nxv16i16_nxv16i32(<vscale x 16 x i16> %0, <vscale x 16 x i32> %1, iXLen %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vnclipu_mask_vx_nxv16i16_nxv16i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu
+; CHECK-NEXT: csrwi vxrm, 0
; CHECK-NEXT: vnclipu.wx v8, v16, a0, v0.t
; CHECK-NEXT: ret
entry:
<vscale x 16 x i32> %1,
iXLen %2,
<vscale x 16 x i1> %3,
- iXLen %4, iXLen 1)
+ iXLen 0, iXLen %4, iXLen 1)
ret <vscale x 16 x i16> %a
}
declare <vscale x 1 x i32> @llvm.riscv.vnclipu.nxv1i32.nxv1i64(
<vscale x 1 x i32>,
<vscale x 1 x i64>,
- iXLen,
- iXLen);
+ iXLen, iXLen, iXLen);
define <vscale x 1 x i32> @intrinsic_vnclipu_vx_nxv1i32_nxv1i64(<vscale x 1 x i64> %0, iXLen %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vnclipu_vx_nxv1i32_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
+; CHECK-NEXT: csrwi vxrm, 0
; CHECK-NEXT: vnclipu.wx v8, v8, a0
; CHECK-NEXT: ret
entry:
<vscale x 1 x i32> undef,
<vscale x 1 x i64> %0,
iXLen %1,
- iXLen %2)
+ iXLen 0, iXLen %2)
ret <vscale x 1 x i32> %a
}
<vscale x 1 x i64>,
iXLen,
<vscale x 1 x i1>,
- iXLen,
- iXLen);
+ iXLen, iXLen, iXLen);
define <vscale x 1 x i32> @intrinsic_vnclipu_mask_vx_nxv1i32_nxv1i64(<vscale x 1 x i32> %0, <vscale x 1 x i64> %1, iXLen %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vnclipu_mask_vx_nxv1i32_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
+; CHECK-NEXT: csrwi vxrm, 0
; CHECK-NEXT: vnclipu.wx v8, v9, a0, v0.t
; CHECK-NEXT: ret
entry:
<vscale x 1 x i64> %1,
iXLen %2,
<vscale x 1 x i1> %3,
- iXLen %4, iXLen 1)
+ iXLen 0, iXLen %4, iXLen 1)
ret <vscale x 1 x i32> %a
}
declare <vscale x 2 x i32> @llvm.riscv.vnclipu.nxv2i32.nxv2i64(
<vscale x 2 x i32>,
<vscale x 2 x i64>,
- iXLen,
- iXLen);
+ iXLen, iXLen, iXLen);
define <vscale x 2 x i32> @intrinsic_vnclipu_vx_nxv2i32_nxv2i64(<vscale x 2 x i64> %0, iXLen %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vnclipu_vx_nxv2i32_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
+; CHECK-NEXT: csrwi vxrm, 0
; CHECK-NEXT: vnclipu.wx v10, v8, a0
; CHECK-NEXT: vmv.v.v v8, v10
; CHECK-NEXT: ret
<vscale x 2 x i32> undef,
<vscale x 2 x i64> %0,
iXLen %1,
- iXLen %2)
+ iXLen 0, iXLen %2)
ret <vscale x 2 x i32> %a
}
<vscale x 2 x i64>,
iXLen,
<vscale x 2 x i1>,
- iXLen,
- iXLen);
+ iXLen, iXLen, iXLen);
define <vscale x 2 x i32> @intrinsic_vnclipu_mask_vx_nxv2i32_nxv2i64(<vscale x 2 x i32> %0, <vscale x 2 x i64> %1, iXLen %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vnclipu_mask_vx_nxv2i32_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
+; CHECK-NEXT: csrwi vxrm, 0
; CHECK-NEXT: vnclipu.wx v8, v10, a0, v0.t
; CHECK-NEXT: ret
entry:
<vscale x 2 x i64> %1,
iXLen %2,
<vscale x 2 x i1> %3,
- iXLen %4, iXLen 1)
+ iXLen 0, iXLen %4, iXLen 1)
ret <vscale x 2 x i32> %a
}
declare <vscale x 4 x i32> @llvm.riscv.vnclipu.nxv4i32.nxv4i64(
<vscale x 4 x i32>,
<vscale x 4 x i64>,
- iXLen,
- iXLen);
+ iXLen, iXLen, iXLen);
define <vscale x 4 x i32> @intrinsic_vnclipu_vx_nxv4i32_nxv4i64(<vscale x 4 x i64> %0, iXLen %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vnclipu_vx_nxv4i32_nxv4i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
+; CHECK-NEXT: csrwi vxrm, 0
; CHECK-NEXT: vnclipu.wx v12, v8, a0
; CHECK-NEXT: vmv.v.v v8, v12
; CHECK-NEXT: ret
<vscale x 4 x i32> undef,
<vscale x 4 x i64> %0,
iXLen %1,
- iXLen %2)
+ iXLen 0, iXLen %2)
ret <vscale x 4 x i32> %a
}
<vscale x 4 x i64>,
iXLen,
<vscale x 4 x i1>,
- iXLen,
- iXLen);
+ iXLen, iXLen, iXLen);
define <vscale x 4 x i32> @intrinsic_vnclipu_mask_vx_nxv4i32_nxv4i64(<vscale x 4 x i32> %0, <vscale x 4 x i64> %1, iXLen %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vnclipu_mask_vx_nxv4i32_nxv4i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
+; CHECK-NEXT: csrwi vxrm, 0
; CHECK-NEXT: vnclipu.wx v8, v12, a0, v0.t
; CHECK-NEXT: ret
entry:
<vscale x 4 x i64> %1,
iXLen %2,
<vscale x 4 x i1> %3,
- iXLen %4, iXLen 1)
+ iXLen 0, iXLen %4, iXLen 1)
ret <vscale x 4 x i32> %a
}
declare <vscale x 8 x i32> @llvm.riscv.vnclipu.nxv8i32.nxv8i64(
<vscale x 8 x i32>,
<vscale x 8 x i64>,
- iXLen,
- iXLen);
+ iXLen, iXLen, iXLen);
define <vscale x 8 x i32> @intrinsic_vnclipu_vx_nxv8i32_nxv8i64(<vscale x 8 x i64> %0, iXLen %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vnclipu_vx_nxv8i32_nxv8i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma
+; CHECK-NEXT: csrwi vxrm, 0
; CHECK-NEXT: vnclipu.wx v16, v8, a0
; CHECK-NEXT: vmv.v.v v8, v16
; CHECK-NEXT: ret
<vscale x 8 x i32> undef,
<vscale x 8 x i64> %0,
iXLen %1,
- iXLen %2)
+ iXLen 0, iXLen %2)
ret <vscale x 8 x i32> %a
}
<vscale x 8 x i64>,
iXLen,
<vscale x 8 x i1>,
- iXLen,
- iXLen);
+ iXLen, iXLen, iXLen);
define <vscale x 8 x i32> @intrinsic_vnclipu_mask_vx_nxv8i32_nxv8i64(<vscale x 8 x i32> %0, <vscale x 8 x i64> %1, iXLen %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vnclipu_mask_vx_nxv8i32_nxv8i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu
+; CHECK-NEXT: csrwi vxrm, 0
; CHECK-NEXT: vnclipu.wx v8, v16, a0, v0.t
; CHECK-NEXT: ret
entry:
<vscale x 8 x i64> %1,
iXLen %2,
<vscale x 8 x i1> %3,
- iXLen %4, iXLen 1)
+ iXLen 0, iXLen %4, iXLen 1)
ret <vscale x 8 x i32> %a
}
; CHECK-LABEL: intrinsic_vnclipu_vi_nxv1i8_nxv1i16_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
+; CHECK-NEXT: csrwi vxrm, 0
; CHECK-NEXT: vnclipu.wi v8, v8, 9
; CHECK-NEXT: ret
entry:
<vscale x 1 x i8> undef,
<vscale x 1 x i16> %0,
iXLen 9,
- iXLen %1)
+ iXLen 0, iXLen %1)
ret <vscale x 1 x i8> %a
}
; CHECK-LABEL: intrinsic_vnclipu_mask_vi_nxv1i8_nxv1i16_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu
+; CHECK-NEXT: csrwi vxrm, 0
; CHECK-NEXT: vnclipu.wi v8, v9, 9, v0.t
; CHECK-NEXT: ret
entry:
<vscale x 1 x i16> %1,
iXLen 9,
<vscale x 1 x i1> %2,
- iXLen %3, iXLen 1)
+ iXLen 0, iXLen %3, iXLen 1)
ret <vscale x 1 x i8> %a
}
; CHECK-LABEL: intrinsic_vnclipu_vi_nxv2i8_nxv2i16_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma
+; CHECK-NEXT: csrwi vxrm, 0
; CHECK-NEXT: vnclipu.wi v8, v8, 9
; CHECK-NEXT: ret
entry:
<vscale x 2 x i8> undef,
<vscale x 2 x i16> %0,
iXLen 9,
- iXLen %1)
+ iXLen 0, iXLen %1)
ret <vscale x 2 x i8> %a
}
; CHECK-LABEL: intrinsic_vnclipu_mask_vi_nxv2i8_nxv2i16_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu
+; CHECK-NEXT: csrwi vxrm, 0
; CHECK-NEXT: vnclipu.wi v8, v9, 9, v0.t
; CHECK-NEXT: ret
entry:
<vscale x 2 x i16> %1,
iXLen 9,
<vscale x 2 x i1> %2,
- iXLen %3, iXLen 1)
+ iXLen 0, iXLen %3, iXLen 1)
ret <vscale x 2 x i8> %a
}
; CHECK-LABEL: intrinsic_vnclipu_vi_nxv4i8_nxv4i16_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
+; CHECK-NEXT: csrwi vxrm, 0
; CHECK-NEXT: vnclipu.wi v8, v8, 9
; CHECK-NEXT: ret
entry:
<vscale x 4 x i8> undef,
<vscale x 4 x i16> %0,
iXLen 9,
- iXLen %1)
+ iXLen 0, iXLen %1)
ret <vscale x 4 x i8> %a
}
; CHECK-LABEL: intrinsic_vnclipu_mask_vi_nxv4i8_nxv4i16_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu
+; CHECK-NEXT: csrwi vxrm, 0
; CHECK-NEXT: vnclipu.wi v8, v9, 9, v0.t
; CHECK-NEXT: ret
entry:
<vscale x 4 x i16> %1,
iXLen 9,
<vscale x 4 x i1> %2,
- iXLen %3, iXLen 1)
+ iXLen 0, iXLen %3, iXLen 1)
ret <vscale x 4 x i8> %a
}
; CHECK-LABEL: intrinsic_vnclipu_vi_nxv8i8_nxv8i16_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma
+; CHECK-NEXT: csrwi vxrm, 0
; CHECK-NEXT: vnclipu.wi v10, v8, 9
; CHECK-NEXT: vmv.v.v v8, v10
; CHECK-NEXT: ret
<vscale x 8 x i8> undef,
<vscale x 8 x i16> %0,
iXLen 9,
- iXLen %1)
+ iXLen 0, iXLen %1)
ret <vscale x 8 x i8> %a
}
; CHECK-LABEL: intrinsic_vnclipu_mask_vi_nxv8i8_nxv8i16_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu
+; CHECK-NEXT: csrwi vxrm, 0
; CHECK-NEXT: vnclipu.wi v8, v10, 9, v0.t
; CHECK-NEXT: ret
entry:
<vscale x 8 x i16> %1,
iXLen 9,
<vscale x 8 x i1> %2,
- iXLen %3, iXLen 1)
+ iXLen 0, iXLen %3, iXLen 1)
ret <vscale x 8 x i8> %a
}
; CHECK-LABEL: intrinsic_vnclipu_vi_nxv16i8_nxv16i16_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma
+; CHECK-NEXT: csrwi vxrm, 0
; CHECK-NEXT: vnclipu.wi v12, v8, 9
; CHECK-NEXT: vmv.v.v v8, v12
; CHECK-NEXT: ret
<vscale x 16 x i8> undef,
<vscale x 16 x i16> %0,
iXLen 9,
- iXLen %1)
+ iXLen 0, iXLen %1)
ret <vscale x 16 x i8> %a
}
; CHECK-LABEL: intrinsic_vnclipu_mask_vi_nxv16i8_nxv16i16_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu
+; CHECK-NEXT: csrwi vxrm, 0
; CHECK-NEXT: vnclipu.wi v8, v12, 9, v0.t
; CHECK-NEXT: ret
entry:
<vscale x 16 x i16> %1,
iXLen 9,
<vscale x 16 x i1> %2,
- iXLen %3, iXLen 1)
+ iXLen 0, iXLen %3, iXLen 1)
ret <vscale x 16 x i8> %a
}
; CHECK-LABEL: intrinsic_vnclipu_vi_nxv32i8_nxv32i16_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma
+; CHECK-NEXT: csrwi vxrm, 0
; CHECK-NEXT: vnclipu.wi v16, v8, 9
; CHECK-NEXT: vmv.v.v v8, v16
; CHECK-NEXT: ret
<vscale x 32 x i8> undef,
<vscale x 32 x i16> %0,
iXLen 9,
- iXLen %1)
+ iXLen 0, iXLen %1)
ret <vscale x 32 x i8> %a
}
; CHECK-LABEL: intrinsic_vnclipu_mask_vi_nxv32i8_nxv32i16_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu
+; CHECK-NEXT: csrwi vxrm, 0
; CHECK-NEXT: vnclipu.wi v8, v16, 9, v0.t
; CHECK-NEXT: ret
entry:
<vscale x 32 x i16> %1,
iXLen 9,
<vscale x 32 x i1> %2,
- iXLen %3, iXLen 1)
+ iXLen 0, iXLen %3, iXLen 1)
ret <vscale x 32 x i8> %a
}
; CHECK-LABEL: intrinsic_vnclipu_vi_nxv1i16_nxv1i32_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
+; CHECK-NEXT: csrwi vxrm, 0
; CHECK-NEXT: vnclipu.wi v8, v8, 9
; CHECK-NEXT: ret
entry:
<vscale x 1 x i16> undef,
<vscale x 1 x i32> %0,
iXLen 9,
- iXLen %1)
+ iXLen 0, iXLen %1)
ret <vscale x 1 x i16> %a
}
; CHECK-LABEL: intrinsic_vnclipu_mask_vi_nxv1i16_nxv1i32_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
+; CHECK-NEXT: csrwi vxrm, 0
; CHECK-NEXT: vnclipu.wi v8, v9, 9, v0.t
; CHECK-NEXT: ret
entry:
<vscale x 1 x i32> %1,
iXLen 9,
<vscale x 1 x i1> %2,
- iXLen %3, iXLen 1)
+ iXLen 0, iXLen %3, iXLen 1)
ret <vscale x 1 x i16> %a
}
; CHECK-LABEL: intrinsic_vnclipu_vi_nxv2i16_nxv2i32_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
+; CHECK-NEXT: csrwi vxrm, 0
; CHECK-NEXT: vnclipu.wi v8, v8, 9
; CHECK-NEXT: ret
entry:
<vscale x 2 x i16> undef,
<vscale x 2 x i32> %0,
iXLen 9,
- iXLen %1)
+ iXLen 0, iXLen %1)
ret <vscale x 2 x i16> %a
}
; CHECK-LABEL: intrinsic_vnclipu_mask_vi_nxv2i16_nxv2i32_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu
+; CHECK-NEXT: csrwi vxrm, 0
; CHECK-NEXT: vnclipu.wi v8, v9, 9, v0.t
; CHECK-NEXT: ret
entry:
<vscale x 2 x i32> %1,
iXLen 9,
<vscale x 2 x i1> %2,
- iXLen %3, iXLen 1)
+ iXLen 0, iXLen %3, iXLen 1)
ret <vscale x 2 x i16> %a
}
; CHECK-LABEL: intrinsic_vnclipu_vi_nxv4i16_nxv4i32_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
+; CHECK-NEXT: csrwi vxrm, 0
; CHECK-NEXT: vnclipu.wi v10, v8, 9
; CHECK-NEXT: vmv.v.v v8, v10
; CHECK-NEXT: ret
<vscale x 4 x i16> undef,
<vscale x 4 x i32> %0,
iXLen 9,
- iXLen %1)
+ iXLen 0, iXLen %1)
ret <vscale x 4 x i16> %a
}
; CHECK-LABEL: intrinsic_vnclipu_mask_vi_nxv4i16_nxv4i32_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu
+; CHECK-NEXT: csrwi vxrm, 0
; CHECK-NEXT: vnclipu.wi v8, v10, 9, v0.t
; CHECK-NEXT: ret
entry:
<vscale x 4 x i32> %1,
iXLen 9,
<vscale x 4 x i1> %2,
- iXLen %3, iXLen 1)
+ iXLen 0, iXLen %3, iXLen 1)
ret <vscale x 4 x i16> %a
}
; CHECK-LABEL: intrinsic_vnclipu_vi_nxv8i16_nxv8i32_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
+; CHECK-NEXT: csrwi vxrm, 0
; CHECK-NEXT: vnclipu.wi v12, v8, 9
; CHECK-NEXT: vmv.v.v v8, v12
; CHECK-NEXT: ret
<vscale x 8 x i16> undef,
<vscale x 8 x i32> %0,
iXLen 9,
- iXLen %1)
+ iXLen 0, iXLen %1)
ret <vscale x 8 x i16> %a
}
; CHECK-LABEL: intrinsic_vnclipu_mask_vi_nxv8i16_nxv8i32_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu
+; CHECK-NEXT: csrwi vxrm, 0
; CHECK-NEXT: vnclipu.wi v8, v12, 9, v0.t
; CHECK-NEXT: ret
entry:
<vscale x 8 x i32> %1,
iXLen 9,
<vscale x 8 x i1> %2,
- iXLen %3, iXLen 1)
+ iXLen 0, iXLen %3, iXLen 1)
ret <vscale x 8 x i16> %a
}
; CHECK-LABEL: intrinsic_vnclipu_vi_nxv16i16_nxv16i32_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma
+; CHECK-NEXT: csrwi vxrm, 0
; CHECK-NEXT: vnclipu.wi v16, v8, 9
; CHECK-NEXT: vmv.v.v v8, v16
; CHECK-NEXT: ret
<vscale x 16 x i16> undef,
<vscale x 16 x i32> %0,
iXLen 9,
- iXLen %1)
+ iXLen 0, iXLen %1)
ret <vscale x 16 x i16> %a
}
; CHECK-LABEL: intrinsic_vnclipu_mask_vi_nxv16i16_nxv16i32_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu
+; CHECK-NEXT: csrwi vxrm, 0
; CHECK-NEXT: vnclipu.wi v8, v16, 9, v0.t
; CHECK-NEXT: ret
entry:
<vscale x 16 x i32> %1,
iXLen 9,
<vscale x 16 x i1> %2,
- iXLen %3, iXLen 1)
+ iXLen 0, iXLen %3, iXLen 1)
ret <vscale x 16 x i16> %a
}
; CHECK-LABEL: intrinsic_vnclipu_vi_nxv1i32_nxv1i64_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
+; CHECK-NEXT: csrwi vxrm, 0
; CHECK-NEXT: vnclipu.wi v8, v8, 9
; CHECK-NEXT: ret
entry:
<vscale x 1 x i32> undef,
<vscale x 1 x i64> %0,
iXLen 9,
- iXLen %1)
+ iXLen 0, iXLen %1)
ret <vscale x 1 x i32> %a
}
; CHECK-LABEL: intrinsic_vnclipu_mask_vi_nxv1i32_nxv1i64_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu
+; CHECK-NEXT: csrwi vxrm, 0
; CHECK-NEXT: vnclipu.wi v8, v9, 9, v0.t
; CHECK-NEXT: ret
entry:
<vscale x 1 x i64> %1,
iXLen 9,
<vscale x 1 x i1> %2,
- iXLen %3, iXLen 1)
+ iXLen 0, iXLen %3, iXLen 1)
ret <vscale x 1 x i32> %a
}
; CHECK-LABEL: intrinsic_vnclipu_vi_nxv2i32_nxv2i64_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
+; CHECK-NEXT: csrwi vxrm, 0
; CHECK-NEXT: vnclipu.wi v10, v8, 9
; CHECK-NEXT: vmv.v.v v8, v10
; CHECK-NEXT: ret
<vscale x 2 x i32> undef,
<vscale x 2 x i64> %0,
iXLen 9,
- iXLen %1)
+ iXLen 0, iXLen %1)
ret <vscale x 2 x i32> %a
}
; CHECK-LABEL: intrinsic_vnclipu_mask_vi_nxv2i32_nxv2i64_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
+; CHECK-NEXT: csrwi vxrm, 0
; CHECK-NEXT: vnclipu.wi v8, v10, 9, v0.t
; CHECK-NEXT: ret
entry:
<vscale x 2 x i64> %1,
iXLen 9,
<vscale x 2 x i1> %2,
- iXLen %3, iXLen 1)
+ iXLen 0, iXLen %3, iXLen 1)
ret <vscale x 2 x i32> %a
}
; CHECK-LABEL: intrinsic_vnclipu_vi_nxv4i32_nxv4i64_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
+; CHECK-NEXT: csrwi vxrm, 0
; CHECK-NEXT: vnclipu.wi v12, v8, 9
; CHECK-NEXT: vmv.v.v v8, v12
; CHECK-NEXT: ret
<vscale x 4 x i32> undef,
<vscale x 4 x i64> %0,
iXLen 9,
- iXLen %1)
+ iXLen 0, iXLen %1)
ret <vscale x 4 x i32> %a
}
; CHECK-LABEL: intrinsic_vnclipu_mask_vi_nxv4i32_nxv4i64_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu
+; CHECK-NEXT: csrwi vxrm, 0
; CHECK-NEXT: vnclipu.wi v8, v12, 9, v0.t
; CHECK-NEXT: ret
entry:
<vscale x 4 x i64> %1,
iXLen 9,
<vscale x 4 x i1> %2,
- iXLen %3, iXLen 1)
+ iXLen 0, iXLen %3, iXLen 1)
ret <vscale x 4 x i32> %a
}
; CHECK-LABEL: intrinsic_vnclipu_vi_nxv8i32_nxv8i64_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
+; CHECK-NEXT: csrwi vxrm, 0
; CHECK-NEXT: vnclipu.wi v16, v8, 9
; CHECK-NEXT: vmv.v.v v8, v16
; CHECK-NEXT: ret
<vscale x 8 x i32> undef,
<vscale x 8 x i64> %0,
iXLen 9,
- iXLen %1)
+ iXLen 0, iXLen %1)
ret <vscale x 8 x i32> %a
}
; CHECK-LABEL: intrinsic_vnclipu_mask_vi_nxv8i32_nxv8i64_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu
+; CHECK-NEXT: csrwi vxrm, 0
; CHECK-NEXT: vnclipu.wi v8, v16, 9, v0.t
; CHECK-NEXT: ret
entry:
<vscale x 8 x i64> %1,
iXLen 9,
<vscale x 8 x i1> %2,
- iXLen %3, iXLen 1)
+ iXLen 0, iXLen %3, iXLen 1)
ret <vscale x 8 x i32> %a
}
<vscale x 1 x i8>,
<vscale x 1 x i8>,
<vscale x 1 x i8>,
- i32);
+ i32, i32);
define <vscale x 1 x i8> @intrinsic_vsmul_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vsmul_vv_nxv1i8_nxv1i8_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
+; CHECK-NEXT: csrwi vxrm, 0
; CHECK-NEXT: vsmul.vv v8, v8, v9
; CHECK-NEXT: ret
entry:
<vscale x 1 x i8> undef,
<vscale x 1 x i8> %0,
<vscale x 1 x i8> %1,
- i32 %2)
+ i32 0, i32 %2)
ret <vscale x 1 x i8> %a
}
<vscale x 1 x i8>,
<vscale x 1 x i8>,
<vscale x 1 x i1>,
- i32,
- i32);
+ i32, i32, i32);
define <vscale x 1 x i8> @intrinsic_vsmul_mask_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vsmul_mask_vv_nxv1i8_nxv1i8_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu
+; CHECK-NEXT: csrwi vxrm, 0
; CHECK-NEXT: vsmul.vv v8, v9, v10, v0.t
; CHECK-NEXT: ret
entry:
<vscale x 1 x i8> %1,
<vscale x 1 x i8> %2,
<vscale x 1 x i1> %3,
- i32 %4, i32 1)
+ i32 0, i32 %4, i32 1)
ret <vscale x 1 x i8> %a
}
<vscale x 2 x i8>,
<vscale x 2 x i8>,
<vscale x 2 x i8>,
- i32);
+ i32, i32);
define <vscale x 2 x i8> @intrinsic_vsmul_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vsmul_vv_nxv2i8_nxv2i8_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma
+; CHECK-NEXT: csrwi vxrm, 0
; CHECK-NEXT: vsmul.vv v8, v8, v9
; CHECK-NEXT: ret
entry:
<vscale x 2 x i8> undef,
<vscale x 2 x i8> %0,
<vscale x 2 x i8> %1,
- i32 %2)
+ i32 0, i32 %2)
ret <vscale x 2 x i8> %a
}
<vscale x 2 x i8>,
<vscale x 2 x i8>,
<vscale x 2 x i1>,
- i32,
- i32);
+ i32, i32, i32);
define <vscale x 2 x i8> @intrinsic_vsmul_mask_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vsmul_mask_vv_nxv2i8_nxv2i8_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu
+; CHECK-NEXT: csrwi vxrm, 0
; CHECK-NEXT: vsmul.vv v8, v9, v10, v0.t
; CHECK-NEXT: ret
entry:
<vscale x 2 x i8> %1,
<vscale x 2 x i8> %2,
<vscale x 2 x i1> %3,
- i32 %4, i32 1)
+ i32 0, i32 %4, i32 1)
ret <vscale x 2 x i8> %a
}
<vscale x 4 x i8>,
<vscale x 4 x i8>,
<vscale x 4 x i8>,
- i32);
+ i32, i32);
define <vscale x 4 x i8> @intrinsic_vsmul_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vsmul_vv_nxv4i8_nxv4i8_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
+; CHECK-NEXT: csrwi vxrm, 0
; CHECK-NEXT: vsmul.vv v8, v8, v9
; CHECK-NEXT: ret
entry:
<vscale x 4 x i8> undef,
<vscale x 4 x i8> %0,
<vscale x 4 x i8> %1,
- i32 %2)
+ i32 0, i32 %2)
ret <vscale x 4 x i8> %a
}
<vscale x 4 x i8>,
<vscale x 4 x i8>,
<vscale x 4 x i1>,
- i32,
- i32);
+ i32, i32, i32);
define <vscale x 4 x i8> @intrinsic_vsmul_mask_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vsmul_mask_vv_nxv4i8_nxv4i8_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu
+; CHECK-NEXT: csrwi vxrm, 0
; CHECK-NEXT: vsmul.vv v8, v9, v10, v0.t
; CHECK-NEXT: ret
entry:
<vscale x 4 x i8> %1,
<vscale x 4 x i8> %2,
<vscale x 4 x i1> %3,
- i32 %4, i32 1)
+ i32 0, i32 %4, i32 1)
ret <vscale x 4 x i8> %a
}
<vscale x 8 x i8>,
<vscale x 8 x i8>,
<vscale x 8 x i8>,
- i32);
+ i32, i32);
define <vscale x 8 x i8> @intrinsic_vsmul_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vsmul_vv_nxv8i8_nxv8i8_nxv8i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma
+; CHECK-NEXT: csrwi vxrm, 0
; CHECK-NEXT: vsmul.vv v8, v8, v9
; CHECK-NEXT: ret
entry:
<vscale x 8 x i8> undef,
<vscale x 8 x i8> %0,
<vscale x 8 x i8> %1,
- i32 %2)
+ i32 0, i32 %2)
ret <vscale x 8 x i8> %a
}
<vscale x 8 x i8>,
<vscale x 8 x i8>,
<vscale x 8 x i1>,
- i32,
- i32);
+ i32, i32, i32);
define <vscale x 8 x i8> @intrinsic_vsmul_mask_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vsmul_mask_vv_nxv8i8_nxv8i8_nxv8i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu
+; CHECK-NEXT: csrwi vxrm, 0
; CHECK-NEXT: vsmul.vv v8, v9, v10, v0.t
; CHECK-NEXT: ret
entry:
<vscale x 8 x i8> %1,
<vscale x 8 x i8> %2,
<vscale x 8 x i1> %3,
- i32 %4, i32 1)
+ i32 0, i32 %4, i32 1)
ret <vscale x 8 x i8> %a
}
<vscale x 16 x i8>,
<vscale x 16 x i8>,
<vscale x 16 x i8>,
- i32);
+ i32, i32);
define <vscale x 16 x i8> @intrinsic_vsmul_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vsmul_vv_nxv16i8_nxv16i8_nxv16i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma
+; CHECK-NEXT: csrwi vxrm, 0
; CHECK-NEXT: vsmul.vv v8, v8, v10
; CHECK-NEXT: ret
entry:
<vscale x 16 x i8> undef,
<vscale x 16 x i8> %0,
<vscale x 16 x i8> %1,
- i32 %2)
+ i32 0, i32 %2)
ret <vscale x 16 x i8> %a
}
<vscale x 16 x i8>,
<vscale x 16 x i8>,
<vscale x 16 x i1>,
- i32,
- i32);
+ i32, i32, i32);
define <vscale x 16 x i8> @intrinsic_vsmul_mask_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vsmul_mask_vv_nxv16i8_nxv16i8_nxv16i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu
+; CHECK-NEXT: csrwi vxrm, 0
; CHECK-NEXT: vsmul.vv v8, v10, v12, v0.t
; CHECK-NEXT: ret
entry:
<vscale x 16 x i8> %1,
<vscale x 16 x i8> %2,
<vscale x 16 x i1> %3,
- i32 %4, i32 1)
+ i32 0, i32 %4, i32 1)
ret <vscale x 16 x i8> %a
}
<vscale x 32 x i8>,
<vscale x 32 x i8>,
<vscale x 32 x i8>,
- i32);
+ i32, i32);
define <vscale x 32 x i8> @intrinsic_vsmul_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vsmul_vv_nxv32i8_nxv32i8_nxv32i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma
+; CHECK-NEXT: csrwi vxrm, 0
; CHECK-NEXT: vsmul.vv v8, v8, v12
; CHECK-NEXT: ret
entry:
<vscale x 32 x i8> undef,
<vscale x 32 x i8> %0,
<vscale x 32 x i8> %1,
- i32 %2)
+ i32 0, i32 %2)
ret <vscale x 32 x i8> %a
}
<vscale x 32 x i8>,
<vscale x 32 x i8>,
<vscale x 32 x i1>,
- i32,
- i32);
+ i32, i32, i32);
define <vscale x 32 x i8> @intrinsic_vsmul_mask_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vsmul_mask_vv_nxv32i8_nxv32i8_nxv32i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu
+; CHECK-NEXT: csrwi vxrm, 0
; CHECK-NEXT: vsmul.vv v8, v12, v16, v0.t
; CHECK-NEXT: ret
entry:
<vscale x 32 x i8> %1,
<vscale x 32 x i8> %2,
<vscale x 32 x i1> %3,
- i32 %4, i32 1)
+ i32 0, i32 %4, i32 1)
ret <vscale x 32 x i8> %a
}
<vscale x 64 x i8>,
<vscale x 64 x i8>,
<vscale x 64 x i8>,
- i32);
+ i32, i32);
define <vscale x 64 x i8> @intrinsic_vsmul_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vsmul_vv_nxv64i8_nxv64i8_nxv64i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma
+; CHECK-NEXT: csrwi vxrm, 0
; CHECK-NEXT: vsmul.vv v8, v8, v16
; CHECK-NEXT: ret
entry:
<vscale x 64 x i8> undef,
<vscale x 64 x i8> %0,
<vscale x 64 x i8> %1,
- i32 %2)
+ i32 0, i32 %2)
ret <vscale x 64 x i8> %a
}
<vscale x 64 x i8>,
<vscale x 64 x i8>,
<vscale x 64 x i1>,
- i32,
- i32);
+ i32, i32, i32);
define <vscale x 64 x i8> @intrinsic_vsmul_mask_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i8> %2, <vscale x 64 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vsmul_mask_vv_nxv64i8_nxv64i8_nxv64i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vl8r.v v24, (a0)
; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu
+; CHECK-NEXT: csrwi vxrm, 0
; CHECK-NEXT: vsmul.vv v8, v16, v24, v0.t
; CHECK-NEXT: ret
entry:
<vscale x 64 x i8> %1,
<vscale x 64 x i8> %2,
<vscale x 64 x i1> %3,
- i32 %4, i32 1)
+ i32 0, i32 %4, i32 1)
ret <vscale x 64 x i8> %a
}
<vscale x 1 x i16>,
<vscale x 1 x i16>,
<vscale x 1 x i16>,
- i32);
+ i32, i32);
define <vscale x 1 x i16> @intrinsic_vsmul_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vsmul_vv_nxv1i16_nxv1i16_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
+; CHECK-NEXT: csrwi vxrm, 0
; CHECK-NEXT: vsmul.vv v8, v8, v9
; CHECK-NEXT: ret
entry:
<vscale x 1 x i16> undef,
<vscale x 1 x i16> %0,
<vscale x 1 x i16> %1,
- i32 %2)
+ i32 0, i32 %2)
ret <vscale x 1 x i16> %a
}
<vscale x 1 x i16>,
<vscale x 1 x i16>,
<vscale x 1 x i1>,
- i32,
- i32);
+ i32, i32, i32);
define <vscale x 1 x i16> @intrinsic_vsmul_mask_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vsmul_mask_vv_nxv1i16_nxv1i16_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
+; CHECK-NEXT: csrwi vxrm, 0
; CHECK-NEXT: vsmul.vv v8, v9, v10, v0.t
; CHECK-NEXT: ret
entry:
<vscale x 1 x i16> %1,
<vscale x 1 x i16> %2,
<vscale x 1 x i1> %3,
- i32 %4, i32 1)
+ i32 0, i32 %4, i32 1)
ret <vscale x 1 x i16> %a
}
<vscale x 2 x i16>,
<vscale x 2 x i16>,
<vscale x 2 x i16>,
- i32);
+ i32, i32);
define <vscale x 2 x i16> @intrinsic_vsmul_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vsmul_vv_nxv2i16_nxv2i16_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
+; CHECK-NEXT: csrwi vxrm, 0
; CHECK-NEXT: vsmul.vv v8, v8, v9
; CHECK-NEXT: ret
entry:
<vscale x 2 x i16> undef,
<vscale x 2 x i16> %0,
<vscale x 2 x i16> %1,
- i32 %2)
+ i32 0, i32 %2)
ret <vscale x 2 x i16> %a
}
<vscale x 2 x i16>,
<vscale x 2 x i16>,
<vscale x 2 x i1>,
- i32,
- i32);
+ i32, i32, i32);
define <vscale x 2 x i16> @intrinsic_vsmul_mask_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vsmul_mask_vv_nxv2i16_nxv2i16_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu
+; CHECK-NEXT: csrwi vxrm, 0
; CHECK-NEXT: vsmul.vv v8, v9, v10, v0.t
; CHECK-NEXT: ret
entry:
<vscale x 2 x i16> %1,
<vscale x 2 x i16> %2,
<vscale x 2 x i1> %3,
- i32 %4, i32 1)
+ i32 0, i32 %4, i32 1)
ret <vscale x 2 x i16> %a
}
<vscale x 4 x i16>,
<vscale x 4 x i16>,
<vscale x 4 x i16>,
- i32);
+ i32, i32);
define <vscale x 4 x i16> @intrinsic_vsmul_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vsmul_vv_nxv4i16_nxv4i16_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
+; CHECK-NEXT: csrwi vxrm, 0
; CHECK-NEXT: vsmul.vv v8, v8, v9
; CHECK-NEXT: ret
entry:
<vscale x 4 x i16> undef,
<vscale x 4 x i16> %0,
<vscale x 4 x i16> %1,
- i32 %2)
+ i32 0, i32 %2)
ret <vscale x 4 x i16> %a
}
<vscale x 4 x i16>,
<vscale x 4 x i16>,
<vscale x 4 x i1>,
- i32,
- i32);
+ i32, i32, i32);
define <vscale x 4 x i16> @intrinsic_vsmul_mask_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vsmul_mask_vv_nxv4i16_nxv4i16_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu
+; CHECK-NEXT: csrwi vxrm, 0
; CHECK-NEXT: vsmul.vv v8, v9, v10, v0.t
; CHECK-NEXT: ret
entry:
<vscale x 4 x i16> %1,
<vscale x 4 x i16> %2,
<vscale x 4 x i1> %3,
- i32 %4, i32 1)
+ i32 0, i32 %4, i32 1)
ret <vscale x 4 x i16> %a
}
<vscale x 8 x i16>,
<vscale x 8 x i16>,
<vscale x 8 x i16>,
- i32);
+ i32, i32);
define <vscale x 8 x i16> @intrinsic_vsmul_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vsmul_vv_nxv8i16_nxv8i16_nxv8i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
+; CHECK-NEXT: csrwi vxrm, 0
; CHECK-NEXT: vsmul.vv v8, v8, v10
; CHECK-NEXT: ret
entry:
<vscale x 8 x i16> undef,
<vscale x 8 x i16> %0,
<vscale x 8 x i16> %1,
- i32 %2)
+ i32 0, i32 %2)
ret <vscale x 8 x i16> %a
}
<vscale x 8 x i16>,
<vscale x 8 x i16>,
<vscale x 8 x i1>,
- i32,
- i32);
+ i32, i32, i32);
define <vscale x 8 x i16> @intrinsic_vsmul_mask_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vsmul_mask_vv_nxv8i16_nxv8i16_nxv8i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu
+; CHECK-NEXT: csrwi vxrm, 0
; CHECK-NEXT: vsmul.vv v8, v10, v12, v0.t
; CHECK-NEXT: ret
entry:
<vscale x 8 x i16> %1,
<vscale x 8 x i16> %2,
<vscale x 8 x i1> %3,
- i32 %4, i32 1)
+ i32 0, i32 %4, i32 1)
ret <vscale x 8 x i16> %a
}
<vscale x 16 x i16>,
<vscale x 16 x i16>,
<vscale x 16 x i16>,
- i32);
+ i32, i32);
define <vscale x 16 x i16> @intrinsic_vsmul_vv_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vsmul_vv_nxv16i16_nxv16i16_nxv16i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma
+; CHECK-NEXT: csrwi vxrm, 0
; CHECK-NEXT: vsmul.vv v8, v8, v12
; CHECK-NEXT: ret
entry:
<vscale x 16 x i16> undef,
<vscale x 16 x i16> %0,
<vscale x 16 x i16> %1,
- i32 %2)
+ i32 0, i32 %2)
ret <vscale x 16 x i16> %a
}
<vscale x 16 x i16>,
<vscale x 16 x i16>,
<vscale x 16 x i1>,
- i32,
- i32);
+ i32, i32, i32);
define <vscale x 16 x i16> @intrinsic_vsmul_mask_vv_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vsmul_mask_vv_nxv16i16_nxv16i16_nxv16i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu
+; CHECK-NEXT: csrwi vxrm, 0
; CHECK-NEXT: vsmul.vv v8, v12, v16, v0.t
; CHECK-NEXT: ret
entry:
<vscale x 16 x i16> %1,
<vscale x 16 x i16> %2,
<vscale x 16 x i1> %3,
- i32 %4, i32 1)
+ i32 0, i32 %4, i32 1)
ret <vscale x 16 x i16> %a
}
<vscale x 32 x i16>,
<vscale x 32 x i16>,
<vscale x 32 x i16>,
- i32);
+ i32, i32);
define <vscale x 32 x i16> @intrinsic_vsmul_vv_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vsmul_vv_nxv32i16_nxv32i16_nxv32i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma
+; CHECK-NEXT: csrwi vxrm, 0
; CHECK-NEXT: vsmul.vv v8, v8, v16
; CHECK-NEXT: ret
entry:
<vscale x 32 x i16> undef,
<vscale x 32 x i16> %0,
<vscale x 32 x i16> %1,
- i32 %2)
+ i32 0, i32 %2)
ret <vscale x 32 x i16> %a
}
<vscale x 32 x i16>,
<vscale x 32 x i16>,
<vscale x 32 x i1>,
- i32,
- i32);
+ i32, i32, i32);
define <vscale x 32 x i16> @intrinsic_vsmul_mask_vv_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, <vscale x 32 x i16> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vsmul_mask_vv_nxv32i16_nxv32i16_nxv32i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vl8re16.v v24, (a0)
; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu
+; CHECK-NEXT: csrwi vxrm, 0
; CHECK-NEXT: vsmul.vv v8, v16, v24, v0.t
; CHECK-NEXT: ret
entry:
<vscale x 32 x i16> %1,
<vscale x 32 x i16> %2,
<vscale x 32 x i1> %3,
- i32 %4, i32 1)
+ i32 0, i32 %4, i32 1)
ret <vscale x 32 x i16> %a
}
<vscale x 1 x i32>,
<vscale x 1 x i32>,
<vscale x 1 x i32>,
- i32);
+ i32, i32);
define <vscale x 1 x i32> @intrinsic_vsmul_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vsmul_vv_nxv1i32_nxv1i32_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
+; CHECK-NEXT: csrwi vxrm, 0
; CHECK-NEXT: vsmul.vv v8, v8, v9
; CHECK-NEXT: ret
entry:
<vscale x 1 x i32> undef,
<vscale x 1 x i32> %0,
<vscale x 1 x i32> %1,
- i32 %2)
+ i32 0, i32 %2)
ret <vscale x 1 x i32> %a
}
<vscale x 1 x i32>,
<vscale x 1 x i32>,
<vscale x 1 x i1>,
- i32,
- i32);
+ i32, i32, i32);
define <vscale x 1 x i32> @intrinsic_vsmul_mask_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vsmul_mask_vv_nxv1i32_nxv1i32_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu
+; CHECK-NEXT: csrwi vxrm, 0
; CHECK-NEXT: vsmul.vv v8, v9, v10, v0.t
; CHECK-NEXT: ret
entry:
<vscale x 1 x i32> %1,
<vscale x 1 x i32> %2,
<vscale x 1 x i1> %3,
- i32 %4, i32 1)
+ i32 0, i32 %4, i32 1)
ret <vscale x 1 x i32> %a
}
<vscale x 2 x i32>,
<vscale x 2 x i32>,
<vscale x 2 x i32>,
- i32);
+ i32, i32);
define <vscale x 2 x i32> @intrinsic_vsmul_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vsmul_vv_nxv2i32_nxv2i32_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
+; CHECK-NEXT: csrwi vxrm, 0
; CHECK-NEXT: vsmul.vv v8, v8, v9
; CHECK-NEXT: ret
entry:
<vscale x 2 x i32> undef,
<vscale x 2 x i32> %0,
<vscale x 2 x i32> %1,
- i32 %2)
+ i32 0, i32 %2)
ret <vscale x 2 x i32> %a
}
<vscale x 2 x i32>,
<vscale x 2 x i32>,
<vscale x 2 x i1>,
- i32,
- i32);
+ i32, i32, i32);
define <vscale x 2 x i32> @intrinsic_vsmul_mask_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vsmul_mask_vv_nxv2i32_nxv2i32_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
+; CHECK-NEXT: csrwi vxrm, 0
; CHECK-NEXT: vsmul.vv v8, v9, v10, v0.t
; CHECK-NEXT: ret
entry:
<vscale x 2 x i32> %1,
<vscale x 2 x i32> %2,
<vscale x 2 x i1> %3,
- i32 %4, i32 1)
+ i32 0, i32 %4, i32 1)
ret <vscale x 2 x i32> %a
}
<vscale x 4 x i32>,
<vscale x 4 x i32>,
<vscale x 4 x i32>,
- i32);
+ i32, i32);
define <vscale x 4 x i32> @intrinsic_vsmul_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vsmul_vv_nxv4i32_nxv4i32_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
+; CHECK-NEXT: csrwi vxrm, 0
; CHECK-NEXT: vsmul.vv v8, v8, v10
; CHECK-NEXT: ret
entry:
<vscale x 4 x i32> undef,
<vscale x 4 x i32> %0,
<vscale x 4 x i32> %1,
- i32 %2)
+ i32 0, i32 %2)
ret <vscale x 4 x i32> %a
}
<vscale x 4 x i32>,
<vscale x 4 x i32>,
<vscale x 4 x i1>,
- i32,
- i32);
+ i32, i32, i32);
define <vscale x 4 x i32> @intrinsic_vsmul_mask_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vsmul_mask_vv_nxv4i32_nxv4i32_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu
+; CHECK-NEXT: csrwi vxrm, 0
; CHECK-NEXT: vsmul.vv v8, v10, v12, v0.t
; CHECK-NEXT: ret
entry:
<vscale x 4 x i32> %1,
<vscale x 4 x i32> %2,
<vscale x 4 x i1> %3,
- i32 %4, i32 1)
+ i32 0, i32 %4, i32 1)
ret <vscale x 4 x i32> %a
}
<vscale x 8 x i32>,
<vscale x 8 x i32>,
<vscale x 8 x i32>,
- i32);
+ i32, i32);
define <vscale x 8 x i32> @intrinsic_vsmul_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vsmul_vv_nxv8i32_nxv8i32_nxv8i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
+; CHECK-NEXT: csrwi vxrm, 0
; CHECK-NEXT: vsmul.vv v8, v8, v12
; CHECK-NEXT: ret
entry:
<vscale x 8 x i32> undef,
<vscale x 8 x i32> %0,
<vscale x 8 x i32> %1,
- i32 %2)
+ i32 0, i32 %2)
ret <vscale x 8 x i32> %a
}
<vscale x 8 x i32>,
<vscale x 8 x i32>,
<vscale x 8 x i1>,
- i32,
- i32);
+ i32, i32, i32);
define <vscale x 8 x i32> @intrinsic_vsmul_mask_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vsmul_mask_vv_nxv8i32_nxv8i32_nxv8i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu
+; CHECK-NEXT: csrwi vxrm, 0
; CHECK-NEXT: vsmul.vv v8, v12, v16, v0.t
; CHECK-NEXT: ret
entry:
<vscale x 8 x i32> %1,
<vscale x 8 x i32> %2,
<vscale x 8 x i1> %3,
- i32 %4, i32 1)
+ i32 0, i32 %4, i32 1)
ret <vscale x 8 x i32> %a
}
<vscale x 16 x i32>,
<vscale x 16 x i32>,
<vscale x 16 x i32>,
- i32);
+ i32, i32);
define <vscale x 16 x i32> @intrinsic_vsmul_vv_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vsmul_vv_nxv16i32_nxv16i32_nxv16i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma
+; CHECK-NEXT: csrwi vxrm, 0
; CHECK-NEXT: vsmul.vv v8, v8, v16
; CHECK-NEXT: ret
entry:
<vscale x 16 x i32> undef,
<vscale x 16 x i32> %0,
<vscale x 16 x i32> %1,
- i32 %2)
+ i32 0, i32 %2)
ret <vscale x 16 x i32> %a
}
<vscale x 16 x i32>,
<vscale x 16 x i32>,
<vscale x 16 x i1>,
- i32,
- i32);
+ i32, i32, i32);
define <vscale x 16 x i32> @intrinsic_vsmul_mask_vv_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vsmul_mask_vv_nxv16i32_nxv16i32_nxv16i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vl8re32.v v24, (a0)
; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu
+; CHECK-NEXT: csrwi vxrm, 0
; CHECK-NEXT: vsmul.vv v8, v16, v24, v0.t
; CHECK-NEXT: ret
entry:
<vscale x 16 x i32> %1,
<vscale x 16 x i32> %2,
<vscale x 16 x i1> %3,
- i32 %4, i32 1)
+ i32 0, i32 %4, i32 1)
ret <vscale x 16 x i32> %a
}
<vscale x 1 x i64>,
<vscale x 1 x i64>,
<vscale x 1 x i64>,
- i32);
+ i32, i32);
define <vscale x 1 x i64> @intrinsic_vsmul_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vsmul_vv_nxv1i64_nxv1i64_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
+; CHECK-NEXT: csrwi vxrm, 0
; CHECK-NEXT: vsmul.vv v8, v8, v9
; CHECK-NEXT: ret
entry:
<vscale x 1 x i64> undef,
<vscale x 1 x i64> %0,
<vscale x 1 x i64> %1,
- i32 %2)
+ i32 0, i32 %2)
ret <vscale x 1 x i64> %a
}
<vscale x 1 x i64>,
<vscale x 1 x i64>,
<vscale x 1 x i1>,
- i32,
- i32);
+ i32, i32, i32);
define <vscale x 1 x i64> @intrinsic_vsmul_mask_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vsmul_mask_vv_nxv1i64_nxv1i64_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu
+; CHECK-NEXT: csrwi vxrm, 0
; CHECK-NEXT: vsmul.vv v8, v9, v10, v0.t
; CHECK-NEXT: ret
entry:
<vscale x 1 x i64> %1,
<vscale x 1 x i64> %2,
<vscale x 1 x i1> %3,
- i32 %4, i32 1)
+ i32 0, i32 %4, i32 1)
ret <vscale x 1 x i64> %a
}
<vscale x 2 x i64>,
<vscale x 2 x i64>,
<vscale x 2 x i64>,
- i32);
+ i32, i32);
define <vscale x 2 x i64> @intrinsic_vsmul_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vsmul_vv_nxv2i64_nxv2i64_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
+; CHECK-NEXT: csrwi vxrm, 0
; CHECK-NEXT: vsmul.vv v8, v8, v10
; CHECK-NEXT: ret
entry:
<vscale x 2 x i64> undef,
<vscale x 2 x i64> %0,
<vscale x 2 x i64> %1,
- i32 %2)
+ i32 0, i32 %2)
ret <vscale x 2 x i64> %a
}
<vscale x 2 x i64>,
<vscale x 2 x i64>,
<vscale x 2 x i1>,
- i32,
- i32);
+ i32, i32, i32);
define <vscale x 2 x i64> @intrinsic_vsmul_mask_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vsmul_mask_vv_nxv2i64_nxv2i64_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu
+; CHECK-NEXT: csrwi vxrm, 0
; CHECK-NEXT: vsmul.vv v8, v10, v12, v0.t
; CHECK-NEXT: ret
entry:
<vscale x 2 x i64> %1,
<vscale x 2 x i64> %2,
<vscale x 2 x i1> %3,
- i32 %4, i32 1)
+ i32 0, i32 %4, i32 1)
ret <vscale x 2 x i64> %a
}
<vscale x 4 x i64>,
<vscale x 4 x i64>,
<vscale x 4 x i64>,
- i32);
+ i32, i32);
define <vscale x 4 x i64> @intrinsic_vsmul_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vsmul_vv_nxv4i64_nxv4i64_nxv4i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
+; CHECK-NEXT: csrwi vxrm, 0
; CHECK-NEXT: vsmul.vv v8, v8, v12
; CHECK-NEXT: ret
entry:
<vscale x 4 x i64> undef,
<vscale x 4 x i64> %0,
<vscale x 4 x i64> %1,
- i32 %2)
+ i32 0, i32 %2)
ret <vscale x 4 x i64> %a
}
<vscale x 4 x i64>,
<vscale x 4 x i64>,
<vscale x 4 x i1>,
- i32,
- i32);
+ i32, i32, i32);
define <vscale x 4 x i64> @intrinsic_vsmul_mask_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vsmul_mask_vv_nxv4i64_nxv4i64_nxv4i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu
+; CHECK-NEXT: csrwi vxrm, 0
; CHECK-NEXT: vsmul.vv v8, v12, v16, v0.t
; CHECK-NEXT: ret
entry:
<vscale x 4 x i64> %1,
<vscale x 4 x i64> %2,
<vscale x 4 x i1> %3,
- i32 %4, i32 1)
+ i32 0, i32 %4, i32 1)
ret <vscale x 4 x i64> %a
}
<vscale x 8 x i64>,
<vscale x 8 x i64>,
<vscale x 8 x i64>,
- i32);
+ i32, i32);
define <vscale x 8 x i64> @intrinsic_vsmul_vv_nxv8i64_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vsmul_vv_nxv8i64_nxv8i64_nxv8i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; CHECK-NEXT: csrwi vxrm, 0
; CHECK-NEXT: vsmul.vv v8, v8, v16
; CHECK-NEXT: ret
entry:
<vscale x 8 x i64> undef,
<vscale x 8 x i64> %0,
<vscale x 8 x i64> %1,
- i32 %2)
+ i32 0, i32 %2)
ret <vscale x 8 x i64> %a
}
<vscale x 8 x i64>,
<vscale x 8 x i64>,
<vscale x 8 x i1>,
- i32,
- i32);
+ i32, i32, i32);
define <vscale x 8 x i64> @intrinsic_vsmul_mask_vv_nxv8i64_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vsmul_mask_vv_nxv8i64_nxv8i64_nxv8i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vl8re64.v v24, (a0)
; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu
+; CHECK-NEXT: csrwi vxrm, 0
; CHECK-NEXT: vsmul.vv v8, v16, v24, v0.t
; CHECK-NEXT: ret
entry:
<vscale x 8 x i64> %1,
<vscale x 8 x i64> %2,
<vscale x 8 x i1> %3,
- i32 %4, i32 1)
+ i32 0, i32 %4, i32 1)
ret <vscale x 8 x i64> %a
}
<vscale x 1 x i8>,
<vscale x 1 x i8>,
i8,
- i32);
+ i32, i32);
define <vscale x 1 x i8> @intrinsic_vsmul_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, i8 %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vsmul_vx_nxv1i8_nxv1i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
+; CHECK-NEXT: csrwi vxrm, 0
; CHECK-NEXT: vsmul.vx v8, v8, a0
; CHECK-NEXT: ret
entry:
<vscale x 1 x i8> undef,
<vscale x 1 x i8> %0,
i8 %1,
- i32 %2)
+ i32 0, i32 %2)
ret <vscale x 1 x i8> %a
}
<vscale x 1 x i8>,
i8,
<vscale x 1 x i1>,
- i32,
- i32);
+ i32, i32, i32);
define <vscale x 1 x i8> @intrinsic_vsmul_mask_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i8 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vsmul_mask_vx_nxv1i8_nxv1i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
+; CHECK-NEXT: csrwi vxrm, 0
; CHECK-NEXT: vsmul.vx v8, v9, a0, v0.t
; CHECK-NEXT: ret
entry:
<vscale x 1 x i8> %1,
i8 %2,
<vscale x 1 x i1> %3,
- i32 %4, i32 1)
+ i32 0, i32 %4, i32 1)
ret <vscale x 1 x i8> %a
}
<vscale x 2 x i8>,
<vscale x 2 x i8>,
i8,
- i32);
+ i32, i32);
define <vscale x 2 x i8> @intrinsic_vsmul_vx_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, i8 %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vsmul_vx_nxv2i8_nxv2i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
+; CHECK-NEXT: csrwi vxrm, 0
; CHECK-NEXT: vsmul.vx v8, v8, a0
; CHECK-NEXT: ret
entry:
<vscale x 2 x i8> undef,
<vscale x 2 x i8> %0,
i8 %1,
- i32 %2)
+ i32 0, i32 %2)
ret <vscale x 2 x i8> %a
}
<vscale x 2 x i8>,
i8,
<vscale x 2 x i1>,
- i32,
- i32);
+ i32, i32, i32);
define <vscale x 2 x i8> @intrinsic_vsmul_mask_vx_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, i8 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vsmul_mask_vx_nxv2i8_nxv2i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
+; CHECK-NEXT: csrwi vxrm, 0
; CHECK-NEXT: vsmul.vx v8, v9, a0, v0.t
; CHECK-NEXT: ret
entry:
<vscale x 2 x i8> %1,
i8 %2,
<vscale x 2 x i1> %3,
- i32 %4, i32 1)
+ i32 0, i32 %4, i32 1)
ret <vscale x 2 x i8> %a
}
<vscale x 4 x i8>,
<vscale x 4 x i8>,
i8,
- i32);
+ i32, i32);
define <vscale x 4 x i8> @intrinsic_vsmul_vx_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, i8 %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vsmul_vx_nxv4i8_nxv4i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
+; CHECK-NEXT: csrwi vxrm, 0
; CHECK-NEXT: vsmul.vx v8, v8, a0
; CHECK-NEXT: ret
entry:
<vscale x 4 x i8> undef,
<vscale x 4 x i8> %0,
i8 %1,
- i32 %2)
+ i32 0, i32 %2)
ret <vscale x 4 x i8> %a
}
<vscale x 4 x i8>,
i8,
<vscale x 4 x i1>,
- i32,
- i32);
+ i32, i32, i32);
define <vscale x 4 x i8> @intrinsic_vsmul_mask_vx_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, i8 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vsmul_mask_vx_nxv4i8_nxv4i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
+; CHECK-NEXT: csrwi vxrm, 0
; CHECK-NEXT: vsmul.vx v8, v9, a0, v0.t
; CHECK-NEXT: ret
entry:
<vscale x 4 x i8> %1,
i8 %2,
<vscale x 4 x i1> %3,
- i32 %4, i32 1)
+ i32 0, i32 %4, i32 1)
ret <vscale x 4 x i8> %a
}
<vscale x 8 x i8>,
<vscale x 8 x i8>,
i8,
- i32);
+ i32, i32);
define <vscale x 8 x i8> @intrinsic_vsmul_vx_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, i8 %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vsmul_vx_nxv8i8_nxv8i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
+; CHECK-NEXT: csrwi vxrm, 0
; CHECK-NEXT: vsmul.vx v8, v8, a0
; CHECK-NEXT: ret
entry:
<vscale x 8 x i8> undef,
<vscale x 8 x i8> %0,
i8 %1,
- i32 %2)
+ i32 0, i32 %2)
ret <vscale x 8 x i8> %a
}
<vscale x 8 x i8>,
i8,
<vscale x 8 x i1>,
- i32,
- i32);
+ i32, i32, i32);
define <vscale x 8 x i8> @intrinsic_vsmul_mask_vx_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, i8 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vsmul_mask_vx_nxv8i8_nxv8i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
+; CHECK-NEXT: csrwi vxrm, 0
; CHECK-NEXT: vsmul.vx v8, v9, a0, v0.t
; CHECK-NEXT: ret
entry:
<vscale x 8 x i8> %1,
i8 %2,
<vscale x 8 x i1> %3,
- i32 %4, i32 1)
+ i32 0, i32 %4, i32 1)
ret <vscale x 8 x i8> %a
}
<vscale x 16 x i8>,
<vscale x 16 x i8>,
i8,
- i32);
+ i32, i32);
define <vscale x 16 x i8> @intrinsic_vsmul_vx_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, i8 %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vsmul_vx_nxv16i8_nxv16i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma
+; CHECK-NEXT: csrwi vxrm, 0
; CHECK-NEXT: vsmul.vx v8, v8, a0
; CHECK-NEXT: ret
entry:
<vscale x 16 x i8> undef,
<vscale x 16 x i8> %0,
i8 %1,
- i32 %2)
+ i32 0, i32 %2)
ret <vscale x 16 x i8> %a
}
<vscale x 16 x i8>,
i8,
<vscale x 16 x i1>,
- i32,
- i32);
+ i32, i32, i32);
define <vscale x 16 x i8> @intrinsic_vsmul_mask_vx_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, i8 %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vsmul_mask_vx_nxv16i8_nxv16i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu
+; CHECK-NEXT: csrwi vxrm, 0
; CHECK-NEXT: vsmul.vx v8, v10, a0, v0.t
; CHECK-NEXT: ret
entry:
<vscale x 16 x i8> %1,
i8 %2,
<vscale x 16 x i1> %3,
- i32 %4, i32 1)
+ i32 0, i32 %4, i32 1)
ret <vscale x 16 x i8> %a
}
<vscale x 32 x i8>,
<vscale x 32 x i8>,
i8,
- i32);
+ i32, i32);
define <vscale x 32 x i8> @intrinsic_vsmul_vx_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, i8 %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vsmul_vx_nxv32i8_nxv32i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma
+; CHECK-NEXT: csrwi vxrm, 0
; CHECK-NEXT: vsmul.vx v8, v8, a0
; CHECK-NEXT: ret
entry:
<vscale x 32 x i8> undef,
<vscale x 32 x i8> %0,
i8 %1,
- i32 %2)
+ i32 0, i32 %2)
ret <vscale x 32 x i8> %a
}
<vscale x 32 x i8>,
i8,
<vscale x 32 x i1>,
- i32,
- i32);
+ i32, i32, i32);
define <vscale x 32 x i8> @intrinsic_vsmul_mask_vx_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, i8 %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vsmul_mask_vx_nxv32i8_nxv32i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu
+; CHECK-NEXT: csrwi vxrm, 0
; CHECK-NEXT: vsmul.vx v8, v12, a0, v0.t
; CHECK-NEXT: ret
entry:
<vscale x 32 x i8> %1,
i8 %2,
<vscale x 32 x i1> %3,
- i32 %4, i32 1)
+ i32 0, i32 %4, i32 1)
ret <vscale x 32 x i8> %a
}
<vscale x 64 x i8>,
<vscale x 64 x i8>,
i8,
- i32);
+ i32, i32);
define <vscale x 64 x i8> @intrinsic_vsmul_vx_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, i8 %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vsmul_vx_nxv64i8_nxv64i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma
+; CHECK-NEXT: csrwi vxrm, 0
; CHECK-NEXT: vsmul.vx v8, v8, a0
; CHECK-NEXT: ret
entry:
<vscale x 64 x i8> undef,
<vscale x 64 x i8> %0,
i8 %1,
- i32 %2)
+ i32 0, i32 %2)
ret <vscale x 64 x i8> %a
}
<vscale x 64 x i8>,
i8,
<vscale x 64 x i1>,
- i32,
- i32);
+ i32, i32, i32);
define <vscale x 64 x i8> @intrinsic_vsmul_mask_vx_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, i8 %2, <vscale x 64 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vsmul_mask_vx_nxv64i8_nxv64i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu
+; CHECK-NEXT: csrwi vxrm, 0
; CHECK-NEXT: vsmul.vx v8, v16, a0, v0.t
; CHECK-NEXT: ret
entry:
<vscale x 64 x i8> %1,
i8 %2,
<vscale x 64 x i1> %3,
- i32 %4, i32 1)
+ i32 0, i32 %4, i32 1)
ret <vscale x 64 x i8> %a
}
<vscale x 1 x i16>,
<vscale x 1 x i16>,
i16,
- i32);
+ i32, i32);
define <vscale x 1 x i16> @intrinsic_vsmul_vx_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, i16 %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vsmul_vx_nxv1i16_nxv1i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
+; CHECK-NEXT: csrwi vxrm, 0
; CHECK-NEXT: vsmul.vx v8, v8, a0
; CHECK-NEXT: ret
entry:
<vscale x 1 x i16> undef,
<vscale x 1 x i16> %0,
i16 %1,
- i32 %2)
+ i32 0, i32 %2)
ret <vscale x 1 x i16> %a
}
<vscale x 1 x i16>,
i16,
<vscale x 1 x i1>,
- i32,
- i32);
+ i32, i32, i32);
define <vscale x 1 x i16> @intrinsic_vsmul_mask_vx_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, i16 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vsmul_mask_vx_nxv1i16_nxv1i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
+; CHECK-NEXT: csrwi vxrm, 0
; CHECK-NEXT: vsmul.vx v8, v9, a0, v0.t
; CHECK-NEXT: ret
entry:
<vscale x 1 x i16> %1,
i16 %2,
<vscale x 1 x i1> %3,
- i32 %4, i32 1)
+ i32 0, i32 %4, i32 1)
ret <vscale x 1 x i16> %a
}
<vscale x 2 x i16>,
<vscale x 2 x i16>,
i16,
- i32);
+ i32, i32);
define <vscale x 2 x i16> @intrinsic_vsmul_vx_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, i16 %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vsmul_vx_nxv2i16_nxv2i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
+; CHECK-NEXT: csrwi vxrm, 0
; CHECK-NEXT: vsmul.vx v8, v8, a0
; CHECK-NEXT: ret
entry:
<vscale x 2 x i16> undef,
<vscale x 2 x i16> %0,
i16 %1,
- i32 %2)
+ i32 0, i32 %2)
ret <vscale x 2 x i16> %a
}
<vscale x 2 x i16>,
i16,
<vscale x 2 x i1>,
- i32,
- i32);
+ i32, i32, i32);
define <vscale x 2 x i16> @intrinsic_vsmul_mask_vx_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, i16 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vsmul_mask_vx_nxv2i16_nxv2i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
+; CHECK-NEXT: csrwi vxrm, 0
; CHECK-NEXT: vsmul.vx v8, v9, a0, v0.t
; CHECK-NEXT: ret
entry:
<vscale x 2 x i16> %1,
i16 %2,
<vscale x 2 x i1> %3,
- i32 %4, i32 1)
+ i32 0, i32 %4, i32 1)
ret <vscale x 2 x i16> %a
}
<vscale x 4 x i16>,
<vscale x 4 x i16>,
i16,
- i32);
+ i32, i32);
define <vscale x 4 x i16> @intrinsic_vsmul_vx_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, i16 %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vsmul_vx_nxv4i16_nxv4i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
+; CHECK-NEXT: csrwi vxrm, 0
; CHECK-NEXT: vsmul.vx v8, v8, a0
; CHECK-NEXT: ret
entry:
<vscale x 4 x i16> undef,
<vscale x 4 x i16> %0,
i16 %1,
- i32 %2)
+ i32 0, i32 %2)
ret <vscale x 4 x i16> %a
}
<vscale x 4 x i16>,
i16,
<vscale x 4 x i1>,
- i32,
- i32);
+ i32, i32, i32);
define <vscale x 4 x i16> @intrinsic_vsmul_mask_vx_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, i16 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vsmul_mask_vx_nxv4i16_nxv4i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
+; CHECK-NEXT: csrwi vxrm, 0
; CHECK-NEXT: vsmul.vx v8, v9, a0, v0.t
; CHECK-NEXT: ret
entry:
<vscale x 4 x i16> %1,
i16 %2,
<vscale x 4 x i1> %3,
- i32 %4, i32 1)
+ i32 0, i32 %4, i32 1)
ret <vscale x 4 x i16> %a
}
<vscale x 8 x i16>,
<vscale x 8 x i16>,
i16,
- i32);
+ i32, i32);
define <vscale x 8 x i16> @intrinsic_vsmul_vx_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, i16 %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vsmul_vx_nxv8i16_nxv8i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
+; CHECK-NEXT: csrwi vxrm, 0
; CHECK-NEXT: vsmul.vx v8, v8, a0
; CHECK-NEXT: ret
entry:
<vscale x 8 x i16> undef,
<vscale x 8 x i16> %0,
i16 %1,
- i32 %2)
+ i32 0, i32 %2)
ret <vscale x 8 x i16> %a
}
<vscale x 8 x i16>,
i16,
<vscale x 8 x i1>,
- i32,
- i32);
+ i32, i32, i32);
define <vscale x 8 x i16> @intrinsic_vsmul_mask_vx_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, i16 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vsmul_mask_vx_nxv8i16_nxv8i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
+; CHECK-NEXT: csrwi vxrm, 0
; CHECK-NEXT: vsmul.vx v8, v10, a0, v0.t
; CHECK-NEXT: ret
entry:
<vscale x 8 x i16> %1,
i16 %2,
<vscale x 8 x i1> %3,
- i32 %4, i32 1)
+ i32 0, i32 %4, i32 1)
ret <vscale x 8 x i16> %a
}
<vscale x 16 x i16>,
<vscale x 16 x i16>,
i16,
- i32);
+ i32, i32);
define <vscale x 16 x i16> @intrinsic_vsmul_vx_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, i16 %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vsmul_vx_nxv16i16_nxv16i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma
+; CHECK-NEXT: csrwi vxrm, 0
; CHECK-NEXT: vsmul.vx v8, v8, a0
; CHECK-NEXT: ret
entry:
<vscale x 16 x i16> undef,
<vscale x 16 x i16> %0,
i16 %1,
- i32 %2)
+ i32 0, i32 %2)
ret <vscale x 16 x i16> %a
}
<vscale x 16 x i16>,
i16,
<vscale x 16 x i1>,
- i32,
- i32);
+ i32, i32, i32);
define <vscale x 16 x i16> @intrinsic_vsmul_mask_vx_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, i16 %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vsmul_mask_vx_nxv16i16_nxv16i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu
+; CHECK-NEXT: csrwi vxrm, 0
; CHECK-NEXT: vsmul.vx v8, v12, a0, v0.t
; CHECK-NEXT: ret
entry:
<vscale x 16 x i16> %1,
i16 %2,
<vscale x 16 x i1> %3,
- i32 %4, i32 1)
+ i32 0, i32 %4, i32 1)
ret <vscale x 16 x i16> %a
}
<vscale x 32 x i16>,
<vscale x 32 x i16>,
i16,
- i32);
+ i32, i32);
define <vscale x 32 x i16> @intrinsic_vsmul_vx_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, i16 %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vsmul_vx_nxv32i16_nxv32i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma
+; CHECK-NEXT: csrwi vxrm, 0
; CHECK-NEXT: vsmul.vx v8, v8, a0
; CHECK-NEXT: ret
entry:
<vscale x 32 x i16> undef,
<vscale x 32 x i16> %0,
i16 %1,
- i32 %2)
+ i32 0, i32 %2)
ret <vscale x 32 x i16> %a
}
<vscale x 32 x i16>,
i16,
<vscale x 32 x i1>,
- i32,
- i32);
+ i32, i32, i32);
define <vscale x 32 x i16> @intrinsic_vsmul_mask_vx_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, i16 %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vsmul_mask_vx_nxv32i16_nxv32i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu
+; CHECK-NEXT: csrwi vxrm, 0
; CHECK-NEXT: vsmul.vx v8, v16, a0, v0.t
; CHECK-NEXT: ret
entry:
<vscale x 32 x i16> %1,
i16 %2,
<vscale x 32 x i1> %3,
- i32 %4, i32 1)
+ i32 0, i32 %4, i32 1)
ret <vscale x 32 x i16> %a
}
declare <vscale x 1 x i32> @llvm.riscv.vsmul.nxv1i32.i32(
<vscale x 1 x i32>,
<vscale x 1 x i32>,
- i32,
- i32);
+ i32, i32, i32);
define <vscale x 1 x i32> @intrinsic_vsmul_vx_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, i32 %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vsmul_vx_nxv1i32_nxv1i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
+; CHECK-NEXT: csrwi vxrm, 0
; CHECK-NEXT: vsmul.vx v8, v8, a0
; CHECK-NEXT: ret
entry:
<vscale x 1 x i32> undef,
<vscale x 1 x i32> %0,
i32 %1,
- i32 %2)
+ i32 0, i32 %2)
ret <vscale x 1 x i32> %a
}
<vscale x 1 x i32>,
i32,
<vscale x 1 x i1>,
- i32,
- i32);
+ i32, i32, i32);
define <vscale x 1 x i32> @intrinsic_vsmul_mask_vx_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, i32 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vsmul_mask_vx_nxv1i32_nxv1i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
+; CHECK-NEXT: csrwi vxrm, 0
; CHECK-NEXT: vsmul.vx v8, v9, a0, v0.t
; CHECK-NEXT: ret
entry:
<vscale x 1 x i32> %1,
i32 %2,
<vscale x 1 x i1> %3,
- i32 %4, i32 1)
+ i32 0, i32 %4, i32 1)
ret <vscale x 1 x i32> %a
}
declare <vscale x 2 x i32> @llvm.riscv.vsmul.nxv2i32.i32(
<vscale x 2 x i32>,
<vscale x 2 x i32>,
- i32,
- i32);
+ i32, i32, i32);
define <vscale x 2 x i32> @intrinsic_vsmul_vx_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, i32 %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vsmul_vx_nxv2i32_nxv2i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
+; CHECK-NEXT: csrwi vxrm, 0
; CHECK-NEXT: vsmul.vx v8, v8, a0
; CHECK-NEXT: ret
entry:
<vscale x 2 x i32> undef,
<vscale x 2 x i32> %0,
i32 %1,
- i32 %2)
+ i32 0, i32 %2)
ret <vscale x 2 x i32> %a
}
<vscale x 2 x i32>,
i32,
<vscale x 2 x i1>,
- i32,
- i32);
+ i32, i32, i32);
define <vscale x 2 x i32> @intrinsic_vsmul_mask_vx_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, i32 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vsmul_mask_vx_nxv2i32_nxv2i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
+; CHECK-NEXT: csrwi vxrm, 0
; CHECK-NEXT: vsmul.vx v8, v9, a0, v0.t
; CHECK-NEXT: ret
entry:
<vscale x 2 x i32> %1,
i32 %2,
<vscale x 2 x i1> %3,
- i32 %4, i32 1)
+ i32 0, i32 %4, i32 1)
ret <vscale x 2 x i32> %a
}
declare <vscale x 4 x i32> @llvm.riscv.vsmul.nxv4i32.i32(
<vscale x 4 x i32>,
<vscale x 4 x i32>,
- i32,
- i32);
+ i32, i32, i32);
define <vscale x 4 x i32> @intrinsic_vsmul_vx_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, i32 %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vsmul_vx_nxv4i32_nxv4i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
+; CHECK-NEXT: csrwi vxrm, 0
; CHECK-NEXT: vsmul.vx v8, v8, a0
; CHECK-NEXT: ret
entry:
<vscale x 4 x i32> undef,
<vscale x 4 x i32> %0,
i32 %1,
- i32 %2)
+ i32 0, i32 %2)
ret <vscale x 4 x i32> %a
}
<vscale x 4 x i32>,
i32,
<vscale x 4 x i1>,
- i32,
- i32);
+ i32, i32, i32);
define <vscale x 4 x i32> @intrinsic_vsmul_mask_vx_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, i32 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vsmul_mask_vx_nxv4i32_nxv4i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
+; CHECK-NEXT: csrwi vxrm, 0
; CHECK-NEXT: vsmul.vx v8, v10, a0, v0.t
; CHECK-NEXT: ret
entry:
<vscale x 4 x i32> %1,
i32 %2,
<vscale x 4 x i1> %3,
- i32 %4, i32 1)
+ i32 0, i32 %4, i32 1)
ret <vscale x 4 x i32> %a
}
declare <vscale x 8 x i32> @llvm.riscv.vsmul.nxv8i32.i32(
<vscale x 8 x i32>,
<vscale x 8 x i32>,
- i32,
- i32);
+ i32, i32, i32);
define <vscale x 8 x i32> @intrinsic_vsmul_vx_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, i32 %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vsmul_vx_nxv8i32_nxv8i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma
+; CHECK-NEXT: csrwi vxrm, 0
; CHECK-NEXT: vsmul.vx v8, v8, a0
; CHECK-NEXT: ret
entry:
<vscale x 8 x i32> undef,
<vscale x 8 x i32> %0,
i32 %1,
- i32 %2)
+ i32 0, i32 %2)
ret <vscale x 8 x i32> %a
}
<vscale x 8 x i32>,
i32,
<vscale x 8 x i1>,
- i32,
- i32);
+ i32, i32, i32);
define <vscale x 8 x i32> @intrinsic_vsmul_mask_vx_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, i32 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vsmul_mask_vx_nxv8i32_nxv8i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu
+; CHECK-NEXT: csrwi vxrm, 0
; CHECK-NEXT: vsmul.vx v8, v12, a0, v0.t
; CHECK-NEXT: ret
entry:
<vscale x 8 x i32> %1,
i32 %2,
<vscale x 8 x i1> %3,
- i32 %4, i32 1)
+ i32 0, i32 %4, i32 1)
ret <vscale x 8 x i32> %a
}
declare <vscale x 16 x i32> @llvm.riscv.vsmul.nxv16i32.i32(
<vscale x 16 x i32>,
<vscale x 16 x i32>,
- i32,
- i32);
+ i32, i32, i32);
define <vscale x 16 x i32> @intrinsic_vsmul_vx_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, i32 %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vsmul_vx_nxv16i32_nxv16i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma
+; CHECK-NEXT: csrwi vxrm, 0
; CHECK-NEXT: vsmul.vx v8, v8, a0
; CHECK-NEXT: ret
entry:
<vscale x 16 x i32> undef,
<vscale x 16 x i32> %0,
i32 %1,
- i32 %2)
+ i32 0, i32 %2)
ret <vscale x 16 x i32> %a
}
<vscale x 16 x i32>,
i32,
<vscale x 16 x i1>,
- i32,
- i32);
+ i32, i32, i32);
define <vscale x 16 x i32> @intrinsic_vsmul_mask_vx_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, i32 %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vsmul_mask_vx_nxv16i32_nxv16i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu
+; CHECK-NEXT: csrwi vxrm, 0
; CHECK-NEXT: vsmul.vx v8, v16, a0, v0.t
; CHECK-NEXT: ret
entry:
<vscale x 16 x i32> %1,
i32 %2,
<vscale x 16 x i1> %3,
- i32 %4, i32 1)
+ i32 0, i32 %4, i32 1)
ret <vscale x 16 x i32> %a
}
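
The RV32-with-i64-scalar tests that follow cannot feed `vsmul.vx` from a single GPR, so the scalar is spilled to the stack and re-splatted with `vlse64.v`, and the `csrwi vxrm, 0` write lands between the splat and a `vsmul.vv`. A minimal sketch of the call these tests make (the %op, %scalar, %vl and %r names are illustrative placeholders, not taken from the patch):

  %r = call <vscale x 1 x i64> @llvm.riscv.vsmul.nxv1i64.i64(
         <vscale x 1 x i64> undef,  ; passthru
         <vscale x 1 x i64> %op,    ; vector operand
         i64 %scalar,               ; splatted via vlse64.v on RV32
         i32 0,                     ; vxrm (0 = rnu)
         i32 %vl)                   ; vl
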
<vscale x 1 x i64>,
<vscale x 1 x i64>,
i64,
- i32);
+ i32, i32);
define <vscale x 1 x i64> @intrinsic_vsmul_vx_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, i64 %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vsmul_vx_nxv1i64_nxv1i64_i64:
; CHECK-NEXT: addi a0, sp, 8
; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, ma
; CHECK-NEXT: vlse64.v v9, (a0), zero
+; CHECK-NEXT: csrwi vxrm, 0
; CHECK-NEXT: vsmul.vv v8, v8, v9
; CHECK-NEXT: addi sp, sp, 16
; CHECK-NEXT: ret
<vscale x 1 x i64> undef,
<vscale x 1 x i64> %0,
i64 %1,
- i32 %2)
+ i32 0, i32 %2)
ret <vscale x 1 x i64> %a
}
<vscale x 1 x i64>,
i64,
<vscale x 1 x i1>,
- i32,
- i32);
+ i32, i32, i32);
define <vscale x 1 x i64> @intrinsic_vsmul_mask_vx_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i64 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vsmul_mask_vx_nxv1i64_nxv1i64_i64:
; CHECK-NEXT: addi a0, sp, 8
; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu
; CHECK-NEXT: vlse64.v v10, (a0), zero
+; CHECK-NEXT: csrwi vxrm, 0
; CHECK-NEXT: vsmul.vv v8, v9, v10, v0.t
; CHECK-NEXT: addi sp, sp, 16
; CHECK-NEXT: ret
<vscale x 1 x i64> %1,
i64 %2,
<vscale x 1 x i1> %3,
- i32 %4, i32 1)
+ i32 0, i32 %4, i32 1)
ret <vscale x 1 x i64> %a
}
<vscale x 2 x i64>,
<vscale x 2 x i64>,
i64,
- i32);
+ i32, i32);
define <vscale x 2 x i64> @intrinsic_vsmul_vx_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, i64 %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vsmul_vx_nxv2i64_nxv2i64_i64:
; CHECK-NEXT: addi a0, sp, 8
; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, ma
; CHECK-NEXT: vlse64.v v10, (a0), zero
+; CHECK-NEXT: csrwi vxrm, 0
; CHECK-NEXT: vsmul.vv v8, v8, v10
; CHECK-NEXT: addi sp, sp, 16
; CHECK-NEXT: ret
<vscale x 2 x i64> undef,
<vscale x 2 x i64> %0,
i64 %1,
- i32 %2)
+ i32 0, i32 %2)
ret <vscale x 2 x i64> %a
}
<vscale x 2 x i64>,
i64,
<vscale x 2 x i1>,
- i32,
- i32);
+ i32, i32, i32);
define <vscale x 2 x i64> @intrinsic_vsmul_mask_vx_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, i64 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vsmul_mask_vx_nxv2i64_nxv2i64_i64:
; CHECK-NEXT: addi a0, sp, 8
; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, mu
; CHECK-NEXT: vlse64.v v12, (a0), zero
+; CHECK-NEXT: csrwi vxrm, 0
; CHECK-NEXT: vsmul.vv v8, v10, v12, v0.t
; CHECK-NEXT: addi sp, sp, 16
; CHECK-NEXT: ret
<vscale x 2 x i64> %1,
i64 %2,
<vscale x 2 x i1> %3,
- i32 %4, i32 1)
+ i32 0, i32 %4, i32 1)
ret <vscale x 2 x i64> %a
}
<vscale x 4 x i64>,
<vscale x 4 x i64>,
i64,
- i32);
+ i32, i32);
define <vscale x 4 x i64> @intrinsic_vsmul_vx_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, i64 %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vsmul_vx_nxv4i64_nxv4i64_i64:
; CHECK-NEXT: addi a0, sp, 8
; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, ma
; CHECK-NEXT: vlse64.v v12, (a0), zero
+; CHECK-NEXT: csrwi vxrm, 0
; CHECK-NEXT: vsmul.vv v8, v8, v12
; CHECK-NEXT: addi sp, sp, 16
; CHECK-NEXT: ret
<vscale x 4 x i64> undef,
<vscale x 4 x i64> %0,
i64 %1,
- i32 %2)
+ i32 0, i32 %2)
ret <vscale x 4 x i64> %a
}
<vscale x 4 x i64>,
i64,
<vscale x 4 x i1>,
- i32,
- i32);
+ i32, i32, i32);
define <vscale x 4 x i64> @intrinsic_vsmul_mask_vx_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, i64 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vsmul_mask_vx_nxv4i64_nxv4i64_i64:
; CHECK-NEXT: addi a0, sp, 8
; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, mu
; CHECK-NEXT: vlse64.v v16, (a0), zero
+; CHECK-NEXT: csrwi vxrm, 0
; CHECK-NEXT: vsmul.vv v8, v12, v16, v0.t
; CHECK-NEXT: addi sp, sp, 16
; CHECK-NEXT: ret
<vscale x 4 x i64> %1,
i64 %2,
<vscale x 4 x i1> %3,
- i32 %4, i32 1)
+ i32 0, i32 %4, i32 1)
ret <vscale x 4 x i64> %a
}
<vscale x 8 x i64>,
<vscale x 8 x i64>,
i64,
- i32);
+ i32, i32);
define <vscale x 8 x i64> @intrinsic_vsmul_vx_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, i64 %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vsmul_vx_nxv8i64_nxv8i64_i64:
; CHECK-NEXT: addi a0, sp, 8
; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, ma
; CHECK-NEXT: vlse64.v v16, (a0), zero
+; CHECK-NEXT: csrwi vxrm, 0
; CHECK-NEXT: vsmul.vv v8, v8, v16
; CHECK-NEXT: addi sp, sp, 16
; CHECK-NEXT: ret
<vscale x 8 x i64> undef,
<vscale x 8 x i64> %0,
i64 %1,
- i32 %2)
+ i32 0, i32 %2)
ret <vscale x 8 x i64> %a
}
<vscale x 8 x i64>,
i64,
<vscale x 8 x i1>,
- i32,
- i32);
+ i32, i32, i32);
define <vscale x 8 x i64> @intrinsic_vsmul_mask_vx_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, i64 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vsmul_mask_vx_nxv8i64_nxv8i64_i64:
; CHECK-NEXT: addi a0, sp, 8
; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, mu
; CHECK-NEXT: vlse64.v v24, (a0), zero
+; CHECK-NEXT: csrwi vxrm, 0
; CHECK-NEXT: vsmul.vv v8, v16, v24, v0.t
; CHECK-NEXT: addi sp, sp, 16
; CHECK-NEXT: ret
<vscale x 8 x i64> %1,
i64 %2,
<vscale x 8 x i1> %3,
- i32 %4, i32 1)
+ i32 0, i32 %4, i32 1)
ret <vscale x 8 x i64> %a
}
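
For the RV64 variants below, a minimal sketch of the unmasked call shape after this change (%x, %y, %vl and %r are illustrative placeholders):

  %r = call <vscale x 1 x i8> @llvm.riscv.vsmul.nxv1i8.nxv1i8(
         <vscale x 1 x i8> undef,  ; passthru
         <vscale x 1 x i8> %x,     ; op0
         <vscale x 1 x i8> %y,     ; op1
         i64 0,                    ; vxrm (0 = rnu)
         i64 %vl)                  ; vl

The rounding-mode operand sits immediately before vl, and the constant 0 is what produces the checked `csrwi vxrm, 0` ahead of each `vsmul` instruction.
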
<vscale x 1 x i8>,
<vscale x 1 x i8>,
<vscale x 1 x i8>,
- i64);
+ i64, i64);
define <vscale x 1 x i8> @intrinsic_vsmul_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vsmul_vv_nxv1i8_nxv1i8_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
+; CHECK-NEXT: csrwi vxrm, 0
; CHECK-NEXT: vsmul.vv v8, v8, v9
; CHECK-NEXT: ret
entry:
<vscale x 1 x i8> undef,
<vscale x 1 x i8> %0,
<vscale x 1 x i8> %1,
- i64 %2)
+ i64 0, i64 %2)
ret <vscale x 1 x i8> %a
}
<vscale x 1 x i8>,
<vscale x 1 x i8>,
<vscale x 1 x i1>,
- i64,
- i64);
+ i64, i64, i64);
define <vscale x 1 x i8> @intrinsic_vsmul_mask_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vsmul_mask_vv_nxv1i8_nxv1i8_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu
+; CHECK-NEXT: csrwi vxrm, 0
; CHECK-NEXT: vsmul.vv v8, v9, v10, v0.t
; CHECK-NEXT: ret
entry:
<vscale x 1 x i8> %1,
<vscale x 1 x i8> %2,
<vscale x 1 x i1> %3,
- i64 %4, i64 1)
+ i64 0, i64 %4, i64 1)
ret <vscale x 1 x i8> %a
}
<vscale x 2 x i8>,
<vscale x 2 x i8>,
<vscale x 2 x i8>,
- i64);
+ i64, i64);
define <vscale x 2 x i8> @intrinsic_vsmul_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vsmul_vv_nxv2i8_nxv2i8_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma
+; CHECK-NEXT: csrwi vxrm, 0
; CHECK-NEXT: vsmul.vv v8, v8, v9
; CHECK-NEXT: ret
entry:
<vscale x 2 x i8> undef,
<vscale x 2 x i8> %0,
<vscale x 2 x i8> %1,
- i64 %2)
+ i64 0, i64 %2)
ret <vscale x 2 x i8> %a
}
<vscale x 2 x i8>,
<vscale x 2 x i8>,
<vscale x 2 x i1>,
- i64,
- i64);
+ i64, i64, i64);
define <vscale x 2 x i8> @intrinsic_vsmul_mask_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vsmul_mask_vv_nxv2i8_nxv2i8_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu
+; CHECK-NEXT: csrwi vxrm, 0
; CHECK-NEXT: vsmul.vv v8, v9, v10, v0.t
; CHECK-NEXT: ret
entry:
<vscale x 2 x i8> %1,
<vscale x 2 x i8> %2,
<vscale x 2 x i1> %3,
- i64 %4, i64 1)
+ i64 0, i64 %4, i64 1)
ret <vscale x 2 x i8> %a
}
<vscale x 4 x i8>,
<vscale x 4 x i8>,
<vscale x 4 x i8>,
- i64);
+ i64, i64);
define <vscale x 4 x i8> @intrinsic_vsmul_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vsmul_vv_nxv4i8_nxv4i8_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
+; CHECK-NEXT: csrwi vxrm, 0
; CHECK-NEXT: vsmul.vv v8, v8, v9
; CHECK-NEXT: ret
entry:
<vscale x 4 x i8> undef,
<vscale x 4 x i8> %0,
<vscale x 4 x i8> %1,
- i64 %2)
+ i64 0, i64 %2)
ret <vscale x 4 x i8> %a
}
<vscale x 4 x i8>,
<vscale x 4 x i8>,
<vscale x 4 x i1>,
- i64,
- i64);
+ i64, i64, i64);
define <vscale x 4 x i8> @intrinsic_vsmul_mask_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vsmul_mask_vv_nxv4i8_nxv4i8_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu
+; CHECK-NEXT: csrwi vxrm, 0
; CHECK-NEXT: vsmul.vv v8, v9, v10, v0.t
; CHECK-NEXT: ret
entry:
<vscale x 4 x i8> %1,
<vscale x 4 x i8> %2,
<vscale x 4 x i1> %3,
- i64 %4, i64 1)
+ i64 0, i64 %4, i64 1)
ret <vscale x 4 x i8> %a
}
<vscale x 8 x i8>,
<vscale x 8 x i8>,
<vscale x 8 x i8>,
- i64);
+ i64, i64);
define <vscale x 8 x i8> @intrinsic_vsmul_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vsmul_vv_nxv8i8_nxv8i8_nxv8i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma
+; CHECK-NEXT: csrwi vxrm, 0
; CHECK-NEXT: vsmul.vv v8, v8, v9
; CHECK-NEXT: ret
entry:
<vscale x 8 x i8> undef,
<vscale x 8 x i8> %0,
<vscale x 8 x i8> %1,
- i64 %2)
+ i64 0, i64 %2)
ret <vscale x 8 x i8> %a
}
<vscale x 8 x i8>,
<vscale x 8 x i8>,
<vscale x 8 x i1>,
- i64,
- i64);
+ i64, i64, i64);
define <vscale x 8 x i8> @intrinsic_vsmul_mask_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vsmul_mask_vv_nxv8i8_nxv8i8_nxv8i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu
+; CHECK-NEXT: csrwi vxrm, 0
; CHECK-NEXT: vsmul.vv v8, v9, v10, v0.t
; CHECK-NEXT: ret
entry:
<vscale x 8 x i8> %1,
<vscale x 8 x i8> %2,
<vscale x 8 x i1> %3,
- i64 %4, i64 1)
+ i64 0, i64 %4, i64 1)
ret <vscale x 8 x i8> %a
}
<vscale x 16 x i8>,
<vscale x 16 x i8>,
<vscale x 16 x i8>,
- i64);
+ i64, i64);
define <vscale x 16 x i8> @intrinsic_vsmul_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vsmul_vv_nxv16i8_nxv16i8_nxv16i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma
+; CHECK-NEXT: csrwi vxrm, 0
; CHECK-NEXT: vsmul.vv v8, v8, v10
; CHECK-NEXT: ret
entry:
<vscale x 16 x i8> undef,
<vscale x 16 x i8> %0,
<vscale x 16 x i8> %1,
- i64 %2)
+ i64 0, i64 %2)
ret <vscale x 16 x i8> %a
}
<vscale x 16 x i8>,
<vscale x 16 x i8>,
<vscale x 16 x i1>,
- i64,
- i64);
+ i64, i64, i64);
define <vscale x 16 x i8> @intrinsic_vsmul_mask_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vsmul_mask_vv_nxv16i8_nxv16i8_nxv16i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu
+; CHECK-NEXT: csrwi vxrm, 0
; CHECK-NEXT: vsmul.vv v8, v10, v12, v0.t
; CHECK-NEXT: ret
entry:
<vscale x 16 x i8> %1,
<vscale x 16 x i8> %2,
<vscale x 16 x i1> %3,
- i64 %4, i64 1)
+ i64 0, i64 %4, i64 1)
ret <vscale x 16 x i8> %a
}
<vscale x 32 x i8>,
<vscale x 32 x i8>,
<vscale x 32 x i8>,
- i64);
+ i64, i64);
define <vscale x 32 x i8> @intrinsic_vsmul_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vsmul_vv_nxv32i8_nxv32i8_nxv32i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma
+; CHECK-NEXT: csrwi vxrm, 0
; CHECK-NEXT: vsmul.vv v8, v8, v12
; CHECK-NEXT: ret
entry:
<vscale x 32 x i8> undef,
<vscale x 32 x i8> %0,
<vscale x 32 x i8> %1,
- i64 %2)
+ i64 0, i64 %2)
ret <vscale x 32 x i8> %a
}
<vscale x 32 x i8>,
<vscale x 32 x i8>,
<vscale x 32 x i1>,
- i64,
- i64);
+ i64, i64, i64);
define <vscale x 32 x i8> @intrinsic_vsmul_mask_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vsmul_mask_vv_nxv32i8_nxv32i8_nxv32i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu
+; CHECK-NEXT: csrwi vxrm, 0
; CHECK-NEXT: vsmul.vv v8, v12, v16, v0.t
; CHECK-NEXT: ret
entry:
<vscale x 32 x i8> %1,
<vscale x 32 x i8> %2,
<vscale x 32 x i1> %3,
- i64 %4, i64 1)
+ i64 0, i64 %4, i64 1)
ret <vscale x 32 x i8> %a
}
<vscale x 64 x i8>,
<vscale x 64 x i8>,
<vscale x 64 x i8>,
- i64);
+ i64, i64);
define <vscale x 64 x i8> @intrinsic_vsmul_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vsmul_vv_nxv64i8_nxv64i8_nxv64i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma
+; CHECK-NEXT: csrwi vxrm, 0
; CHECK-NEXT: vsmul.vv v8, v8, v16
; CHECK-NEXT: ret
entry:
<vscale x 64 x i8> undef,
<vscale x 64 x i8> %0,
<vscale x 64 x i8> %1,
- i64 %2)
+ i64 0, i64 %2)
ret <vscale x 64 x i8> %a
}
<vscale x 64 x i8>,
<vscale x 64 x i8>,
<vscale x 64 x i1>,
- i64,
- i64);
+ i64, i64, i64);
define <vscale x 64 x i8> @intrinsic_vsmul_mask_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i8> %2, <vscale x 64 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vsmul_mask_vv_nxv64i8_nxv64i8_nxv64i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vl8r.v v24, (a0)
; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu
+; CHECK-NEXT: csrwi vxrm, 0
; CHECK-NEXT: vsmul.vv v8, v16, v24, v0.t
; CHECK-NEXT: ret
entry:
<vscale x 64 x i8> %1,
<vscale x 64 x i8> %2,
<vscale x 64 x i1> %3,
- i64 %4, i64 1)
+ i64 0, i64 %4, i64 1)
ret <vscale x 64 x i8> %a
}
<vscale x 1 x i16>,
<vscale x 1 x i16>,
<vscale x 1 x i16>,
- i64);
+ i64, i64);
define <vscale x 1 x i16> @intrinsic_vsmul_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vsmul_vv_nxv1i16_nxv1i16_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
+; CHECK-NEXT: csrwi vxrm, 0
; CHECK-NEXT: vsmul.vv v8, v8, v9
; CHECK-NEXT: ret
entry:
<vscale x 1 x i16> undef,
<vscale x 1 x i16> %0,
<vscale x 1 x i16> %1,
- i64 %2)
+ i64 0, i64 %2)
ret <vscale x 1 x i16> %a
}
<vscale x 1 x i16>,
<vscale x 1 x i16>,
<vscale x 1 x i1>,
- i64,
- i64);
+ i64, i64, i64);
define <vscale x 1 x i16> @intrinsic_vsmul_mask_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vsmul_mask_vv_nxv1i16_nxv1i16_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
+; CHECK-NEXT: csrwi vxrm, 0
; CHECK-NEXT: vsmul.vv v8, v9, v10, v0.t
; CHECK-NEXT: ret
entry:
<vscale x 1 x i16> %1,
<vscale x 1 x i16> %2,
<vscale x 1 x i1> %3,
- i64 %4, i64 1)
+ i64 0, i64 %4, i64 1)
ret <vscale x 1 x i16> %a
}
<vscale x 2 x i16>,
<vscale x 2 x i16>,
<vscale x 2 x i16>,
- i64);
+ i64, i64);
define <vscale x 2 x i16> @intrinsic_vsmul_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vsmul_vv_nxv2i16_nxv2i16_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
+; CHECK-NEXT: csrwi vxrm, 0
; CHECK-NEXT: vsmul.vv v8, v8, v9
; CHECK-NEXT: ret
entry:
<vscale x 2 x i16> undef,
<vscale x 2 x i16> %0,
<vscale x 2 x i16> %1,
- i64 %2)
+ i64 0, i64 %2)
ret <vscale x 2 x i16> %a
}
<vscale x 2 x i16>,
<vscale x 2 x i16>,
<vscale x 2 x i1>,
- i64,
- i64);
+ i64, i64, i64);
define <vscale x 2 x i16> @intrinsic_vsmul_mask_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vsmul_mask_vv_nxv2i16_nxv2i16_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu
+; CHECK-NEXT: csrwi vxrm, 0
; CHECK-NEXT: vsmul.vv v8, v9, v10, v0.t
; CHECK-NEXT: ret
entry:
<vscale x 2 x i16> %1,
<vscale x 2 x i16> %2,
<vscale x 2 x i1> %3,
- i64 %4, i64 1)
+ i64 0, i64 %4, i64 1)
ret <vscale x 2 x i16> %a
}
<vscale x 4 x i16>,
<vscale x 4 x i16>,
<vscale x 4 x i16>,
- i64);
+ i64, i64);
define <vscale x 4 x i16> @intrinsic_vsmul_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vsmul_vv_nxv4i16_nxv4i16_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
+; CHECK-NEXT: csrwi vxrm, 0
; CHECK-NEXT: vsmul.vv v8, v8, v9
; CHECK-NEXT: ret
entry:
<vscale x 4 x i16> undef,
<vscale x 4 x i16> %0,
<vscale x 4 x i16> %1,
- i64 %2)
+ i64 0, i64 %2)
ret <vscale x 4 x i16> %a
}
<vscale x 4 x i16>,
<vscale x 4 x i16>,
<vscale x 4 x i1>,
- i64,
- i64);
+ i64, i64, i64);
define <vscale x 4 x i16> @intrinsic_vsmul_mask_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vsmul_mask_vv_nxv4i16_nxv4i16_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu
+; CHECK-NEXT: csrwi vxrm, 0
; CHECK-NEXT: vsmul.vv v8, v9, v10, v0.t
; CHECK-NEXT: ret
entry:
<vscale x 4 x i16> %1,
<vscale x 4 x i16> %2,
<vscale x 4 x i1> %3,
- i64 %4, i64 1)
+ i64 0, i64 %4, i64 1)
ret <vscale x 4 x i16> %a
}
<vscale x 8 x i16>,
<vscale x 8 x i16>,
<vscale x 8 x i16>,
- i64);
+ i64, i64);
define <vscale x 8 x i16> @intrinsic_vsmul_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vsmul_vv_nxv8i16_nxv8i16_nxv8i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
+; CHECK-NEXT: csrwi vxrm, 0
; CHECK-NEXT: vsmul.vv v8, v8, v10
; CHECK-NEXT: ret
entry:
<vscale x 8 x i16> undef,
<vscale x 8 x i16> %0,
<vscale x 8 x i16> %1,
- i64 %2)
+ i64 0, i64 %2)
ret <vscale x 8 x i16> %a
}
<vscale x 8 x i16>,
<vscale x 8 x i16>,
<vscale x 8 x i1>,
- i64,
- i64);
+ i64, i64, i64);
define <vscale x 8 x i16> @intrinsic_vsmul_mask_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vsmul_mask_vv_nxv8i16_nxv8i16_nxv8i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu
+; CHECK-NEXT: csrwi vxrm, 0
; CHECK-NEXT: vsmul.vv v8, v10, v12, v0.t
; CHECK-NEXT: ret
entry:
<vscale x 8 x i16> %1,
<vscale x 8 x i16> %2,
<vscale x 8 x i1> %3,
- i64 %4, i64 1)
+ i64 0, i64 %4, i64 1)
ret <vscale x 8 x i16> %a
}
<vscale x 16 x i16>,
<vscale x 16 x i16>,
<vscale x 16 x i16>,
- i64);
+ i64, i64);
define <vscale x 16 x i16> @intrinsic_vsmul_vv_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vsmul_vv_nxv16i16_nxv16i16_nxv16i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma
+; CHECK-NEXT: csrwi vxrm, 0
; CHECK-NEXT: vsmul.vv v8, v8, v12
; CHECK-NEXT: ret
entry:
<vscale x 16 x i16> undef,
<vscale x 16 x i16> %0,
<vscale x 16 x i16> %1,
- i64 %2)
+ i64 0, i64 %2)
ret <vscale x 16 x i16> %a
}
<vscale x 16 x i16>,
<vscale x 16 x i16>,
<vscale x 16 x i1>,
- i64,
- i64);
+ i64, i64, i64);
define <vscale x 16 x i16> @intrinsic_vsmul_mask_vv_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vsmul_mask_vv_nxv16i16_nxv16i16_nxv16i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu
+; CHECK-NEXT: csrwi vxrm, 0
; CHECK-NEXT: vsmul.vv v8, v12, v16, v0.t
; CHECK-NEXT: ret
entry:
<vscale x 16 x i16> %1,
<vscale x 16 x i16> %2,
<vscale x 16 x i1> %3,
- i64 %4, i64 1)
+ i64 0, i64 %4, i64 1)
ret <vscale x 16 x i16> %a
}
<vscale x 32 x i16>,
<vscale x 32 x i16>,
<vscale x 32 x i16>,
- i64);
+ i64, i64);
define <vscale x 32 x i16> @intrinsic_vsmul_vv_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vsmul_vv_nxv32i16_nxv32i16_nxv32i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma
+; CHECK-NEXT: csrwi vxrm, 0
; CHECK-NEXT: vsmul.vv v8, v8, v16
; CHECK-NEXT: ret
entry:
<vscale x 32 x i16> undef,
<vscale x 32 x i16> %0,
<vscale x 32 x i16> %1,
- i64 %2)
+ i64 0, i64 %2)
ret <vscale x 32 x i16> %a
}
<vscale x 32 x i16>,
<vscale x 32 x i16>,
<vscale x 32 x i1>,
- i64,
- i64);
+ i64, i64, i64);
define <vscale x 32 x i16> @intrinsic_vsmul_mask_vv_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, <vscale x 32 x i16> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vsmul_mask_vv_nxv32i16_nxv32i16_nxv32i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vl8re16.v v24, (a0)
; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu
+; CHECK-NEXT: csrwi vxrm, 0
; CHECK-NEXT: vsmul.vv v8, v16, v24, v0.t
; CHECK-NEXT: ret
entry:
<vscale x 32 x i16> %1,
<vscale x 32 x i16> %2,
<vscale x 32 x i1> %3,
- i64 %4, i64 1)
+ i64 0, i64 %4, i64 1)
ret <vscale x 32 x i16> %a
}
<vscale x 1 x i32>,
<vscale x 1 x i32>,
<vscale x 1 x i32>,
- i64);
+ i64, i64);
define <vscale x 1 x i32> @intrinsic_vsmul_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vsmul_vv_nxv1i32_nxv1i32_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
+; CHECK-NEXT: csrwi vxrm, 0
; CHECK-NEXT: vsmul.vv v8, v8, v9
; CHECK-NEXT: ret
entry:
<vscale x 1 x i32> undef,
<vscale x 1 x i32> %0,
<vscale x 1 x i32> %1,
- i64 %2)
+ i64 0, i64 %2)
ret <vscale x 1 x i32> %a
}
<vscale x 1 x i32>,
<vscale x 1 x i32>,
<vscale x 1 x i1>,
- i64,
- i64);
+ i64, i64, i64);
define <vscale x 1 x i32> @intrinsic_vsmul_mask_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vsmul_mask_vv_nxv1i32_nxv1i32_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu
+; CHECK-NEXT: csrwi vxrm, 0
; CHECK-NEXT: vsmul.vv v8, v9, v10, v0.t
; CHECK-NEXT: ret
entry:
<vscale x 1 x i32> %1,
<vscale x 1 x i32> %2,
<vscale x 1 x i1> %3,
- i64 %4, i64 1)
+ i64 0, i64 %4, i64 1)
ret <vscale x 1 x i32> %a
}
<vscale x 2 x i32>,
<vscale x 2 x i32>,
<vscale x 2 x i32>,
- i64);
+ i64, i64);
define <vscale x 2 x i32> @intrinsic_vsmul_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vsmul_vv_nxv2i32_nxv2i32_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
+; CHECK-NEXT: csrwi vxrm, 0
; CHECK-NEXT: vsmul.vv v8, v8, v9
; CHECK-NEXT: ret
entry:
<vscale x 2 x i32> undef,
<vscale x 2 x i32> %0,
<vscale x 2 x i32> %1,
- i64 %2)
+ i64 0, i64 %2)
ret <vscale x 2 x i32> %a
}
<vscale x 2 x i32>,
<vscale x 2 x i32>,
<vscale x 2 x i1>,
- i64,
- i64);
+ i64, i64, i64);
define <vscale x 2 x i32> @intrinsic_vsmul_mask_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vsmul_mask_vv_nxv2i32_nxv2i32_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
+; CHECK-NEXT: csrwi vxrm, 0
; CHECK-NEXT: vsmul.vv v8, v9, v10, v0.t
; CHECK-NEXT: ret
entry:
<vscale x 2 x i32> %1,
<vscale x 2 x i32> %2,
<vscale x 2 x i1> %3,
- i64 %4, i64 1)
+ i64 0, i64 %4, i64 1)
ret <vscale x 2 x i32> %a
}
<vscale x 4 x i32>,
<vscale x 4 x i32>,
<vscale x 4 x i32>,
- i64);
+ i64, i64);
define <vscale x 4 x i32> @intrinsic_vsmul_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vsmul_vv_nxv4i32_nxv4i32_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
+; CHECK-NEXT: csrwi vxrm, 0
; CHECK-NEXT: vsmul.vv v8, v8, v10
; CHECK-NEXT: ret
entry:
<vscale x 4 x i32> undef,
<vscale x 4 x i32> %0,
<vscale x 4 x i32> %1,
- i64 %2)
+ i64 0, i64 %2)
ret <vscale x 4 x i32> %a
}
<vscale x 4 x i32>,
<vscale x 4 x i32>,
<vscale x 4 x i1>,
- i64,
- i64);
+ i64, i64, i64);
define <vscale x 4 x i32> @intrinsic_vsmul_mask_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vsmul_mask_vv_nxv4i32_nxv4i32_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu
+; CHECK-NEXT: csrwi vxrm, 0
; CHECK-NEXT: vsmul.vv v8, v10, v12, v0.t
; CHECK-NEXT: ret
entry:
<vscale x 4 x i32> %1,
<vscale x 4 x i32> %2,
<vscale x 4 x i1> %3,
- i64 %4, i64 1)
+ i64 0, i64 %4, i64 1)
ret <vscale x 4 x i32> %a
}
<vscale x 8 x i32>,
<vscale x 8 x i32>,
<vscale x 8 x i32>,
- i64);
+ i64, i64);
define <vscale x 8 x i32> @intrinsic_vsmul_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vsmul_vv_nxv8i32_nxv8i32_nxv8i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
+; CHECK-NEXT: csrwi vxrm, 0
; CHECK-NEXT: vsmul.vv v8, v8, v12
; CHECK-NEXT: ret
entry:
<vscale x 8 x i32> undef,
<vscale x 8 x i32> %0,
<vscale x 8 x i32> %1,
- i64 %2)
+ i64 0, i64 %2)
ret <vscale x 8 x i32> %a
}
<vscale x 8 x i32>,
<vscale x 8 x i32>,
<vscale x 8 x i1>,
- i64,
- i64);
+ i64, i64, i64);
define <vscale x 8 x i32> @intrinsic_vsmul_mask_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vsmul_mask_vv_nxv8i32_nxv8i32_nxv8i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu
+; CHECK-NEXT: csrwi vxrm, 0
; CHECK-NEXT: vsmul.vv v8, v12, v16, v0.t
; CHECK-NEXT: ret
entry:
<vscale x 8 x i32> %1,
<vscale x 8 x i32> %2,
<vscale x 8 x i1> %3,
- i64 %4, i64 1)
+ i64 0, i64 %4, i64 1)
ret <vscale x 8 x i32> %a
}
<vscale x 16 x i32>,
<vscale x 16 x i32>,
<vscale x 16 x i32>,
- i64);
+ i64, i64);
define <vscale x 16 x i32> @intrinsic_vsmul_vv_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vsmul_vv_nxv16i32_nxv16i32_nxv16i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma
+; CHECK-NEXT: csrwi vxrm, 0
; CHECK-NEXT: vsmul.vv v8, v8, v16
; CHECK-NEXT: ret
entry:
<vscale x 16 x i32> undef,
<vscale x 16 x i32> %0,
<vscale x 16 x i32> %1,
- i64 %2)
+ i64 0, i64 %2)
ret <vscale x 16 x i32> %a
}
<vscale x 16 x i32>,
<vscale x 16 x i32>,
<vscale x 16 x i1>,
- i64,
- i64);
+ i64, i64, i64);
define <vscale x 16 x i32> @intrinsic_vsmul_mask_vv_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vsmul_mask_vv_nxv16i32_nxv16i32_nxv16i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vl8re32.v v24, (a0)
; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu
+; CHECK-NEXT: csrwi vxrm, 0
; CHECK-NEXT: vsmul.vv v8, v16, v24, v0.t
; CHECK-NEXT: ret
entry:
<vscale x 16 x i32> %1,
<vscale x 16 x i32> %2,
<vscale x 16 x i1> %3,
- i64 %4, i64 1)
+ i64 0, i64 %4, i64 1)
ret <vscale x 16 x i32> %a
}
<vscale x 1 x i64>,
<vscale x 1 x i64>,
<vscale x 1 x i64>,
- i64);
+ i64, i64);
define <vscale x 1 x i64> @intrinsic_vsmul_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vsmul_vv_nxv1i64_nxv1i64_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
+; CHECK-NEXT: csrwi vxrm, 0
; CHECK-NEXT: vsmul.vv v8, v8, v9
; CHECK-NEXT: ret
entry:
<vscale x 1 x i64> undef,
<vscale x 1 x i64> %0,
<vscale x 1 x i64> %1,
- i64 %2)
+ i64 0, i64 %2)
ret <vscale x 1 x i64> %a
}
<vscale x 1 x i64>,
<vscale x 1 x i64>,
<vscale x 1 x i1>,
- i64,
- i64);
+ i64, i64, i64);
define <vscale x 1 x i64> @intrinsic_vsmul_mask_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vsmul_mask_vv_nxv1i64_nxv1i64_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu
+; CHECK-NEXT: csrwi vxrm, 0
; CHECK-NEXT: vsmul.vv v8, v9, v10, v0.t
; CHECK-NEXT: ret
entry:
<vscale x 1 x i64> %1,
<vscale x 1 x i64> %2,
<vscale x 1 x i1> %3,
- i64 %4, i64 1)
+ i64 0, i64 %4, i64 1)
ret <vscale x 1 x i64> %a
}
<vscale x 2 x i64>,
<vscale x 2 x i64>,
<vscale x 2 x i64>,
- i64);
+ i64, i64);
define <vscale x 2 x i64> @intrinsic_vsmul_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vsmul_vv_nxv2i64_nxv2i64_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
+; CHECK-NEXT: csrwi vxrm, 0
; CHECK-NEXT: vsmul.vv v8, v8, v10
; CHECK-NEXT: ret
entry:
<vscale x 2 x i64> undef,
<vscale x 2 x i64> %0,
<vscale x 2 x i64> %1,
- i64 %2)
+ i64 0, i64 %2)
ret <vscale x 2 x i64> %a
}
<vscale x 2 x i64>,
<vscale x 2 x i64>,
<vscale x 2 x i1>,
- i64,
- i64);
+ i64, i64, i64);
define <vscale x 2 x i64> @intrinsic_vsmul_mask_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vsmul_mask_vv_nxv2i64_nxv2i64_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu
+; CHECK-NEXT: csrwi vxrm, 0
; CHECK-NEXT: vsmul.vv v8, v10, v12, v0.t
; CHECK-NEXT: ret
entry:
<vscale x 2 x i64> %1,
<vscale x 2 x i64> %2,
<vscale x 2 x i1> %3,
- i64 %4, i64 1)
+ i64 0, i64 %4, i64 1)
ret <vscale x 2 x i64> %a
}
<vscale x 4 x i64>,
<vscale x 4 x i64>,
<vscale x 4 x i64>,
- i64);
+ i64, i64);
define <vscale x 4 x i64> @intrinsic_vsmul_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vsmul_vv_nxv4i64_nxv4i64_nxv4i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
+; CHECK-NEXT: csrwi vxrm, 0
; CHECK-NEXT: vsmul.vv v8, v8, v12
; CHECK-NEXT: ret
entry:
<vscale x 4 x i64> undef,
<vscale x 4 x i64> %0,
<vscale x 4 x i64> %1,
- i64 %2)
+ i64 0, i64 %2)
ret <vscale x 4 x i64> %a
}
<vscale x 4 x i64>,
<vscale x 4 x i64>,
<vscale x 4 x i1>,
- i64,
- i64);
+ i64, i64, i64);
define <vscale x 4 x i64> @intrinsic_vsmul_mask_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vsmul_mask_vv_nxv4i64_nxv4i64_nxv4i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu
+; CHECK-NEXT: csrwi vxrm, 0
; CHECK-NEXT: vsmul.vv v8, v12, v16, v0.t
; CHECK-NEXT: ret
entry:
<vscale x 4 x i64> %1,
<vscale x 4 x i64> %2,
<vscale x 4 x i1> %3,
- i64 %4, i64 1)
+ i64 0, i64 %4, i64 1)
ret <vscale x 4 x i64> %a
}
<vscale x 8 x i64>,
<vscale x 8 x i64>,
<vscale x 8 x i64>,
- i64);
+ i64, i64);
define <vscale x 8 x i64> @intrinsic_vsmul_vv_nxv8i64_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vsmul_vv_nxv8i64_nxv8i64_nxv8i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; CHECK-NEXT: csrwi vxrm, 0
; CHECK-NEXT: vsmul.vv v8, v8, v16
; CHECK-NEXT: ret
entry:
<vscale x 8 x i64> undef,
<vscale x 8 x i64> %0,
<vscale x 8 x i64> %1,
- i64 %2)
+ i64 0, i64 %2)
ret <vscale x 8 x i64> %a
}
<vscale x 8 x i64>,
<vscale x 8 x i64>,
<vscale x 8 x i1>,
- i64,
- i64);
+ i64, i64, i64);
define <vscale x 8 x i64> @intrinsic_vsmul_mask_vv_nxv8i64_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vsmul_mask_vv_nxv8i64_nxv8i64_nxv8i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vl8re64.v v24, (a0)
; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu
+; CHECK-NEXT: csrwi vxrm, 0
; CHECK-NEXT: vsmul.vv v8, v16, v24, v0.t
; CHECK-NEXT: ret
entry:
<vscale x 8 x i64> %1,
<vscale x 8 x i64> %2,
<vscale x 8 x i1> %3,
- i64 %4, i64 1)
+ i64 0, i64 %4, i64 1)
ret <vscale x 8 x i64> %a
}
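
The vector-scalar tests that follow exercise both forms; in the masked form the mask precedes vxrm and a policy word trails vl. A sketch under the usual `.mask` intrinsic naming (%passthru, %op, %scalar, %mask, %vl and %r are illustrative placeholders):

  %r = call <vscale x 1 x i8> @llvm.riscv.vsmul.mask.nxv1i8.i8(
         <vscale x 1 x i8> %passthru,
         <vscale x 1 x i8> %op,
         i8 %scalar,
         <vscale x 1 x i1> %mask,
         i64 0,    ; vxrm (0 = rnu)
         i64 %vl,  ; vl
         i64 1)    ; policy: tail agnostic, mask undisturbed, matching the "ta, mu" in the CHECK lines
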
<vscale x 1 x i8>,
<vscale x 1 x i8>,
i8,
- i64);
+ i64, i64);
define <vscale x 1 x i8> @intrinsic_vsmul_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, i8 %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vsmul_vx_nxv1i8_nxv1i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
+; CHECK-NEXT: csrwi vxrm, 0
; CHECK-NEXT: vsmul.vx v8, v8, a0
; CHECK-NEXT: ret
entry:
<vscale x 1 x i8> undef,
<vscale x 1 x i8> %0,
i8 %1,
- i64 %2)
+ i64 0, i64 %2)
ret <vscale x 1 x i8> %a
}
<vscale x 1 x i8>,
i8,
<vscale x 1 x i1>,
- i64,
- i64);
+ i64, i64, i64);
define <vscale x 1 x i8> @intrinsic_vsmul_mask_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i8 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vsmul_mask_vx_nxv1i8_nxv1i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
+; CHECK-NEXT: csrwi vxrm, 0
; CHECK-NEXT: vsmul.vx v8, v9, a0, v0.t
; CHECK-NEXT: ret
entry:
<vscale x 1 x i8> %1,
i8 %2,
<vscale x 1 x i1> %3,
- i64 %4, i64 1)
+ i64 0, i64 %4, i64 1)
ret <vscale x 1 x i8> %a
}
<vscale x 2 x i8>,
<vscale x 2 x i8>,
i8,
- i64);
+ i64, i64);
define <vscale x 2 x i8> @intrinsic_vsmul_vx_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, i8 %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vsmul_vx_nxv2i8_nxv2i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
+; CHECK-NEXT: csrwi vxrm, 0
; CHECK-NEXT: vsmul.vx v8, v8, a0
; CHECK-NEXT: ret
entry:
<vscale x 2 x i8> undef,
<vscale x 2 x i8> %0,
i8 %1,
- i64 %2)
+ i64 0, i64 %2)
ret <vscale x 2 x i8> %a
}
<vscale x 2 x i8>,
i8,
<vscale x 2 x i1>,
- i64,
- i64);
+ i64, i64, i64);
define <vscale x 2 x i8> @intrinsic_vsmul_mask_vx_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, i8 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vsmul_mask_vx_nxv2i8_nxv2i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
+; CHECK-NEXT: csrwi vxrm, 0
; CHECK-NEXT: vsmul.vx v8, v9, a0, v0.t
; CHECK-NEXT: ret
entry:
<vscale x 2 x i8> %1,
i8 %2,
<vscale x 2 x i1> %3,
- i64 %4, i64 1)
+ i64 0, i64 %4, i64 1)
ret <vscale x 2 x i8> %a
}
<vscale x 4 x i8>,
<vscale x 4 x i8>,
i8,
- i64);
+ i64, i64);
define <vscale x 4 x i8> @intrinsic_vsmul_vx_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, i8 %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vsmul_vx_nxv4i8_nxv4i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
+; CHECK-NEXT: csrwi vxrm, 0
; CHECK-NEXT: vsmul.vx v8, v8, a0
; CHECK-NEXT: ret
entry:
<vscale x 4 x i8> undef,
<vscale x 4 x i8> %0,
i8 %1,
- i64 %2)
+ i64 0, i64 %2)
ret <vscale x 4 x i8> %a
}
<vscale x 4 x i8>,
i8,
<vscale x 4 x i1>,
- i64,
- i64);
+ i64, i64, i64);
define <vscale x 4 x i8> @intrinsic_vsmul_mask_vx_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, i8 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vsmul_mask_vx_nxv4i8_nxv4i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
+; CHECK-NEXT: csrwi vxrm, 0
; CHECK-NEXT: vsmul.vx v8, v9, a0, v0.t
; CHECK-NEXT: ret
entry:
<vscale x 4 x i8> %1,
i8 %2,
<vscale x 4 x i1> %3,
- i64 %4, i64 1)
+ i64 0, i64 %4, i64 1)
ret <vscale x 4 x i8> %a
}
<vscale x 8 x i8>,
<vscale x 8 x i8>,
i8,
- i64);
+ i64, i64);
define <vscale x 8 x i8> @intrinsic_vsmul_vx_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, i8 %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vsmul_vx_nxv8i8_nxv8i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
+; CHECK-NEXT: csrwi vxrm, 0
; CHECK-NEXT: vsmul.vx v8, v8, a0
; CHECK-NEXT: ret
entry:
<vscale x 8 x i8> undef,
<vscale x 8 x i8> %0,
i8 %1,
- i64 %2)
+ i64 0, i64 %2)
ret <vscale x 8 x i8> %a
}
<vscale x 8 x i8>,
i8,
<vscale x 8 x i1>,
- i64,
- i64);
+ i64, i64, i64);
define <vscale x 8 x i8> @intrinsic_vsmul_mask_vx_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, i8 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vsmul_mask_vx_nxv8i8_nxv8i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
+; CHECK-NEXT: csrwi vxrm, 0
; CHECK-NEXT: vsmul.vx v8, v9, a0, v0.t
; CHECK-NEXT: ret
entry:
<vscale x 8 x i8> %1,
i8 %2,
<vscale x 8 x i1> %3,
- i64 %4, i64 1)
+ i64 0, i64 %4, i64 1)
ret <vscale x 8 x i8> %a
}
<vscale x 16 x i8>,
<vscale x 16 x i8>,
i8,
- i64);
+ i64, i64);
define <vscale x 16 x i8> @intrinsic_vsmul_vx_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, i8 %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vsmul_vx_nxv16i8_nxv16i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma
+; CHECK-NEXT: csrwi vxrm, 0
; CHECK-NEXT: vsmul.vx v8, v8, a0
; CHECK-NEXT: ret
entry:
<vscale x 16 x i8> undef,
<vscale x 16 x i8> %0,
i8 %1,
- i64 %2)
+ i64 0, i64 %2)
ret <vscale x 16 x i8> %a
}
<vscale x 16 x i8>,
i8,
<vscale x 16 x i1>,
- i64,
- i64);
+ i64, i64, i64);
define <vscale x 16 x i8> @intrinsic_vsmul_mask_vx_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, i8 %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vsmul_mask_vx_nxv16i8_nxv16i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu
+; CHECK-NEXT: csrwi vxrm, 0
; CHECK-NEXT: vsmul.vx v8, v10, a0, v0.t
; CHECK-NEXT: ret
entry:
<vscale x 16 x i8> %1,
i8 %2,
<vscale x 16 x i1> %3,
- i64 %4, i64 1)
+ i64 0, i64 %4, i64 1)
ret <vscale x 16 x i8> %a
}
<vscale x 32 x i8>,
<vscale x 32 x i8>,
i8,
- i64);
+ i64, i64);
define <vscale x 32 x i8> @intrinsic_vsmul_vx_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, i8 %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vsmul_vx_nxv32i8_nxv32i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma
+; CHECK-NEXT: csrwi vxrm, 0
; CHECK-NEXT: vsmul.vx v8, v8, a0
; CHECK-NEXT: ret
entry:
<vscale x 32 x i8> undef,
<vscale x 32 x i8> %0,
i8 %1,
- i64 %2)
+ i64 0, i64 %2)
ret <vscale x 32 x i8> %a
}
<vscale x 32 x i8>,
i8,
<vscale x 32 x i1>,
- i64,
- i64);
+ i64, i64, i64);
define <vscale x 32 x i8> @intrinsic_vsmul_mask_vx_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, i8 %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vsmul_mask_vx_nxv32i8_nxv32i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu
+; CHECK-NEXT: csrwi vxrm, 0
; CHECK-NEXT: vsmul.vx v8, v12, a0, v0.t
; CHECK-NEXT: ret
entry:
<vscale x 32 x i8> %1,
i8 %2,
<vscale x 32 x i1> %3,
- i64 %4, i64 1)
+ i64 0, i64 %4, i64 1)
ret <vscale x 32 x i8> %a
}
<vscale x 64 x i8>,
<vscale x 64 x i8>,
i8,
- i64);
+ i64, i64);
define <vscale x 64 x i8> @intrinsic_vsmul_vx_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, i8 %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vsmul_vx_nxv64i8_nxv64i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma
+; CHECK-NEXT: csrwi vxrm, 0
; CHECK-NEXT: vsmul.vx v8, v8, a0
; CHECK-NEXT: ret
entry:
<vscale x 64 x i8> undef,
<vscale x 64 x i8> %0,
i8 %1,
- i64 %2)
+ i64 0, i64 %2)
ret <vscale x 64 x i8> %a
}
<vscale x 64 x i8>,
i8,
<vscale x 64 x i1>,
- i64,
- i64);
+ i64, i64, i64);
define <vscale x 64 x i8> @intrinsic_vsmul_mask_vx_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, i8 %2, <vscale x 64 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vsmul_mask_vx_nxv64i8_nxv64i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu
+; CHECK-NEXT: csrwi vxrm, 0
; CHECK-NEXT: vsmul.vx v8, v16, a0, v0.t
; CHECK-NEXT: ret
entry:
<vscale x 64 x i8> %1,
i8 %2,
<vscale x 64 x i1> %3,
- i64 %4, i64 1)
+ i64 0, i64 %4, i64 1)
ret <vscale x 64 x i8> %a
}
<vscale x 1 x i16>,
<vscale x 1 x i16>,
i16,
- i64);
+ i64, i64);
define <vscale x 1 x i16> @intrinsic_vsmul_vx_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, i16 %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vsmul_vx_nxv1i16_nxv1i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
+; CHECK-NEXT: csrwi vxrm, 0
; CHECK-NEXT: vsmul.vx v8, v8, a0
; CHECK-NEXT: ret
entry:
<vscale x 1 x i16> undef,
<vscale x 1 x i16> %0,
i16 %1,
- i64 %2)
+ i64 0, i64 %2)
ret <vscale x 1 x i16> %a
}
<vscale x 1 x i16>,
i16,
<vscale x 1 x i1>,
- i64,
- i64);
+ i64, i64, i64);
define <vscale x 1 x i16> @intrinsic_vsmul_mask_vx_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, i16 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vsmul_mask_vx_nxv1i16_nxv1i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
+; CHECK-NEXT: csrwi vxrm, 0
; CHECK-NEXT: vsmul.vx v8, v9, a0, v0.t
; CHECK-NEXT: ret
entry:
<vscale x 1 x i16> %1,
i16 %2,
<vscale x 1 x i1> %3,
- i64 %4, i64 1)
+ i64 0, i64 %4, i64 1)
ret <vscale x 1 x i16> %a
}
<vscale x 2 x i16>,
<vscale x 2 x i16>,
i16,
- i64);
+ i64, i64);
define <vscale x 2 x i16> @intrinsic_vsmul_vx_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, i16 %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vsmul_vx_nxv2i16_nxv2i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
+; CHECK-NEXT: csrwi vxrm, 0
; CHECK-NEXT: vsmul.vx v8, v8, a0
; CHECK-NEXT: ret
entry:
<vscale x 2 x i16> undef,
<vscale x 2 x i16> %0,
i16 %1,
- i64 %2)
+ i64 0, i64 %2)
ret <vscale x 2 x i16> %a
}
<vscale x 2 x i16>,
i16,
<vscale x 2 x i1>,
- i64,
- i64);
+ i64, i64, i64);
define <vscale x 2 x i16> @intrinsic_vsmul_mask_vx_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, i16 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vsmul_mask_vx_nxv2i16_nxv2i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
+; CHECK-NEXT: csrwi vxrm, 0
; CHECK-NEXT: vsmul.vx v8, v9, a0, v0.t
; CHECK-NEXT: ret
entry:
<vscale x 2 x i16> %1,
i16 %2,
<vscale x 2 x i1> %3,
- i64 %4, i64 1)
+ i64 0, i64 %4, i64 1)
ret <vscale x 2 x i16> %a
}
<vscale x 4 x i16>,
<vscale x 4 x i16>,
i16,
- i64);
+ i64, i64);
define <vscale x 4 x i16> @intrinsic_vsmul_vx_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, i16 %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vsmul_vx_nxv4i16_nxv4i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
+; CHECK-NEXT: csrwi vxrm, 0
; CHECK-NEXT: vsmul.vx v8, v8, a0
; CHECK-NEXT: ret
entry:
<vscale x 4 x i16> undef,
<vscale x 4 x i16> %0,
i16 %1,
- i64 %2)
+ i64 0, i64 %2)
ret <vscale x 4 x i16> %a
}
<vscale x 4 x i16>,
i16,
<vscale x 4 x i1>,
- i64,
- i64);
+ i64, i64, i64);
define <vscale x 4 x i16> @intrinsic_vsmul_mask_vx_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, i16 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vsmul_mask_vx_nxv4i16_nxv4i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
+; CHECK-NEXT: csrwi vxrm, 0
; CHECK-NEXT: vsmul.vx v8, v9, a0, v0.t
; CHECK-NEXT: ret
entry:
<vscale x 4 x i16> %1,
i16 %2,
<vscale x 4 x i1> %3,
- i64 %4, i64 1)
+ i64 0, i64 %4, i64 1)
ret <vscale x 4 x i16> %a
}
<vscale x 8 x i16>,
<vscale x 8 x i16>,
i16,
- i64);
+ i64, i64);
define <vscale x 8 x i16> @intrinsic_vsmul_vx_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, i16 %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vsmul_vx_nxv8i16_nxv8i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
+; CHECK-NEXT: csrwi vxrm, 0
; CHECK-NEXT: vsmul.vx v8, v8, a0
; CHECK-NEXT: ret
entry:
<vscale x 8 x i16> undef,
<vscale x 8 x i16> %0,
i16 %1,
- i64 %2)
+ i64 0, i64 %2)
ret <vscale x 8 x i16> %a
}
<vscale x 8 x i16>,
i16,
<vscale x 8 x i1>,
- i64,
- i64);
+ i64, i64, i64);
define <vscale x 8 x i16> @intrinsic_vsmul_mask_vx_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, i16 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vsmul_mask_vx_nxv8i16_nxv8i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
+; CHECK-NEXT: csrwi vxrm, 0
; CHECK-NEXT: vsmul.vx v8, v10, a0, v0.t
; CHECK-NEXT: ret
entry:
<vscale x 8 x i16> %1,
i16 %2,
<vscale x 8 x i1> %3,
- i64 %4, i64 1)
+ i64 0, i64 %4, i64 1)
ret <vscale x 8 x i16> %a
}
<vscale x 16 x i16>,
<vscale x 16 x i16>,
i16,
- i64);
+ i64, i64);
define <vscale x 16 x i16> @intrinsic_vsmul_vx_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, i16 %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vsmul_vx_nxv16i16_nxv16i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma
+; CHECK-NEXT: csrwi vxrm, 0
; CHECK-NEXT: vsmul.vx v8, v8, a0
; CHECK-NEXT: ret
entry:
<vscale x 16 x i16> undef,
<vscale x 16 x i16> %0,
i16 %1,
- i64 %2)
+ i64 0, i64 %2)
ret <vscale x 16 x i16> %a
}
<vscale x 16 x i16>,
i16,
<vscale x 16 x i1>,
- i64,
- i64);
+ i64, i64, i64);
define <vscale x 16 x i16> @intrinsic_vsmul_mask_vx_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, i16 %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vsmul_mask_vx_nxv16i16_nxv16i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu
+; CHECK-NEXT: csrwi vxrm, 0
; CHECK-NEXT: vsmul.vx v8, v12, a0, v0.t
; CHECK-NEXT: ret
entry:
<vscale x 16 x i16> %1,
i16 %2,
<vscale x 16 x i1> %3,
- i64 %4, i64 1)
+ i64 0, i64 %4, i64 1)
ret <vscale x 16 x i16> %a
}
<vscale x 32 x i16>,
<vscale x 32 x i16>,
i16,
- i64);
+ i64, i64);
define <vscale x 32 x i16> @intrinsic_vsmul_vx_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, i16 %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vsmul_vx_nxv32i16_nxv32i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma
+; CHECK-NEXT: csrwi vxrm, 0
; CHECK-NEXT: vsmul.vx v8, v8, a0
; CHECK-NEXT: ret
entry:
<vscale x 32 x i16> undef,
<vscale x 32 x i16> %0,
i16 %1,
- i64 %2)
+ i64 0, i64 %2)
ret <vscale x 32 x i16> %a
}
<vscale x 32 x i16>,
i16,
<vscale x 32 x i1>,
- i64,
- i64);
+ i64, i64, i64);
define <vscale x 32 x i16> @intrinsic_vsmul_mask_vx_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, i16 %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vsmul_mask_vx_nxv32i16_nxv32i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu
+; CHECK-NEXT: csrwi vxrm, 0
; CHECK-NEXT: vsmul.vx v8, v16, a0, v0.t
; CHECK-NEXT: ret
entry:
<vscale x 32 x i16> %1,
i16 %2,
<vscale x 32 x i1> %3,
- i64 %4, i64 1)
+ i64 0, i64 %4, i64 1)
ret <vscale x 32 x i16> %a
}
<vscale x 1 x i32>,
<vscale x 1 x i32>,
i32,
- i64);
+ i64, i64);
define <vscale x 1 x i32> @intrinsic_vsmul_vx_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, i32 %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vsmul_vx_nxv1i32_nxv1i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
+; CHECK-NEXT: csrwi vxrm, 0
; CHECK-NEXT: vsmul.vx v8, v8, a0
; CHECK-NEXT: ret
entry:
<vscale x 1 x i32> undef,
<vscale x 1 x i32> %0,
i32 %1,
- i64 %2)
+ i64 0, i64 %2)
ret <vscale x 1 x i32> %a
}
<vscale x 1 x i32>,
i32,
<vscale x 1 x i1>,
- i64,
- i64);
+ i64, i64, i64);
define <vscale x 1 x i32> @intrinsic_vsmul_mask_vx_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, i32 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vsmul_mask_vx_nxv1i32_nxv1i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
+; CHECK-NEXT: csrwi vxrm, 0
; CHECK-NEXT: vsmul.vx v8, v9, a0, v0.t
; CHECK-NEXT: ret
entry:
<vscale x 1 x i32> %1,
i32 %2,
<vscale x 1 x i1> %3,
- i64 %4, i64 1)
+ i64 0, i64 %4, i64 1)
ret <vscale x 1 x i32> %a
}
<vscale x 2 x i32>,
<vscale x 2 x i32>,
i32,
- i64);
+ i64, i64);
define <vscale x 2 x i32> @intrinsic_vsmul_vx_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, i32 %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vsmul_vx_nxv2i32_nxv2i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
+; CHECK-NEXT: csrwi vxrm, 0
; CHECK-NEXT: vsmul.vx v8, v8, a0
; CHECK-NEXT: ret
entry:
<vscale x 2 x i32> undef,
<vscale x 2 x i32> %0,
i32 %1,
- i64 %2)
+ i64 0, i64 %2)
ret <vscale x 2 x i32> %a
}
<vscale x 2 x i32>,
i32,
<vscale x 2 x i1>,
- i64,
- i64);
+ i64, i64, i64);
define <vscale x 2 x i32> @intrinsic_vsmul_mask_vx_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, i32 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vsmul_mask_vx_nxv2i32_nxv2i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
+; CHECK-NEXT: csrwi vxrm, 0
; CHECK-NEXT: vsmul.vx v8, v9, a0, v0.t
; CHECK-NEXT: ret
entry:
<vscale x 2 x i32> %1,
i32 %2,
<vscale x 2 x i1> %3,
- i64 %4, i64 1)
+ i64 0, i64 %4, i64 1)
ret <vscale x 2 x i32> %a
}
<vscale x 4 x i32>,
<vscale x 4 x i32>,
i32,
- i64);
+ i64, i64);
define <vscale x 4 x i32> @intrinsic_vsmul_vx_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, i32 %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vsmul_vx_nxv4i32_nxv4i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
+; CHECK-NEXT: csrwi vxrm, 0
; CHECK-NEXT: vsmul.vx v8, v8, a0
; CHECK-NEXT: ret
entry:
<vscale x 4 x i32> undef,
<vscale x 4 x i32> %0,
i32 %1,
- i64 %2)
+ i64 0, i64 %2)
ret <vscale x 4 x i32> %a
}
<vscale x 4 x i32>,
i32,
<vscale x 4 x i1>,
- i64,
- i64);
+ i64, i64, i64);
define <vscale x 4 x i32> @intrinsic_vsmul_mask_vx_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, i32 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vsmul_mask_vx_nxv4i32_nxv4i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
+; CHECK-NEXT: csrwi vxrm, 0
; CHECK-NEXT: vsmul.vx v8, v10, a0, v0.t
; CHECK-NEXT: ret
entry:
<vscale x 4 x i32> %1,
i32 %2,
<vscale x 4 x i1> %3,
- i64 %4, i64 1)
+ i64 0, i64 %4, i64 1)
ret <vscale x 4 x i32> %a
}
<vscale x 8 x i32>,
<vscale x 8 x i32>,
i32,
- i64);
+ i64, i64);
define <vscale x 8 x i32> @intrinsic_vsmul_vx_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, i32 %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vsmul_vx_nxv8i32_nxv8i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma
+; CHECK-NEXT: csrwi vxrm, 0
; CHECK-NEXT: vsmul.vx v8, v8, a0
; CHECK-NEXT: ret
entry:
<vscale x 8 x i32> undef,
<vscale x 8 x i32> %0,
i32 %1,
- i64 %2)
+ i64 0, i64 %2)
ret <vscale x 8 x i32> %a
}
<vscale x 8 x i32>,
i32,
<vscale x 8 x i1>,
- i64,
- i64);
+ i64, i64, i64);
define <vscale x 8 x i32> @intrinsic_vsmul_mask_vx_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, i32 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vsmul_mask_vx_nxv8i32_nxv8i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu
+; CHECK-NEXT: csrwi vxrm, 0
; CHECK-NEXT: vsmul.vx v8, v12, a0, v0.t
; CHECK-NEXT: ret
entry:
<vscale x 8 x i32> %1,
i32 %2,
<vscale x 8 x i1> %3,
- i64 %4, i64 1)
+ i64 0, i64 %4, i64 1)
ret <vscale x 8 x i32> %a
}
<vscale x 16 x i32>,
<vscale x 16 x i32>,
i32,
- i64);
+ i64, i64);
define <vscale x 16 x i32> @intrinsic_vsmul_vx_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, i32 %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vsmul_vx_nxv16i32_nxv16i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma
+; CHECK-NEXT: csrwi vxrm, 0
; CHECK-NEXT: vsmul.vx v8, v8, a0
; CHECK-NEXT: ret
entry:
<vscale x 16 x i32> undef,
<vscale x 16 x i32> %0,
i32 %1,
- i64 %2)
+ i64 0, i64 %2)
ret <vscale x 16 x i32> %a
}
<vscale x 16 x i32>,
i32,
<vscale x 16 x i1>,
- i64,
- i64);
+ i64, i64, i64);
define <vscale x 16 x i32> @intrinsic_vsmul_mask_vx_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, i32 %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vsmul_mask_vx_nxv16i32_nxv16i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu
+; CHECK-NEXT: csrwi vxrm, 0
; CHECK-NEXT: vsmul.vx v8, v16, a0, v0.t
; CHECK-NEXT: ret
entry:
<vscale x 16 x i32> %1,
i32 %2,
<vscale x 16 x i1> %3,
- i64 %4, i64 1)
+ i64 0, i64 %4, i64 1)
ret <vscale x 16 x i32> %a
}
declare <vscale x 1 x i64> @llvm.riscv.vsmul.nxv1i64.i64(
<vscale x 1 x i64>,
<vscale x 1 x i64>,
- i64,
- i64);
+ i64, i64, i64);
define <vscale x 1 x i64> @intrinsic_vsmul_vx_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, i64 %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vsmul_vx_nxv1i64_nxv1i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
+; CHECK-NEXT: csrwi vxrm, 0
; CHECK-NEXT: vsmul.vx v8, v8, a0
; CHECK-NEXT: ret
entry:
<vscale x 1 x i64> undef,
<vscale x 1 x i64> %0,
i64 %1,
- i64 %2)
+ i64 0, i64 %2)
ret <vscale x 1 x i64> %a
}
<vscale x 1 x i64>,
i64,
<vscale x 1 x i1>,
- i64,
- i64);
+ i64, i64, i64);
define <vscale x 1 x i64> @intrinsic_vsmul_mask_vx_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i64 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vsmul_mask_vx_nxv1i64_nxv1i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
+; CHECK-NEXT: csrwi vxrm, 0
; CHECK-NEXT: vsmul.vx v8, v9, a0, v0.t
; CHECK-NEXT: ret
entry:
<vscale x 1 x i64> %1,
i64 %2,
<vscale x 1 x i1> %3,
- i64 %4, i64 1)
+ i64 0, i64 %4, i64 1)
ret <vscale x 1 x i64> %a
}
declare <vscale x 2 x i64> @llvm.riscv.vsmul.nxv2i64.i64(
<vscale x 2 x i64>,
<vscale x 2 x i64>,
- i64,
- i64);
+ i64, i64, i64);
define <vscale x 2 x i64> @intrinsic_vsmul_vx_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, i64 %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vsmul_vx_nxv2i64_nxv2i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma
+; CHECK-NEXT: csrwi vxrm, 0
; CHECK-NEXT: vsmul.vx v8, v8, a0
; CHECK-NEXT: ret
entry:
<vscale x 2 x i64> undef,
<vscale x 2 x i64> %0,
i64 %1,
- i64 %2)
+ i64 0, i64 %2)
ret <vscale x 2 x i64> %a
}
<vscale x 2 x i64>,
i64,
<vscale x 2 x i1>,
- i64,
- i64);
+ i64, i64, i64);
define <vscale x 2 x i64> @intrinsic_vsmul_mask_vx_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, i64 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vsmul_mask_vx_nxv2i64_nxv2i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu
+; CHECK-NEXT: csrwi vxrm, 0
; CHECK-NEXT: vsmul.vx v8, v10, a0, v0.t
; CHECK-NEXT: ret
entry:
<vscale x 2 x i64> %1,
i64 %2,
<vscale x 2 x i1> %3,
- i64 %4, i64 1)
+ i64 0, i64 %4, i64 1)
ret <vscale x 2 x i64> %a
}
declare <vscale x 4 x i64> @llvm.riscv.vsmul.nxv4i64.i64(
<vscale x 4 x i64>,
<vscale x 4 x i64>,
- i64,
- i64);
+ i64, i64, i64);
define <vscale x 4 x i64> @intrinsic_vsmul_vx_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, i64 %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vsmul_vx_nxv4i64_nxv4i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma
+; CHECK-NEXT: csrwi vxrm, 0
; CHECK-NEXT: vsmul.vx v8, v8, a0
; CHECK-NEXT: ret
entry:
<vscale x 4 x i64> undef,
<vscale x 4 x i64> %0,
i64 %1,
- i64 %2)
+ i64 0, i64 %2)
ret <vscale x 4 x i64> %a
}
<vscale x 4 x i64>,
i64,
<vscale x 4 x i1>,
- i64,
- i64);
+ i64, i64, i64);
define <vscale x 4 x i64> @intrinsic_vsmul_mask_vx_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, i64 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vsmul_mask_vx_nxv4i64_nxv4i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu
+; CHECK-NEXT: csrwi vxrm, 0
; CHECK-NEXT: vsmul.vx v8, v12, a0, v0.t
; CHECK-NEXT: ret
entry:
<vscale x 4 x i64> %1,
i64 %2,
<vscale x 4 x i1> %3,
- i64 %4, i64 1)
+ i64 0, i64 %4, i64 1)
ret <vscale x 4 x i64> %a
}
declare <vscale x 8 x i64> @llvm.riscv.vsmul.nxv8i64.i64(
<vscale x 8 x i64>,
<vscale x 8 x i64>,
- i64,
- i64);
+ i64, i64, i64);
define <vscale x 8 x i64> @intrinsic_vsmul_vx_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, i64 %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vsmul_vx_nxv8i64_nxv8i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, ma
+; CHECK-NEXT: csrwi vxrm, 0
; CHECK-NEXT: vsmul.vx v8, v8, a0
; CHECK-NEXT: ret
entry:
<vscale x 8 x i64> undef,
<vscale x 8 x i64> %0,
i64 %1,
- i64 %2)
+ i64 0, i64 %2)
ret <vscale x 8 x i64> %a
}
<vscale x 8 x i64>,
i64,
<vscale x 8 x i1>,
- i64,
- i64);
+ i64, i64, i64);
define <vscale x 8 x i64> @intrinsic_vsmul_mask_vx_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, i64 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vsmul_mask_vx_nxv8i64_nxv8i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu
+; CHECK-NEXT: csrwi vxrm, 0
; CHECK-NEXT: vsmul.vx v8, v16, a0, v0.t
; CHECK-NEXT: ret
entry:
<vscale x 8 x i64> %1,
i64 %2,
<vscale x 8 x i1> %3,
- i64 %4, i64 1)
+ i64 0, i64 %4, i64 1)
ret <vscale x 8 x i64> %a
}
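; Illustrative sketch only (not part of this patch): with the updated signature,
; the unmasked vsmul intrinsic takes (passthru, op0, op1, vxrm, vl), so the
; rounding mode is now an explicit operand that the backend lowers to a
; "csrwi vxrm" write instead of relying on ambient CSR state. The function and
; value names below are hypothetical; the declaration mirrors the updated
; @llvm.riscv.vsmul.nxv1i64.i64 declare shown above, and vxrm 0 selects
; round-to-nearest-up (rnu), matching the "csrwi vxrm, 0" lines in the checks.
;
; declare <vscale x 1 x i64> @llvm.riscv.vsmul.nxv1i64.i64(
;   <vscale x 1 x i64>, <vscale x 1 x i64>, i64, i64, i64)
;
; define <vscale x 1 x i64> @sketch_vsmul_vx_rnu(<vscale x 1 x i64> %op1, i64 %op2, i64 %vl) {
; entry:
;   ; passthru = poison, vxrm = 0 (rnu), vl passed through
;   %r = call <vscale x 1 x i64> @llvm.riscv.vsmul.nxv1i64.i64(
;       <vscale x 1 x i64> poison, <vscale x 1 x i64> %op1, i64 %op2, i64 0, i64 %vl)
;   ret <vscale x 1 x i64> %r
; }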
-; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 2
; RUN: llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs \
; RUN: < %s | FileCheck %s
-declare <vscale x 1 x i8> @llvm.riscv.vssra.nxv1i8.nxv1i8(
- <vscale x 1 x i8>,
- <vscale x 1 x i8>,
- <vscale x 1 x i8>,
- i32);
-define <vscale x 1 x i8> @intrinsic_vssra_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vssra_vv_nxv1i8_nxv1i8_nxv1i8:
+define <vscale x 1 x i8> @test_vssra_vv_i8mf8(<vscale x 1 x i8> %op1, <vscale x 1 x i8> %shift, i32 %vl) {
+; CHECK-LABEL: test_vssra_vv_i8mf8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
+; CHECK-NEXT: csrwi vxrm, 0
; CHECK-NEXT: vssra.vv v8, v8, v9
; CHECK-NEXT: ret
entry:
- %a = call <vscale x 1 x i8> @llvm.riscv.vssra.nxv1i8.nxv1i8(
- <vscale x 1 x i8> undef,
- <vscale x 1 x i8> %0,
- <vscale x 1 x i8> %1,
- i32 %2)
-
- ret <vscale x 1 x i8> %a
+ %0 = call <vscale x 1 x i8> @llvm.riscv.vssra.nxv1i8.nxv1i8.i32(<vscale x 1 x i8> poison, <vscale x 1 x i8> %op1, <vscale x 1 x i8> %shift, i32 0, i32 %vl)
+ ret <vscale x 1 x i8> %0
}
-declare <vscale x 1 x i8> @llvm.riscv.vssra.mask.nxv1i8.nxv1i8(
- <vscale x 1 x i8>,
- <vscale x 1 x i8>,
- <vscale x 1 x i8>,
- <vscale x 1 x i1>,
- i32,
- i32);
+declare <vscale x 1 x i8> @llvm.riscv.vssra.nxv1i8.nxv1i8.i32(<vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, i32 immarg, i32)
-define <vscale x 1 x i8> @intrinsic_vssra_mask_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vssra_mask_vv_nxv1i8_nxv1i8_nxv1i8:
+define <vscale x 1 x i8> @test_vssra_vx_i8mf8(<vscale x 1 x i8> %op1, i32 %shift, i32 %vl) {
+; CHECK-LABEL: test_vssra_vx_i8mf8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu
-; CHECK-NEXT: vssra.vv v8, v9, v10, v0.t
+; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
+; CHECK-NEXT: csrwi vxrm, 0
+; CHECK-NEXT: vssra.vx v8, v8, a0
; CHECK-NEXT: ret
entry:
- %a = call <vscale x 1 x i8> @llvm.riscv.vssra.mask.nxv1i8.nxv1i8(
- <vscale x 1 x i8> %0,
- <vscale x 1 x i8> %1,
- <vscale x 1 x i8> %2,
- <vscale x 1 x i1> %3,
- i32 %4, i32 1)
-
- ret <vscale x 1 x i8> %a
+ %0 = call <vscale x 1 x i8> @llvm.riscv.vssra.nxv1i8.i32.i32(<vscale x 1 x i8> poison, <vscale x 1 x i8> %op1, i32 %shift, i32 0, i32 %vl)
+ ret <vscale x 1 x i8> %0
}
-declare <vscale x 2 x i8> @llvm.riscv.vssra.nxv2i8.nxv2i8(
- <vscale x 2 x i8>,
- <vscale x 2 x i8>,
- <vscale x 2 x i8>,
- i32);
+declare <vscale x 1 x i8> @llvm.riscv.vssra.nxv1i8.i32.i32(<vscale x 1 x i8>, <vscale x 1 x i8>, i32, i32 immarg, i32)
-define <vscale x 2 x i8> @intrinsic_vssra_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vssra_vv_nxv2i8_nxv2i8_nxv2i8:
+define <vscale x 2 x i8> @test_vssra_vv_i8mf4(<vscale x 2 x i8> %op1, <vscale x 2 x i8> %shift, i32 %vl) {
+; CHECK-LABEL: test_vssra_vv_i8mf4:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma
+; CHECK-NEXT: csrwi vxrm, 0
; CHECK-NEXT: vssra.vv v8, v8, v9
; CHECK-NEXT: ret
entry:
- %a = call <vscale x 2 x i8> @llvm.riscv.vssra.nxv2i8.nxv2i8(
- <vscale x 2 x i8> undef,
- <vscale x 2 x i8> %0,
- <vscale x 2 x i8> %1,
- i32 %2)
-
- ret <vscale x 2 x i8> %a
+ %0 = call <vscale x 2 x i8> @llvm.riscv.vssra.nxv2i8.nxv2i8.i32(<vscale x 2 x i8> poison, <vscale x 2 x i8> %op1, <vscale x 2 x i8> %shift, i32 0, i32 %vl)
+ ret <vscale x 2 x i8> %0
}
-declare <vscale x 2 x i8> @llvm.riscv.vssra.mask.nxv2i8.nxv2i8(
- <vscale x 2 x i8>,
- <vscale x 2 x i8>,
- <vscale x 2 x i8>,
- <vscale x 2 x i1>,
- i32,
- i32);
+declare <vscale x 2 x i8> @llvm.riscv.vssra.nxv2i8.nxv2i8.i32(<vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, i32 immarg, i32)
-define <vscale x 2 x i8> @intrinsic_vssra_mask_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vssra_mask_vv_nxv2i8_nxv2i8_nxv2i8:
+define <vscale x 2 x i8> @test_vssra_vx_i8mf4(<vscale x 2 x i8> %op1, i32 %shift, i32 %vl) {
+; CHECK-LABEL: test_vssra_vx_i8mf4:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu
-; CHECK-NEXT: vssra.vv v8, v9, v10, v0.t
+; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
+; CHECK-NEXT: csrwi vxrm, 0
+; CHECK-NEXT: vssra.vx v8, v8, a0
; CHECK-NEXT: ret
entry:
- %a = call <vscale x 2 x i8> @llvm.riscv.vssra.mask.nxv2i8.nxv2i8(
- <vscale x 2 x i8> %0,
- <vscale x 2 x i8> %1,
- <vscale x 2 x i8> %2,
- <vscale x 2 x i1> %3,
- i32 %4, i32 1)
-
- ret <vscale x 2 x i8> %a
+ %0 = call <vscale x 2 x i8> @llvm.riscv.vssra.nxv2i8.i32.i32(<vscale x 2 x i8> poison, <vscale x 2 x i8> %op1, i32 %shift, i32 0, i32 %vl)
+ ret <vscale x 2 x i8> %0
}
-declare <vscale x 4 x i8> @llvm.riscv.vssra.nxv4i8.nxv4i8(
- <vscale x 4 x i8>,
- <vscale x 4 x i8>,
- <vscale x 4 x i8>,
- i32);
+declare <vscale x 2 x i8> @llvm.riscv.vssra.nxv2i8.i32.i32(<vscale x 2 x i8>, <vscale x 2 x i8>, i32, i32 immarg, i32)
-define <vscale x 4 x i8> @intrinsic_vssra_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vssra_vv_nxv4i8_nxv4i8_nxv4i8:
+define <vscale x 4 x i8> @test_vssra_vv_i8mf2(<vscale x 4 x i8> %op1, <vscale x 4 x i8> %shift, i32 %vl) {
+; CHECK-LABEL: test_vssra_vv_i8mf2:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
+; CHECK-NEXT: csrwi vxrm, 0
; CHECK-NEXT: vssra.vv v8, v8, v9
; CHECK-NEXT: ret
entry:
- %a = call <vscale x 4 x i8> @llvm.riscv.vssra.nxv4i8.nxv4i8(
- <vscale x 4 x i8> undef,
- <vscale x 4 x i8> %0,
- <vscale x 4 x i8> %1,
- i32 %2)
-
- ret <vscale x 4 x i8> %a
+ %0 = call <vscale x 4 x i8> @llvm.riscv.vssra.nxv4i8.nxv4i8.i32(<vscale x 4 x i8> poison, <vscale x 4 x i8> %op1, <vscale x 4 x i8> %shift, i32 0, i32 %vl)
+ ret <vscale x 4 x i8> %0
}
-declare <vscale x 4 x i8> @llvm.riscv.vssra.mask.nxv4i8.nxv4i8(
- <vscale x 4 x i8>,
- <vscale x 4 x i8>,
- <vscale x 4 x i8>,
- <vscale x 4 x i1>,
- i32,
- i32);
+declare <vscale x 4 x i8> @llvm.riscv.vssra.nxv4i8.nxv4i8.i32(<vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, i32 immarg, i32)
-define <vscale x 4 x i8> @intrinsic_vssra_mask_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vssra_mask_vv_nxv4i8_nxv4i8_nxv4i8:
+define <vscale x 4 x i8> @test_vssra_vx_i8mf2(<vscale x 4 x i8> %op1, i32 %shift, i32 %vl) {
+; CHECK-LABEL: test_vssra_vx_i8mf2:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu
-; CHECK-NEXT: vssra.vv v8, v9, v10, v0.t
+; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
+; CHECK-NEXT: csrwi vxrm, 0
+; CHECK-NEXT: vssra.vx v8, v8, a0
; CHECK-NEXT: ret
entry:
- %a = call <vscale x 4 x i8> @llvm.riscv.vssra.mask.nxv4i8.nxv4i8(
- <vscale x 4 x i8> %0,
- <vscale x 4 x i8> %1,
- <vscale x 4 x i8> %2,
- <vscale x 4 x i1> %3,
- i32 %4, i32 1)
-
- ret <vscale x 4 x i8> %a
+ %0 = call <vscale x 4 x i8> @llvm.riscv.vssra.nxv4i8.i32.i32(<vscale x 4 x i8> poison, <vscale x 4 x i8> %op1, i32 %shift, i32 0, i32 %vl)
+ ret <vscale x 4 x i8> %0
}
-declare <vscale x 8 x i8> @llvm.riscv.vssra.nxv8i8.nxv8i8(
- <vscale x 8 x i8>,
- <vscale x 8 x i8>,
- <vscale x 8 x i8>,
- i32);
+declare <vscale x 4 x i8> @llvm.riscv.vssra.nxv4i8.i32.i32(<vscale x 4 x i8>, <vscale x 4 x i8>, i32, i32 immarg, i32)
-define <vscale x 8 x i8> @intrinsic_vssra_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vssra_vv_nxv8i8_nxv8i8_nxv8i8:
+define <vscale x 8 x i8> @test_vssra_vv_i8m1(<vscale x 8 x i8> %op1, <vscale x 8 x i8> %shift, i32 %vl) {
+; CHECK-LABEL: test_vssra_vv_i8m1:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma
+; CHECK-NEXT: csrwi vxrm, 0
; CHECK-NEXT: vssra.vv v8, v8, v9
; CHECK-NEXT: ret
entry:
- %a = call <vscale x 8 x i8> @llvm.riscv.vssra.nxv8i8.nxv8i8(
- <vscale x 8 x i8> undef,
- <vscale x 8 x i8> %0,
- <vscale x 8 x i8> %1,
- i32 %2)
-
- ret <vscale x 8 x i8> %a
+ %0 = call <vscale x 8 x i8> @llvm.riscv.vssra.nxv8i8.nxv8i8.i32(<vscale x 8 x i8> poison, <vscale x 8 x i8> %op1, <vscale x 8 x i8> %shift, i32 0, i32 %vl)
+ ret <vscale x 8 x i8> %0
}
-declare <vscale x 8 x i8> @llvm.riscv.vssra.mask.nxv8i8.nxv8i8(
- <vscale x 8 x i8>,
- <vscale x 8 x i8>,
- <vscale x 8 x i8>,
- <vscale x 8 x i1>,
- i32,
- i32);
+declare <vscale x 8 x i8> @llvm.riscv.vssra.nxv8i8.nxv8i8.i32(<vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, i32 immarg, i32)
-define <vscale x 8 x i8> @intrinsic_vssra_mask_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vssra_mask_vv_nxv8i8_nxv8i8_nxv8i8:
+define <vscale x 8 x i8> @test_vssra_vx_i8m1(<vscale x 8 x i8> %op1, i32 %shift, i32 %vl) {
+; CHECK-LABEL: test_vssra_vx_i8m1:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu
-; CHECK-NEXT: vssra.vv v8, v9, v10, v0.t
+; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
+; CHECK-NEXT: csrwi vxrm, 0
+; CHECK-NEXT: vssra.vx v8, v8, a0
; CHECK-NEXT: ret
entry:
- %a = call <vscale x 8 x i8> @llvm.riscv.vssra.mask.nxv8i8.nxv8i8(
- <vscale x 8 x i8> %0,
- <vscale x 8 x i8> %1,
- <vscale x 8 x i8> %2,
- <vscale x 8 x i1> %3,
- i32 %4, i32 1)
-
- ret <vscale x 8 x i8> %a
+ %0 = call <vscale x 8 x i8> @llvm.riscv.vssra.nxv8i8.i32.i32(<vscale x 8 x i8> poison, <vscale x 8 x i8> %op1, i32 %shift, i32 0, i32 %vl)
+ ret <vscale x 8 x i8> %0
}
-declare <vscale x 16 x i8> @llvm.riscv.vssra.nxv16i8.nxv16i8(
- <vscale x 16 x i8>,
- <vscale x 16 x i8>,
- <vscale x 16 x i8>,
- i32);
+declare <vscale x 8 x i8> @llvm.riscv.vssra.nxv8i8.i32.i32(<vscale x 8 x i8>, <vscale x 8 x i8>, i32, i32 immarg, i32)
-define <vscale x 16 x i8> @intrinsic_vssra_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vssra_vv_nxv16i8_nxv16i8_nxv16i8:
+define <vscale x 16 x i8> @test_vssra_vv_i8m2(<vscale x 16 x i8> %op1, <vscale x 16 x i8> %shift, i32 %vl) {
+; CHECK-LABEL: test_vssra_vv_i8m2:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma
+; CHECK-NEXT: csrwi vxrm, 0
; CHECK-NEXT: vssra.vv v8, v8, v10
; CHECK-NEXT: ret
entry:
- %a = call <vscale x 16 x i8> @llvm.riscv.vssra.nxv16i8.nxv16i8(
- <vscale x 16 x i8> undef,
- <vscale x 16 x i8> %0,
- <vscale x 16 x i8> %1,
- i32 %2)
-
- ret <vscale x 16 x i8> %a
+ %0 = call <vscale x 16 x i8> @llvm.riscv.vssra.nxv16i8.nxv16i8.i32(<vscale x 16 x i8> poison, <vscale x 16 x i8> %op1, <vscale x 16 x i8> %shift, i32 0, i32 %vl)
+ ret <vscale x 16 x i8> %0
}
-declare <vscale x 16 x i8> @llvm.riscv.vssra.mask.nxv16i8.nxv16i8(
- <vscale x 16 x i8>,
- <vscale x 16 x i8>,
- <vscale x 16 x i8>,
- <vscale x 16 x i1>,
- i32,
- i32);
+declare <vscale x 16 x i8> @llvm.riscv.vssra.nxv16i8.nxv16i8.i32(<vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, i32 immarg, i32)
-define <vscale x 16 x i8> @intrinsic_vssra_mask_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vssra_mask_vv_nxv16i8_nxv16i8_nxv16i8:
+define <vscale x 16 x i8> @test_vssra_vx_i8m2(<vscale x 16 x i8> %op1, i32 %shift, i32 %vl) {
+; CHECK-LABEL: test_vssra_vx_i8m2:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu
-; CHECK-NEXT: vssra.vv v8, v10, v12, v0.t
+; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma
+; CHECK-NEXT: csrwi vxrm, 0
+; CHECK-NEXT: vssra.vx v8, v8, a0
; CHECK-NEXT: ret
entry:
- %a = call <vscale x 16 x i8> @llvm.riscv.vssra.mask.nxv16i8.nxv16i8(
- <vscale x 16 x i8> %0,
- <vscale x 16 x i8> %1,
- <vscale x 16 x i8> %2,
- <vscale x 16 x i1> %3,
- i32 %4, i32 1)
-
- ret <vscale x 16 x i8> %a
+ %0 = call <vscale x 16 x i8> @llvm.riscv.vssra.nxv16i8.i32.i32(<vscale x 16 x i8> poison, <vscale x 16 x i8> %op1, i32 %shift, i32 0, i32 %vl)
+ ret <vscale x 16 x i8> %0
}
-declare <vscale x 32 x i8> @llvm.riscv.vssra.nxv32i8.nxv32i8(
- <vscale x 32 x i8>,
- <vscale x 32 x i8>,
- <vscale x 32 x i8>,
- i32);
+declare <vscale x 16 x i8> @llvm.riscv.vssra.nxv16i8.i32.i32(<vscale x 16 x i8>, <vscale x 16 x i8>, i32, i32 immarg, i32)
-define <vscale x 32 x i8> @intrinsic_vssra_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vssra_vv_nxv32i8_nxv32i8_nxv32i8:
+define <vscale x 32 x i8> @test_vssra_vv_i8m4(<vscale x 32 x i8> %op1, <vscale x 32 x i8> %shift, i32 %vl) {
+; CHECK-LABEL: test_vssra_vv_i8m4:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma
+; CHECK-NEXT: csrwi vxrm, 0
; CHECK-NEXT: vssra.vv v8, v8, v12
; CHECK-NEXT: ret
entry:
- %a = call <vscale x 32 x i8> @llvm.riscv.vssra.nxv32i8.nxv32i8(
- <vscale x 32 x i8> undef,
- <vscale x 32 x i8> %0,
- <vscale x 32 x i8> %1,
- i32 %2)
-
- ret <vscale x 32 x i8> %a
+ %0 = call <vscale x 32 x i8> @llvm.riscv.vssra.nxv32i8.nxv32i8.i32(<vscale x 32 x i8> poison, <vscale x 32 x i8> %op1, <vscale x 32 x i8> %shift, i32 0, i32 %vl)
+ ret <vscale x 32 x i8> %0
}
-declare <vscale x 32 x i8> @llvm.riscv.vssra.mask.nxv32i8.nxv32i8(
- <vscale x 32 x i8>,
- <vscale x 32 x i8>,
- <vscale x 32 x i8>,
- <vscale x 32 x i1>,
- i32,
- i32);
+declare <vscale x 32 x i8> @llvm.riscv.vssra.nxv32i8.nxv32i8.i32(<vscale x 32 x i8>, <vscale x 32 x i8>, <vscale x 32 x i8>, i32 immarg, i32)
-define <vscale x 32 x i8> @intrinsic_vssra_mask_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vssra_mask_vv_nxv32i8_nxv32i8_nxv32i8:
+define <vscale x 32 x i8> @test_vssra_vx_i8m4(<vscale x 32 x i8> %op1, i32 %shift, i32 %vl) {
+; CHECK-LABEL: test_vssra_vx_i8m4:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu
-; CHECK-NEXT: vssra.vv v8, v12, v16, v0.t
+; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma
+; CHECK-NEXT: csrwi vxrm, 0
+; CHECK-NEXT: vssra.vx v8, v8, a0
; CHECK-NEXT: ret
entry:
- %a = call <vscale x 32 x i8> @llvm.riscv.vssra.mask.nxv32i8.nxv32i8(
- <vscale x 32 x i8> %0,
- <vscale x 32 x i8> %1,
- <vscale x 32 x i8> %2,
- <vscale x 32 x i1> %3,
- i32 %4, i32 1)
-
- ret <vscale x 32 x i8> %a
+ %0 = call <vscale x 32 x i8> @llvm.riscv.vssra.nxv32i8.i32.i32(<vscale x 32 x i8> poison, <vscale x 32 x i8> %op1, i32 %shift, i32 0, i32 %vl)
+ ret <vscale x 32 x i8> %0
}
-declare <vscale x 64 x i8> @llvm.riscv.vssra.nxv64i8.nxv64i8(
- <vscale x 64 x i8>,
- <vscale x 64 x i8>,
- <vscale x 64 x i8>,
- i32);
+declare <vscale x 32 x i8> @llvm.riscv.vssra.nxv32i8.i32.i32(<vscale x 32 x i8>, <vscale x 32 x i8>, i32, i32 immarg, i32)
-define <vscale x 64 x i8> @intrinsic_vssra_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vssra_vv_nxv64i8_nxv64i8_nxv64i8:
+define <vscale x 64 x i8> @test_vssra_vv_i8m8(<vscale x 64 x i8> %op1, <vscale x 64 x i8> %shift, i32 %vl) {
+; CHECK-LABEL: test_vssra_vv_i8m8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma
+; CHECK-NEXT: csrwi vxrm, 0
; CHECK-NEXT: vssra.vv v8, v8, v16
; CHECK-NEXT: ret
entry:
- %a = call <vscale x 64 x i8> @llvm.riscv.vssra.nxv64i8.nxv64i8(
- <vscale x 64 x i8> undef,
- <vscale x 64 x i8> %0,
- <vscale x 64 x i8> %1,
- i32 %2)
-
- ret <vscale x 64 x i8> %a
+ %0 = call <vscale x 64 x i8> @llvm.riscv.vssra.nxv64i8.nxv64i8.i32(<vscale x 64 x i8> poison, <vscale x 64 x i8> %op1, <vscale x 64 x i8> %shift, i32 0, i32 %vl)
+ ret <vscale x 64 x i8> %0
}
-declare <vscale x 64 x i8> @llvm.riscv.vssra.mask.nxv64i8.nxv64i8(
- <vscale x 64 x i8>,
- <vscale x 64 x i8>,
- <vscale x 64 x i8>,
- <vscale x 64 x i1>,
- i32,
- i32);
+declare <vscale x 64 x i8> @llvm.riscv.vssra.nxv64i8.nxv64i8.i32(<vscale x 64 x i8>, <vscale x 64 x i8>, <vscale x 64 x i8>, i32 immarg, i32)
-define <vscale x 64 x i8> @intrinsic_vssra_mask_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i8> %2, <vscale x 64 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vssra_mask_vv_nxv64i8_nxv64i8_nxv64i8:
+define <vscale x 64 x i8> @test_vssra_vx_i8m8(<vscale x 64 x i8> %op1, i32 %shift, i32 %vl) {
+; CHECK-LABEL: test_vssra_vx_i8m8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vl8r.v v24, (a0)
-; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu
-; CHECK-NEXT: vssra.vv v8, v16, v24, v0.t
+; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma
+; CHECK-NEXT: csrwi vxrm, 0
+; CHECK-NEXT: vssra.vx v8, v8, a0
; CHECK-NEXT: ret
entry:
- %a = call <vscale x 64 x i8> @llvm.riscv.vssra.mask.nxv64i8.nxv64i8(
- <vscale x 64 x i8> %0,
- <vscale x 64 x i8> %1,
- <vscale x 64 x i8> %2,
- <vscale x 64 x i1> %3,
- i32 %4, i32 1)
-
- ret <vscale x 64 x i8> %a
+ %0 = call <vscale x 64 x i8> @llvm.riscv.vssra.nxv64i8.i32.i32(<vscale x 64 x i8> poison, <vscale x 64 x i8> %op1, i32 %shift, i32 0, i32 %vl)
+ ret <vscale x 64 x i8> %0
}
-declare <vscale x 1 x i16> @llvm.riscv.vssra.nxv1i16.nxv1i16(
- <vscale x 1 x i16>,
- <vscale x 1 x i16>,
- <vscale x 1 x i16>,
- i32);
+declare <vscale x 64 x i8> @llvm.riscv.vssra.nxv64i8.i32.i32(<vscale x 64 x i8>, <vscale x 64 x i8>, i32, i32 immarg, i32)
-define <vscale x 1 x i16> @intrinsic_vssra_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vssra_vv_nxv1i16_nxv1i16_nxv1i16:
+define <vscale x 1 x i16> @test_vssra_vv_i16mf4(<vscale x 1 x i16> %op1, <vscale x 1 x i16> %shift, i32 %vl) {
+; CHECK-LABEL: test_vssra_vv_i16mf4:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
+; CHECK-NEXT: csrwi vxrm, 0
; CHECK-NEXT: vssra.vv v8, v8, v9
; CHECK-NEXT: ret
entry:
- %a = call <vscale x 1 x i16> @llvm.riscv.vssra.nxv1i16.nxv1i16(
- <vscale x 1 x i16> undef,
- <vscale x 1 x i16> %0,
- <vscale x 1 x i16> %1,
- i32 %2)
-
- ret <vscale x 1 x i16> %a
+ %0 = call <vscale x 1 x i16> @llvm.riscv.vssra.nxv1i16.nxv1i16.i32(<vscale x 1 x i16> poison, <vscale x 1 x i16> %op1, <vscale x 1 x i16> %shift, i32 0, i32 %vl)
+ ret <vscale x 1 x i16> %0
}
-declare <vscale x 1 x i16> @llvm.riscv.vssra.mask.nxv1i16.nxv1i16(
- <vscale x 1 x i16>,
- <vscale x 1 x i16>,
- <vscale x 1 x i16>,
- <vscale x 1 x i1>,
- i32,
- i32);
+declare <vscale x 1 x i16> @llvm.riscv.vssra.nxv1i16.nxv1i16.i32(<vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, i32 immarg, i32)
-define <vscale x 1 x i16> @intrinsic_vssra_mask_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vssra_mask_vv_nxv1i16_nxv1i16_nxv1i16:
+define <vscale x 1 x i16> @test_vssra_vx_i16mf4(<vscale x 1 x i16> %op1, i32 %shift, i32 %vl) {
+; CHECK-LABEL: test_vssra_vx_i16mf4:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
-; CHECK-NEXT: vssra.vv v8, v9, v10, v0.t
+; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
+; CHECK-NEXT: csrwi vxrm, 0
+; CHECK-NEXT: vssra.vx v8, v8, a0
; CHECK-NEXT: ret
entry:
- %a = call <vscale x 1 x i16> @llvm.riscv.vssra.mask.nxv1i16.nxv1i16(
- <vscale x 1 x i16> %0,
- <vscale x 1 x i16> %1,
- <vscale x 1 x i16> %2,
- <vscale x 1 x i1> %3,
- i32 %4, i32 1)
-
- ret <vscale x 1 x i16> %a
+ %0 = call <vscale x 1 x i16> @llvm.riscv.vssra.nxv1i16.i32.i32(<vscale x 1 x i16> poison, <vscale x 1 x i16> %op1, i32 %shift, i32 0, i32 %vl)
+ ret <vscale x 1 x i16> %0
}
-declare <vscale x 2 x i16> @llvm.riscv.vssra.nxv2i16.nxv2i16(
- <vscale x 2 x i16>,
- <vscale x 2 x i16>,
- <vscale x 2 x i16>,
- i32);
+declare <vscale x 1 x i16> @llvm.riscv.vssra.nxv1i16.i32.i32(<vscale x 1 x i16>, <vscale x 1 x i16>, i32, i32 immarg, i32)
-define <vscale x 2 x i16> @intrinsic_vssra_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vssra_vv_nxv2i16_nxv2i16_nxv2i16:
+define <vscale x 2 x i16> @test_vssra_vv_i16mf2(<vscale x 2 x i16> %op1, <vscale x 2 x i16> %shift, i32 %vl) {
+; CHECK-LABEL: test_vssra_vv_i16mf2:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
+; CHECK-NEXT: csrwi vxrm, 0
; CHECK-NEXT: vssra.vv v8, v8, v9
; CHECK-NEXT: ret
entry:
- %a = call <vscale x 2 x i16> @llvm.riscv.vssra.nxv2i16.nxv2i16(
- <vscale x 2 x i16> undef,
- <vscale x 2 x i16> %0,
- <vscale x 2 x i16> %1,
- i32 %2)
-
- ret <vscale x 2 x i16> %a
+ %0 = call <vscale x 2 x i16> @llvm.riscv.vssra.nxv2i16.nxv2i16.i32(<vscale x 2 x i16> poison, <vscale x 2 x i16> %op1, <vscale x 2 x i16> %shift, i32 0, i32 %vl)
+ ret <vscale x 2 x i16> %0
}
-declare <vscale x 2 x i16> @llvm.riscv.vssra.mask.nxv2i16.nxv2i16(
- <vscale x 2 x i16>,
- <vscale x 2 x i16>,
- <vscale x 2 x i16>,
- <vscale x 2 x i1>,
- i32,
- i32);
+declare <vscale x 2 x i16> @llvm.riscv.vssra.nxv2i16.nxv2i16.i32(<vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, i32 immarg, i32)
-define <vscale x 2 x i16> @intrinsic_vssra_mask_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vssra_mask_vv_nxv2i16_nxv2i16_nxv2i16:
+define <vscale x 2 x i16> @test_vssra_vx_i16mf2(<vscale x 2 x i16> %op1, i32 %shift, i32 %vl) {
+; CHECK-LABEL: test_vssra_vx_i16mf2:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu
-; CHECK-NEXT: vssra.vv v8, v9, v10, v0.t
+; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
+; CHECK-NEXT: csrwi vxrm, 0
+; CHECK-NEXT: vssra.vx v8, v8, a0
; CHECK-NEXT: ret
entry:
- %a = call <vscale x 2 x i16> @llvm.riscv.vssra.mask.nxv2i16.nxv2i16(
- <vscale x 2 x i16> %0,
- <vscale x 2 x i16> %1,
- <vscale x 2 x i16> %2,
- <vscale x 2 x i1> %3,
- i32 %4, i32 1)
-
- ret <vscale x 2 x i16> %a
+ %0 = call <vscale x 2 x i16> @llvm.riscv.vssra.nxv2i16.i32.i32(<vscale x 2 x i16> poison, <vscale x 2 x i16> %op1, i32 %shift, i32 0, i32 %vl)
+ ret <vscale x 2 x i16> %0
}
-declare <vscale x 4 x i16> @llvm.riscv.vssra.nxv4i16.nxv4i16(
- <vscale x 4 x i16>,
- <vscale x 4 x i16>,
- <vscale x 4 x i16>,
- i32);
+declare <vscale x 2 x i16> @llvm.riscv.vssra.nxv2i16.i32.i32(<vscale x 2 x i16>, <vscale x 2 x i16>, i32, i32 immarg, i32)
-define <vscale x 4 x i16> @intrinsic_vssra_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vssra_vv_nxv4i16_nxv4i16_nxv4i16:
+define <vscale x 4 x i16> @test_vssra_vv_i16m1(<vscale x 4 x i16> %op1, <vscale x 4 x i16> %shift, i32 %vl) {
+; CHECK-LABEL: test_vssra_vv_i16m1:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
+; CHECK-NEXT: csrwi vxrm, 0
; CHECK-NEXT: vssra.vv v8, v8, v9
; CHECK-NEXT: ret
entry:
- %a = call <vscale x 4 x i16> @llvm.riscv.vssra.nxv4i16.nxv4i16(
- <vscale x 4 x i16> undef,
- <vscale x 4 x i16> %0,
- <vscale x 4 x i16> %1,
- i32 %2)
-
- ret <vscale x 4 x i16> %a
+ %0 = call <vscale x 4 x i16> @llvm.riscv.vssra.nxv4i16.nxv4i16.i32(<vscale x 4 x i16> poison, <vscale x 4 x i16> %op1, <vscale x 4 x i16> %shift, i32 0, i32 %vl)
+ ret <vscale x 4 x i16> %0
}
-declare <vscale x 4 x i16> @llvm.riscv.vssra.mask.nxv4i16.nxv4i16(
- <vscale x 4 x i16>,
- <vscale x 4 x i16>,
- <vscale x 4 x i16>,
- <vscale x 4 x i1>,
- i32,
- i32);
+declare <vscale x 4 x i16> @llvm.riscv.vssra.nxv4i16.nxv4i16.i32(<vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, i32 immarg, i32)
-define <vscale x 4 x i16> @intrinsic_vssra_mask_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vssra_mask_vv_nxv4i16_nxv4i16_nxv4i16:
+define <vscale x 4 x i16> @test_vssra_vx_i16m1(<vscale x 4 x i16> %op1, i32 %shift, i32 %vl) {
+; CHECK-LABEL: test_vssra_vx_i16m1:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu
-; CHECK-NEXT: vssra.vv v8, v9, v10, v0.t
+; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
+; CHECK-NEXT: csrwi vxrm, 0
+; CHECK-NEXT: vssra.vx v8, v8, a0
; CHECK-NEXT: ret
entry:
- %a = call <vscale x 4 x i16> @llvm.riscv.vssra.mask.nxv4i16.nxv4i16(
- <vscale x 4 x i16> %0,
- <vscale x 4 x i16> %1,
- <vscale x 4 x i16> %2,
- <vscale x 4 x i1> %3,
- i32 %4, i32 1)
-
- ret <vscale x 4 x i16> %a
+ %0 = call <vscale x 4 x i16> @llvm.riscv.vssra.nxv4i16.i32.i32(<vscale x 4 x i16> poison, <vscale x 4 x i16> %op1, i32 %shift, i32 0, i32 %vl)
+ ret <vscale x 4 x i16> %0
}
-declare <vscale x 8 x i16> @llvm.riscv.vssra.nxv8i16.nxv8i16(
- <vscale x 8 x i16>,
- <vscale x 8 x i16>,
- <vscale x 8 x i16>,
- i32);
+declare <vscale x 4 x i16> @llvm.riscv.vssra.nxv4i16.i32.i32(<vscale x 4 x i16>, <vscale x 4 x i16>, i32, i32 immarg, i32)
-define <vscale x 8 x i16> @intrinsic_vssra_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vssra_vv_nxv8i16_nxv8i16_nxv8i16:
+define <vscale x 8 x i16> @test_vssra_vv_i16m2(<vscale x 8 x i16> %op1, <vscale x 8 x i16> %shift, i32 %vl) {
+; CHECK-LABEL: test_vssra_vv_i16m2:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
+; CHECK-NEXT: csrwi vxrm, 0
; CHECK-NEXT: vssra.vv v8, v8, v10
; CHECK-NEXT: ret
entry:
- %a = call <vscale x 8 x i16> @llvm.riscv.vssra.nxv8i16.nxv8i16(
- <vscale x 8 x i16> undef,
- <vscale x 8 x i16> %0,
- <vscale x 8 x i16> %1,
- i32 %2)
-
- ret <vscale x 8 x i16> %a
+ %0 = call <vscale x 8 x i16> @llvm.riscv.vssra.nxv8i16.nxv8i16.i32(<vscale x 8 x i16> poison, <vscale x 8 x i16> %op1, <vscale x 8 x i16> %shift, i32 0, i32 %vl)
+ ret <vscale x 8 x i16> %0
}
-declare <vscale x 8 x i16> @llvm.riscv.vssra.mask.nxv8i16.nxv8i16(
- <vscale x 8 x i16>,
- <vscale x 8 x i16>,
- <vscale x 8 x i16>,
- <vscale x 8 x i1>,
- i32,
- i32);
+declare <vscale x 8 x i16> @llvm.riscv.vssra.nxv8i16.nxv8i16.i32(<vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, i32 immarg, i32)
-define <vscale x 8 x i16> @intrinsic_vssra_mask_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vssra_mask_vv_nxv8i16_nxv8i16_nxv8i16:
+define <vscale x 8 x i16> @test_vssra_vx_i16m2(<vscale x 8 x i16> %op1, i32 %shift, i32 %vl) {
+; CHECK-LABEL: test_vssra_vx_i16m2:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu
-; CHECK-NEXT: vssra.vv v8, v10, v12, v0.t
+; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
+; CHECK-NEXT: csrwi vxrm, 0
+; CHECK-NEXT: vssra.vx v8, v8, a0
; CHECK-NEXT: ret
entry:
- %a = call <vscale x 8 x i16> @llvm.riscv.vssra.mask.nxv8i16.nxv8i16(
- <vscale x 8 x i16> %0,
- <vscale x 8 x i16> %1,
- <vscale x 8 x i16> %2,
- <vscale x 8 x i1> %3,
- i32 %4, i32 1)
-
- ret <vscale x 8 x i16> %a
+ %0 = call <vscale x 8 x i16> @llvm.riscv.vssra.nxv8i16.i32.i32(<vscale x 8 x i16> poison, <vscale x 8 x i16> %op1, i32 %shift, i32 0, i32 %vl)
+ ret <vscale x 8 x i16> %0
}
-declare <vscale x 16 x i16> @llvm.riscv.vssra.nxv16i16.nxv16i16(
- <vscale x 16 x i16>,
- <vscale x 16 x i16>,
- <vscale x 16 x i16>,
- i32);
+declare <vscale x 8 x i16> @llvm.riscv.vssra.nxv8i16.i32.i32(<vscale x 8 x i16>, <vscale x 8 x i16>, i32, i32 immarg, i32)
-define <vscale x 16 x i16> @intrinsic_vssra_vv_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vssra_vv_nxv16i16_nxv16i16_nxv16i16:
+define <vscale x 16 x i16> @test_vssra_vv_i16m4(<vscale x 16 x i16> %op1, <vscale x 16 x i16> %shift, i32 %vl) {
+; CHECK-LABEL: test_vssra_vv_i16m4:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma
+; CHECK-NEXT: csrwi vxrm, 0
; CHECK-NEXT: vssra.vv v8, v8, v12
; CHECK-NEXT: ret
entry:
- %a = call <vscale x 16 x i16> @llvm.riscv.vssra.nxv16i16.nxv16i16(
- <vscale x 16 x i16> undef,
- <vscale x 16 x i16> %0,
- <vscale x 16 x i16> %1,
- i32 %2)
-
- ret <vscale x 16 x i16> %a
+ %0 = call <vscale x 16 x i16> @llvm.riscv.vssra.nxv16i16.nxv16i16.i32(<vscale x 16 x i16> poison, <vscale x 16 x i16> %op1, <vscale x 16 x i16> %shift, i32 0, i32 %vl)
+ ret <vscale x 16 x i16> %0
}
-declare <vscale x 16 x i16> @llvm.riscv.vssra.mask.nxv16i16.nxv16i16(
- <vscale x 16 x i16>,
- <vscale x 16 x i16>,
- <vscale x 16 x i16>,
- <vscale x 16 x i1>,
- i32,
- i32);
+declare <vscale x 16 x i16> @llvm.riscv.vssra.nxv16i16.nxv16i16.i32(<vscale x 16 x i16>, <vscale x 16 x i16>, <vscale x 16 x i16>, i32 immarg, i32)
-define <vscale x 16 x i16> @intrinsic_vssra_mask_vv_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vssra_mask_vv_nxv16i16_nxv16i16_nxv16i16:
+define <vscale x 16 x i16> @test_vssra_vx_i16m4(<vscale x 16 x i16> %op1, i32 %shift, i32 %vl) {
+; CHECK-LABEL: test_vssra_vx_i16m4:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu
-; CHECK-NEXT: vssra.vv v8, v12, v16, v0.t
+; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma
+; CHECK-NEXT: csrwi vxrm, 0
+; CHECK-NEXT: vssra.vx v8, v8, a0
; CHECK-NEXT: ret
entry:
- %a = call <vscale x 16 x i16> @llvm.riscv.vssra.mask.nxv16i16.nxv16i16(
- <vscale x 16 x i16> %0,
- <vscale x 16 x i16> %1,
- <vscale x 16 x i16> %2,
- <vscale x 16 x i1> %3,
- i32 %4, i32 1)
-
- ret <vscale x 16 x i16> %a
+ %0 = call <vscale x 16 x i16> @llvm.riscv.vssra.nxv16i16.i32.i32(<vscale x 16 x i16> poison, <vscale x 16 x i16> %op1, i32 %shift, i32 0, i32 %vl)
+ ret <vscale x 16 x i16> %0
}
-declare <vscale x 32 x i16> @llvm.riscv.vssra.nxv32i16.nxv32i16(
- <vscale x 32 x i16>,
- <vscale x 32 x i16>,
- <vscale x 32 x i16>,
- i32);
+declare <vscale x 16 x i16> @llvm.riscv.vssra.nxv16i16.i32.i32(<vscale x 16 x i16>, <vscale x 16 x i16>, i32, i32 immarg, i32)
-define <vscale x 32 x i16> @intrinsic_vssra_vv_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vssra_vv_nxv32i16_nxv32i16_nxv32i16:
+define <vscale x 32 x i16> @test_vssra_vv_i16m8(<vscale x 32 x i16> %op1, <vscale x 32 x i16> %shift, i32 %vl) {
+; CHECK-LABEL: test_vssra_vv_i16m8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma
+; CHECK-NEXT: csrwi vxrm, 0
; CHECK-NEXT: vssra.vv v8, v8, v16
; CHECK-NEXT: ret
entry:
- %a = call <vscale x 32 x i16> @llvm.riscv.vssra.nxv32i16.nxv32i16(
- <vscale x 32 x i16> undef,
- <vscale x 32 x i16> %0,
- <vscale x 32 x i16> %1,
- i32 %2)
-
- ret <vscale x 32 x i16> %a
+ %0 = call <vscale x 32 x i16> @llvm.riscv.vssra.nxv32i16.nxv32i16.i32(<vscale x 32 x i16> poison, <vscale x 32 x i16> %op1, <vscale x 32 x i16> %shift, i32 0, i32 %vl)
+ ret <vscale x 32 x i16> %0
}
-declare <vscale x 32 x i16> @llvm.riscv.vssra.mask.nxv32i16.nxv32i16(
- <vscale x 32 x i16>,
- <vscale x 32 x i16>,
- <vscale x 32 x i16>,
- <vscale x 32 x i1>,
- i32,
- i32);
+declare <vscale x 32 x i16> @llvm.riscv.vssra.nxv32i16.nxv32i16.i32(<vscale x 32 x i16>, <vscale x 32 x i16>, <vscale x 32 x i16>, i32 immarg, i32)
-define <vscale x 32 x i16> @intrinsic_vssra_mask_vv_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, <vscale x 32 x i16> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vssra_mask_vv_nxv32i16_nxv32i16_nxv32i16:
+define <vscale x 32 x i16> @test_vssra_vx_i16m8(<vscale x 32 x i16> %op1, i32 %shift, i32 %vl) {
+; CHECK-LABEL: test_vssra_vx_i16m8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vl8re16.v v24, (a0)
-; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu
-; CHECK-NEXT: vssra.vv v8, v16, v24, v0.t
+; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma
+; CHECK-NEXT: csrwi vxrm, 0
+; CHECK-NEXT: vssra.vx v8, v8, a0
; CHECK-NEXT: ret
entry:
- %a = call <vscale x 32 x i16> @llvm.riscv.vssra.mask.nxv32i16.nxv32i16(
- <vscale x 32 x i16> %0,
- <vscale x 32 x i16> %1,
- <vscale x 32 x i16> %2,
- <vscale x 32 x i1> %3,
- i32 %4, i32 1)
-
- ret <vscale x 32 x i16> %a
+ %0 = call <vscale x 32 x i16> @llvm.riscv.vssra.nxv32i16.i32.i32(<vscale x 32 x i16> poison, <vscale x 32 x i16> %op1, i32 %shift, i32 0, i32 %vl)
+ ret <vscale x 32 x i16> %0
}
-declare <vscale x 1 x i32> @llvm.riscv.vssra.nxv1i32.nxv1i32(
- <vscale x 1 x i32>,
- <vscale x 1 x i32>,
- <vscale x 1 x i32>,
- i32);
+declare <vscale x 32 x i16> @llvm.riscv.vssra.nxv32i16.i32.i32(<vscale x 32 x i16>, <vscale x 32 x i16>, i32, i32 immarg, i32)
-define <vscale x 1 x i32> @intrinsic_vssra_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vssra_vv_nxv1i32_nxv1i32_nxv1i32:
+define <vscale x 1 x i32> @test_vssra_vv_i32mf2(<vscale x 1 x i32> %op1, <vscale x 1 x i32> %shift, i32 %vl) {
+; CHECK-LABEL: test_vssra_vv_i32mf2:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
+; CHECK-NEXT: csrwi vxrm, 0
; CHECK-NEXT: vssra.vv v8, v8, v9
; CHECK-NEXT: ret
entry:
- %a = call <vscale x 1 x i32> @llvm.riscv.vssra.nxv1i32.nxv1i32(
- <vscale x 1 x i32> undef,
- <vscale x 1 x i32> %0,
- <vscale x 1 x i32> %1,
- i32 %2)
-
- ret <vscale x 1 x i32> %a
+ %0 = call <vscale x 1 x i32> @llvm.riscv.vssra.nxv1i32.nxv1i32.i32(<vscale x 1 x i32> poison, <vscale x 1 x i32> %op1, <vscale x 1 x i32> %shift, i32 0, i32 %vl)
+ ret <vscale x 1 x i32> %0
}
-declare <vscale x 1 x i32> @llvm.riscv.vssra.mask.nxv1i32.nxv1i32(
- <vscale x 1 x i32>,
- <vscale x 1 x i32>,
- <vscale x 1 x i32>,
- <vscale x 1 x i1>,
- i32,
- i32);
+declare <vscale x 1 x i32> @llvm.riscv.vssra.nxv1i32.nxv1i32.i32(<vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, i32 immarg, i32)
-define <vscale x 1 x i32> @intrinsic_vssra_mask_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vssra_mask_vv_nxv1i32_nxv1i32_nxv1i32:
+define <vscale x 1 x i32> @test_vssra_vx_i32mf2(<vscale x 1 x i32> %op1, i32 %shift, i32 %vl) {
+; CHECK-LABEL: test_vssra_vx_i32mf2:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu
-; CHECK-NEXT: vssra.vv v8, v9, v10, v0.t
+; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
+; CHECK-NEXT: csrwi vxrm, 0
+; CHECK-NEXT: vssra.vx v8, v8, a0
; CHECK-NEXT: ret
entry:
- %a = call <vscale x 1 x i32> @llvm.riscv.vssra.mask.nxv1i32.nxv1i32(
- <vscale x 1 x i32> %0,
- <vscale x 1 x i32> %1,
- <vscale x 1 x i32> %2,
- <vscale x 1 x i1> %3,
- i32 %4, i32 1)
-
- ret <vscale x 1 x i32> %a
+ %0 = call <vscale x 1 x i32> @llvm.riscv.vssra.nxv1i32.i32.i32(<vscale x 1 x i32> poison, <vscale x 1 x i32> %op1, i32 %shift, i32 0, i32 %vl)
+ ret <vscale x 1 x i32> %0
}
-declare <vscale x 2 x i32> @llvm.riscv.vssra.nxv2i32.nxv2i32(
- <vscale x 2 x i32>,
- <vscale x 2 x i32>,
- <vscale x 2 x i32>,
- i32);
+declare <vscale x 1 x i32> @llvm.riscv.vssra.nxv1i32.i32.i32(<vscale x 1 x i32>, <vscale x 1 x i32>, i32, i32 immarg, i32)
-define <vscale x 2 x i32> @intrinsic_vssra_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vssra_vv_nxv2i32_nxv2i32_nxv2i32:
+define <vscale x 2 x i32> @test_vssra_vv_i32m1(<vscale x 2 x i32> %op1, <vscale x 2 x i32> %shift, i32 %vl) {
+; CHECK-LABEL: test_vssra_vv_i32m1:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
+; CHECK-NEXT: csrwi vxrm, 0
; CHECK-NEXT: vssra.vv v8, v8, v9
; CHECK-NEXT: ret
entry:
- %a = call <vscale x 2 x i32> @llvm.riscv.vssra.nxv2i32.nxv2i32(
- <vscale x 2 x i32> undef,
- <vscale x 2 x i32> %0,
- <vscale x 2 x i32> %1,
- i32 %2)
-
- ret <vscale x 2 x i32> %a
+ %0 = call <vscale x 2 x i32> @llvm.riscv.vssra.nxv2i32.nxv2i32.i32(<vscale x 2 x i32> poison, <vscale x 2 x i32> %op1, <vscale x 2 x i32> %shift, i32 0, i32 %vl)
+ ret <vscale x 2 x i32> %0
}
-declare <vscale x 2 x i32> @llvm.riscv.vssra.mask.nxv2i32.nxv2i32(
- <vscale x 2 x i32>,
- <vscale x 2 x i32>,
- <vscale x 2 x i32>,
- <vscale x 2 x i1>,
- i32,
- i32);
+declare <vscale x 2 x i32> @llvm.riscv.vssra.nxv2i32.nxv2i32.i32(<vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, i32 immarg, i32)
-define <vscale x 2 x i32> @intrinsic_vssra_mask_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vssra_mask_vv_nxv2i32_nxv2i32_nxv2i32:
+define <vscale x 2 x i32> @test_vssra_vx_i32m1(<vscale x 2 x i32> %op1, i32 %shift, i32 %vl) {
+; CHECK-LABEL: test_vssra_vx_i32m1:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
-; CHECK-NEXT: vssra.vv v8, v9, v10, v0.t
+; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
+; CHECK-NEXT: csrwi vxrm, 0
+; CHECK-NEXT: vssra.vx v8, v8, a0
; CHECK-NEXT: ret
entry:
- %a = call <vscale x 2 x i32> @llvm.riscv.vssra.mask.nxv2i32.nxv2i32(
- <vscale x 2 x i32> %0,
- <vscale x 2 x i32> %1,
- <vscale x 2 x i32> %2,
- <vscale x 2 x i1> %3,
- i32 %4, i32 1)
-
- ret <vscale x 2 x i32> %a
+ %0 = call <vscale x 2 x i32> @llvm.riscv.vssra.nxv2i32.i32.i32(<vscale x 2 x i32> poison, <vscale x 2 x i32> %op1, i32 %shift, i32 0, i32 %vl)
+ ret <vscale x 2 x i32> %0
}
-declare <vscale x 4 x i32> @llvm.riscv.vssra.nxv4i32.nxv4i32(
- <vscale x 4 x i32>,
- <vscale x 4 x i32>,
- <vscale x 4 x i32>,
- i32);
+declare <vscale x 2 x i32> @llvm.riscv.vssra.nxv2i32.i32.i32(<vscale x 2 x i32>, <vscale x 2 x i32>, i32, i32 immarg, i32)
-define <vscale x 4 x i32> @intrinsic_vssra_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vssra_vv_nxv4i32_nxv4i32_nxv4i32:
+define <vscale x 4 x i32> @test_vssra_vv_i32m2(<vscale x 4 x i32> %op1, <vscale x 4 x i32> %shift, i32 %vl) {
+; CHECK-LABEL: test_vssra_vv_i32m2:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
+; CHECK-NEXT: csrwi vxrm, 0
; CHECK-NEXT: vssra.vv v8, v8, v10
; CHECK-NEXT: ret
entry:
- %a = call <vscale x 4 x i32> @llvm.riscv.vssra.nxv4i32.nxv4i32(
- <vscale x 4 x i32> undef,
- <vscale x 4 x i32> %0,
- <vscale x 4 x i32> %1,
- i32 %2)
-
- ret <vscale x 4 x i32> %a
+ %0 = call <vscale x 4 x i32> @llvm.riscv.vssra.nxv4i32.nxv4i32.i32(<vscale x 4 x i32> poison, <vscale x 4 x i32> %op1, <vscale x 4 x i32> %shift, i32 0, i32 %vl)
+ ret <vscale x 4 x i32> %0
}
-declare <vscale x 4 x i32> @llvm.riscv.vssra.mask.nxv4i32.nxv4i32(
- <vscale x 4 x i32>,
- <vscale x 4 x i32>,
- <vscale x 4 x i32>,
- <vscale x 4 x i1>,
- i32,
- i32);
+declare <vscale x 4 x i32> @llvm.riscv.vssra.nxv4i32.nxv4i32.i32(<vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, i32 immarg, i32)
-define <vscale x 4 x i32> @intrinsic_vssra_mask_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vssra_mask_vv_nxv4i32_nxv4i32_nxv4i32:
+define <vscale x 4 x i32> @test_vssra_vx_i32m2(<vscale x 4 x i32> %op1, i32 %shift, i32 %vl) {
+; CHECK-LABEL: test_vssra_vx_i32m2:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu
-; CHECK-NEXT: vssra.vv v8, v10, v12, v0.t
+; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
+; CHECK-NEXT: csrwi vxrm, 0
+; CHECK-NEXT: vssra.vx v8, v8, a0
; CHECK-NEXT: ret
entry:
- %a = call <vscale x 4 x i32> @llvm.riscv.vssra.mask.nxv4i32.nxv4i32(
- <vscale x 4 x i32> %0,
- <vscale x 4 x i32> %1,
- <vscale x 4 x i32> %2,
- <vscale x 4 x i1> %3,
- i32 %4, i32 1)
-
- ret <vscale x 4 x i32> %a
+ %0 = call <vscale x 4 x i32> @llvm.riscv.vssra.nxv4i32.i32.i32(<vscale x 4 x i32> poison, <vscale x 4 x i32> %op1, i32 %shift, i32 0, i32 %vl)
+ ret <vscale x 4 x i32> %0
}
-declare <vscale x 8 x i32> @llvm.riscv.vssra.nxv8i32.nxv8i32(
- <vscale x 8 x i32>,
- <vscale x 8 x i32>,
- <vscale x 8 x i32>,
- i32);
+declare <vscale x 4 x i32> @llvm.riscv.vssra.nxv4i32.i32.i32(<vscale x 4 x i32>, <vscale x 4 x i32>, i32, i32 immarg, i32)
-define <vscale x 8 x i32> @intrinsic_vssra_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vssra_vv_nxv8i32_nxv8i32_nxv8i32:
+define <vscale x 8 x i32> @test_vssra_vv_i32m4(<vscale x 8 x i32> %op1, <vscale x 8 x i32> %shift, i32 %vl) {
+; CHECK-LABEL: test_vssra_vv_i32m4:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
+; CHECK-NEXT: csrwi vxrm, 0
; CHECK-NEXT: vssra.vv v8, v8, v12
; CHECK-NEXT: ret
entry:
- %a = call <vscale x 8 x i32> @llvm.riscv.vssra.nxv8i32.nxv8i32(
- <vscale x 8 x i32> undef,
- <vscale x 8 x i32> %0,
- <vscale x 8 x i32> %1,
- i32 %2)
-
- ret <vscale x 8 x i32> %a
+ %0 = call <vscale x 8 x i32> @llvm.riscv.vssra.nxv8i32.nxv8i32.i32(<vscale x 8 x i32> poison, <vscale x 8 x i32> %op1, <vscale x 8 x i32> %shift, i32 0, i32 %vl)
+ ret <vscale x 8 x i32> %0
}
-declare <vscale x 8 x i32> @llvm.riscv.vssra.mask.nxv8i32.nxv8i32(
- <vscale x 8 x i32>,
- <vscale x 8 x i32>,
- <vscale x 8 x i32>,
- <vscale x 8 x i1>,
- i32,
- i32);
+declare <vscale x 8 x i32> @llvm.riscv.vssra.nxv8i32.nxv8i32.i32(<vscale x 8 x i32>, <vscale x 8 x i32>, <vscale x 8 x i32>, i32 immarg, i32)
-define <vscale x 8 x i32> @intrinsic_vssra_mask_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vssra_mask_vv_nxv8i32_nxv8i32_nxv8i32:
+define <vscale x 8 x i32> @test_vssra_vx_i32m4(<vscale x 8 x i32> %op1, i32 %shift, i32 %vl) {
+; CHECK-LABEL: test_vssra_vx_i32m4:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu
-; CHECK-NEXT: vssra.vv v8, v12, v16, v0.t
+; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma
+; CHECK-NEXT: csrwi vxrm, 0
+; CHECK-NEXT: vssra.vx v8, v8, a0
; CHECK-NEXT: ret
entry:
- %a = call <vscale x 8 x i32> @llvm.riscv.vssra.mask.nxv8i32.nxv8i32(
- <vscale x 8 x i32> %0,
- <vscale x 8 x i32> %1,
- <vscale x 8 x i32> %2,
- <vscale x 8 x i1> %3,
- i32 %4, i32 1)
-
- ret <vscale x 8 x i32> %a
+ %0 = call <vscale x 8 x i32> @llvm.riscv.vssra.nxv8i32.i32.i32(<vscale x 8 x i32> poison, <vscale x 8 x i32> %op1, i32 %shift, i32 0, i32 %vl)
+ ret <vscale x 8 x i32> %0
}
-declare <vscale x 16 x i32> @llvm.riscv.vssra.nxv16i32.nxv16i32(
- <vscale x 16 x i32>,
- <vscale x 16 x i32>,
- <vscale x 16 x i32>,
- i32);
+declare <vscale x 8 x i32> @llvm.riscv.vssra.nxv8i32.i32.i32(<vscale x 8 x i32>, <vscale x 8 x i32>, i32, i32 immarg, i32)
-define <vscale x 16 x i32> @intrinsic_vssra_vv_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vssra_vv_nxv16i32_nxv16i32_nxv16i32:
+define <vscale x 16 x i32> @test_vssra_vv_i32m8(<vscale x 16 x i32> %op1, <vscale x 16 x i32> %shift, i32 %vl) {
+; CHECK-LABEL: test_vssra_vv_i32m8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma
+; CHECK-NEXT: csrwi vxrm, 0
; CHECK-NEXT: vssra.vv v8, v8, v16
; CHECK-NEXT: ret
entry:
- %a = call <vscale x 16 x i32> @llvm.riscv.vssra.nxv16i32.nxv16i32(
- <vscale x 16 x i32> undef,
- <vscale x 16 x i32> %0,
- <vscale x 16 x i32> %1,
- i32 %2)
-
- ret <vscale x 16 x i32> %a
-}
-
-declare <vscale x 16 x i32> @llvm.riscv.vssra.mask.nxv16i32.nxv16i32(
- <vscale x 16 x i32>,
- <vscale x 16 x i32>,
- <vscale x 16 x i32>,
- <vscale x 16 x i1>,
- i32,
- i32);
-
-define <vscale x 16 x i32> @intrinsic_vssra_mask_vv_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vssra_mask_vv_nxv16i32_nxv16i32_nxv16i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vl8re32.v v24, (a0)
-; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu
-; CHECK-NEXT: vssra.vv v8, v16, v24, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 16 x i32> @llvm.riscv.vssra.mask.nxv16i32.nxv16i32(
- <vscale x 16 x i32> %0,
- <vscale x 16 x i32> %1,
- <vscale x 16 x i32> %2,
- <vscale x 16 x i1> %3,
- i32 %4, i32 1)
-
- ret <vscale x 16 x i32> %a
-}
-
-declare <vscale x 1 x i8> @llvm.riscv.vssra.nxv1i8(
- <vscale x 1 x i8>,
- <vscale x 1 x i8>,
- i32,
- i32);
-
-define <vscale x 1 x i8> @intrinsic_vssra_vx_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, i32 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vssra_vx_nxv1i8_nxv1i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
-; CHECK-NEXT: vssra.vx v8, v8, a0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i8> @llvm.riscv.vssra.nxv1i8(
- <vscale x 1 x i8> undef,
- <vscale x 1 x i8> %0,
- i32 %1,
- i32 %2)
-
- ret <vscale x 1 x i8> %a
-}
-
-declare <vscale x 1 x i8> @llvm.riscv.vssra.mask.nxv1i8(
- <vscale x 1 x i8>,
- <vscale x 1 x i8>,
- i32,
- <vscale x 1 x i1>,
- i32,
- i32);
-
-define <vscale x 1 x i8> @intrinsic_vssra_mask_vx_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i32 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vssra_mask_vx_nxv1i8_nxv1i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
-; CHECK-NEXT: vssra.vx v8, v9, a0, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i8> @llvm.riscv.vssra.mask.nxv1i8(
- <vscale x 1 x i8> %0,
- <vscale x 1 x i8> %1,
- i32 %2,
- <vscale x 1 x i1> %3,
- i32 %4, i32 1)
-
- ret <vscale x 1 x i8> %a
-}
-
-declare <vscale x 2 x i8> @llvm.riscv.vssra.nxv2i8(
- <vscale x 2 x i8>,
- <vscale x 2 x i8>,
- i32,
- i32);
-
-define <vscale x 2 x i8> @intrinsic_vssra_vx_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, i32 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vssra_vx_nxv2i8_nxv2i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
-; CHECK-NEXT: vssra.vx v8, v8, a0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i8> @llvm.riscv.vssra.nxv2i8(
- <vscale x 2 x i8> undef,
- <vscale x 2 x i8> %0,
- i32 %1,
- i32 %2)
-
- ret <vscale x 2 x i8> %a
+ %0 = call <vscale x 16 x i32> @llvm.riscv.vssra.nxv16i32.nxv16i32.i32(<vscale x 16 x i32> poison, <vscale x 16 x i32> %op1, <vscale x 16 x i32> %shift, i32 0, i32 %vl)
+ ret <vscale x 16 x i32> %0
}
-declare <vscale x 2 x i8> @llvm.riscv.vssra.mask.nxv2i8(
- <vscale x 2 x i8>,
- <vscale x 2 x i8>,
- i32,
- <vscale x 2 x i1>,
- i32,
- i32);
+declare <vscale x 16 x i32> @llvm.riscv.vssra.nxv16i32.nxv16i32.i32(<vscale x 16 x i32>, <vscale x 16 x i32>, <vscale x 16 x i32>, i32 immarg, i32)
-define <vscale x 2 x i8> @intrinsic_vssra_mask_vx_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, i32 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vssra_mask_vx_nxv2i8_nxv2i8:
+define <vscale x 16 x i32> @test_vssra_vx_i32m8(<vscale x 16 x i32> %op1, i32 %shift, i32 %vl) {
+; CHECK-LABEL: test_vssra_vx_i32m8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
-; CHECK-NEXT: vssra.vx v8, v9, a0, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i8> @llvm.riscv.vssra.mask.nxv2i8(
- <vscale x 2 x i8> %0,
- <vscale x 2 x i8> %1,
- i32 %2,
- <vscale x 2 x i1> %3,
- i32 %4, i32 1)
-
- ret <vscale x 2 x i8> %a
-}
-
-declare <vscale x 4 x i8> @llvm.riscv.vssra.nxv4i8(
- <vscale x 4 x i8>,
- <vscale x 4 x i8>,
- i32,
- i32);
-
-define <vscale x 4 x i8> @intrinsic_vssra_vx_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, i32 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vssra_vx_nxv4i8_nxv4i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
+; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma
+; CHECK-NEXT: csrwi vxrm, 0
; CHECK-NEXT: vssra.vx v8, v8, a0
; CHECK-NEXT: ret
entry:
- %a = call <vscale x 4 x i8> @llvm.riscv.vssra.nxv4i8(
- <vscale x 4 x i8> undef,
- <vscale x 4 x i8> %0,
- i32 %1,
- i32 %2)
-
- ret <vscale x 4 x i8> %a
+ %0 = call <vscale x 16 x i32> @llvm.riscv.vssra.nxv16i32.i32.i32(<vscale x 16 x i32> poison, <vscale x 16 x i32> %op1, i32 %shift, i32 0, i32 %vl)
+ ret <vscale x 16 x i32> %0
}
-declare <vscale x 4 x i8> @llvm.riscv.vssra.mask.nxv4i8(
- <vscale x 4 x i8>,
- <vscale x 4 x i8>,
- i32,
- <vscale x 4 x i1>,
- i32,
- i32);
+declare <vscale x 16 x i32> @llvm.riscv.vssra.nxv16i32.i32.i32(<vscale x 16 x i32>, <vscale x 16 x i32>, i32, i32 immarg, i32)
-define <vscale x 4 x i8> @intrinsic_vssra_mask_vx_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, i32 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vssra_mask_vx_nxv4i8_nxv4i8:
+define <vscale x 1 x i64> @test_vssra_vv_i64m1(<vscale x 1 x i64> %op1, <vscale x 1 x i64> %shift, i32 %vl) {
+; CHECK-LABEL: test_vssra_vv_i64m1:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
-; CHECK-NEXT: vssra.vx v8, v9, a0, v0.t
+; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
+; CHECK-NEXT: csrwi vxrm, 0
+; CHECK-NEXT: vssra.vv v8, v8, v9
; CHECK-NEXT: ret
entry:
- %a = call <vscale x 4 x i8> @llvm.riscv.vssra.mask.nxv4i8(
- <vscale x 4 x i8> %0,
- <vscale x 4 x i8> %1,
- i32 %2,
- <vscale x 4 x i1> %3,
- i32 %4, i32 1)
-
- ret <vscale x 4 x i8> %a
+ %0 = call <vscale x 1 x i64> @llvm.riscv.vssra.nxv1i64.nxv1i64.i32(<vscale x 1 x i64> poison, <vscale x 1 x i64> %op1, <vscale x 1 x i64> %shift, i32 0, i32 %vl)
+ ret <vscale x 1 x i64> %0
}
-declare <vscale x 8 x i8> @llvm.riscv.vssra.nxv8i8(
- <vscale x 8 x i8>,
- <vscale x 8 x i8>,
- i32,
- i32);
+declare <vscale x 1 x i64> @llvm.riscv.vssra.nxv1i64.nxv1i64.i32(<vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, i32 immarg, i32)
-define <vscale x 8 x i8> @intrinsic_vssra_vx_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, i32 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vssra_vx_nxv8i8_nxv8i8:
+define <vscale x 1 x i64> @test_vssra_vx_i64m1(<vscale x 1 x i64> %op1, i32 %shift, i32 %vl) {
+; CHECK-LABEL: test_vssra_vx_i64m1:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
+; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
+; CHECK-NEXT: csrwi vxrm, 0
; CHECK-NEXT: vssra.vx v8, v8, a0
; CHECK-NEXT: ret
entry:
- %a = call <vscale x 8 x i8> @llvm.riscv.vssra.nxv8i8(
- <vscale x 8 x i8> undef,
- <vscale x 8 x i8> %0,
- i32 %1,
- i32 %2)
-
- ret <vscale x 8 x i8> %a
+ %0 = call <vscale x 1 x i64> @llvm.riscv.vssra.nxv1i64.i32.i32(<vscale x 1 x i64> poison, <vscale x 1 x i64> %op1, i32 %shift, i32 0, i32 %vl)
+ ret <vscale x 1 x i64> %0
}
-declare <vscale x 8 x i8> @llvm.riscv.vssra.mask.nxv8i8(
- <vscale x 8 x i8>,
- <vscale x 8 x i8>,
- i32,
- <vscale x 8 x i1>,
- i32,
- i32);
+declare <vscale x 1 x i64> @llvm.riscv.vssra.nxv1i64.i32.i32(<vscale x 1 x i64>, <vscale x 1 x i64>, i32, i32 immarg, i32)
-define <vscale x 8 x i8> @intrinsic_vssra_mask_vx_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, i32 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vssra_mask_vx_nxv8i8_nxv8i8:
+define <vscale x 2 x i64> @test_vssra_vv_i64m2(<vscale x 2 x i64> %op1, <vscale x 2 x i64> %shift, i32 %vl) {
+; CHECK-LABEL: test_vssra_vv_i64m2:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
-; CHECK-NEXT: vssra.vx v8, v9, a0, v0.t
+; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
+; CHECK-NEXT: csrwi vxrm, 0
+; CHECK-NEXT: vssra.vv v8, v8, v10
; CHECK-NEXT: ret
entry:
- %a = call <vscale x 8 x i8> @llvm.riscv.vssra.mask.nxv8i8(
- <vscale x 8 x i8> %0,
- <vscale x 8 x i8> %1,
- i32 %2,
- <vscale x 8 x i1> %3,
- i32 %4, i32 1)
-
- ret <vscale x 8 x i8> %a
+ %0 = call <vscale x 2 x i64> @llvm.riscv.vssra.nxv2i64.nxv2i64.i32(<vscale x 2 x i64> poison, <vscale x 2 x i64> %op1, <vscale x 2 x i64> %shift, i32 0, i32 %vl)
+ ret <vscale x 2 x i64> %0
}
-declare <vscale x 16 x i8> @llvm.riscv.vssra.nxv16i8(
- <vscale x 16 x i8>,
- <vscale x 16 x i8>,
- i32,
- i32);
+declare <vscale x 2 x i64> @llvm.riscv.vssra.nxv2i64.nxv2i64.i32(<vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, i32 immarg, i32)
-define <vscale x 16 x i8> @intrinsic_vssra_vx_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, i32 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vssra_vx_nxv16i8_nxv16i8:
+define <vscale x 2 x i64> @test_vssra_vx_i64m2(<vscale x 2 x i64> %op1, i32 %shift, i32 %vl) {
+; CHECK-LABEL: test_vssra_vx_i64m2:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma
+; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma
+; CHECK-NEXT: csrwi vxrm, 0
; CHECK-NEXT: vssra.vx v8, v8, a0
; CHECK-NEXT: ret
entry:
- %a = call <vscale x 16 x i8> @llvm.riscv.vssra.nxv16i8(
- <vscale x 16 x i8> undef,
- <vscale x 16 x i8> %0,
- i32 %1,
- i32 %2)
-
- ret <vscale x 16 x i8> %a
+ %0 = call <vscale x 2 x i64> @llvm.riscv.vssra.nxv2i64.i32.i32(<vscale x 2 x i64> poison, <vscale x 2 x i64> %op1, i32 %shift, i32 0, i32 %vl)
+ ret <vscale x 2 x i64> %0
}
-declare <vscale x 16 x i8> @llvm.riscv.vssra.mask.nxv16i8(
- <vscale x 16 x i8>,
- <vscale x 16 x i8>,
- i32,
- <vscale x 16 x i1>,
- i32,
- i32);
+declare <vscale x 2 x i64> @llvm.riscv.vssra.nxv2i64.i32.i32(<vscale x 2 x i64>, <vscale x 2 x i64>, i32, i32 immarg, i32)
-define <vscale x 16 x i8> @intrinsic_vssra_mask_vx_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, i32 %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vssra_mask_vx_nxv16i8_nxv16i8:
+define <vscale x 4 x i64> @test_vssra_vv_i64m4(<vscale x 4 x i64> %op1, <vscale x 4 x i64> %shift, i32 %vl) {
+; CHECK-LABEL: test_vssra_vv_i64m4:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu
-; CHECK-NEXT: vssra.vx v8, v10, a0, v0.t
+; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
+; CHECK-NEXT: csrwi vxrm, 0
+; CHECK-NEXT: vssra.vv v8, v8, v12
; CHECK-NEXT: ret
entry:
- %a = call <vscale x 16 x i8> @llvm.riscv.vssra.mask.nxv16i8(
- <vscale x 16 x i8> %0,
- <vscale x 16 x i8> %1,
- i32 %2,
- <vscale x 16 x i1> %3,
- i32 %4, i32 1)
-
- ret <vscale x 16 x i8> %a
+ %0 = call <vscale x 4 x i64> @llvm.riscv.vssra.nxv4i64.nxv4i64.i32(<vscale x 4 x i64> poison, <vscale x 4 x i64> %op1, <vscale x 4 x i64> %shift, i32 0, i32 %vl)
+ ret <vscale x 4 x i64> %0
}
-declare <vscale x 32 x i8> @llvm.riscv.vssra.nxv32i8(
- <vscale x 32 x i8>,
- <vscale x 32 x i8>,
- i32,
- i32);
+declare <vscale x 4 x i64> @llvm.riscv.vssra.nxv4i64.nxv4i64.i32(<vscale x 4 x i64>, <vscale x 4 x i64>, <vscale x 4 x i64>, i32 immarg, i32)
-define <vscale x 32 x i8> @intrinsic_vssra_vx_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, i32 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vssra_vx_nxv32i8_nxv32i8:
+define <vscale x 4 x i64> @test_vssra_vx_i64m4(<vscale x 4 x i64> %op1, i32 %shift, i32 %vl) {
+; CHECK-LABEL: test_vssra_vx_i64m4:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma
+; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma
+; CHECK-NEXT: csrwi vxrm, 0
; CHECK-NEXT: vssra.vx v8, v8, a0
; CHECK-NEXT: ret
entry:
- %a = call <vscale x 32 x i8> @llvm.riscv.vssra.nxv32i8(
- <vscale x 32 x i8> undef,
- <vscale x 32 x i8> %0,
- i32 %1,
- i32 %2)
-
- ret <vscale x 32 x i8> %a
+ %0 = call <vscale x 4 x i64> @llvm.riscv.vssra.nxv4i64.i32.i32(<vscale x 4 x i64> poison, <vscale x 4 x i64> %op1, i32 %shift, i32 0, i32 %vl)
+ ret <vscale x 4 x i64> %0
}
-declare <vscale x 32 x i8> @llvm.riscv.vssra.mask.nxv32i8(
- <vscale x 32 x i8>,
- <vscale x 32 x i8>,
- i32,
- <vscale x 32 x i1>,
- i32,
- i32);
+declare <vscale x 4 x i64> @llvm.riscv.vssra.nxv4i64.i32.i32(<vscale x 4 x i64>, <vscale x 4 x i64>, i32, i32 immarg, i32)
-define <vscale x 32 x i8> @intrinsic_vssra_mask_vx_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, i32 %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vssra_mask_vx_nxv32i8_nxv32i8:
+define <vscale x 8 x i64> @test_vssra_vv_i64m8(<vscale x 8 x i64> %op1, <vscale x 8 x i64> %shift, i32 %vl) {
+; CHECK-LABEL: test_vssra_vv_i64m8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu
-; CHECK-NEXT: vssra.vx v8, v12, a0, v0.t
+; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; CHECK-NEXT: csrwi vxrm, 0
+; CHECK-NEXT: vssra.vv v8, v8, v16
; CHECK-NEXT: ret
entry:
- %a = call <vscale x 32 x i8> @llvm.riscv.vssra.mask.nxv32i8(
- <vscale x 32 x i8> %0,
- <vscale x 32 x i8> %1,
- i32 %2,
- <vscale x 32 x i1> %3,
- i32 %4, i32 1)
-
- ret <vscale x 32 x i8> %a
+ %0 = call <vscale x 8 x i64> @llvm.riscv.vssra.nxv8i64.nxv8i64.i32(<vscale x 8 x i64> poison, <vscale x 8 x i64> %op1, <vscale x 8 x i64> %shift, i32 0, i32 %vl)
+ ret <vscale x 8 x i64> %0
}
-declare <vscale x 64 x i8> @llvm.riscv.vssra.nxv64i8(
- <vscale x 64 x i8>,
- <vscale x 64 x i8>,
- i32,
- i32);
+declare <vscale x 8 x i64> @llvm.riscv.vssra.nxv8i64.nxv8i64.i32(<vscale x 8 x i64>, <vscale x 8 x i64>, <vscale x 8 x i64>, i32 immarg, i32)
-define <vscale x 64 x i8> @intrinsic_vssra_vx_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, i32 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vssra_vx_nxv64i8_nxv64i8:
+define <vscale x 8 x i64> @test_vssra_vx_i64m8(<vscale x 8 x i64> %op1, i32 %shift, i32 %vl) {
+; CHECK-LABEL: test_vssra_vx_i64m8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma
+; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, ma
+; CHECK-NEXT: csrwi vxrm, 0
; CHECK-NEXT: vssra.vx v8, v8, a0
; CHECK-NEXT: ret
entry:
- %a = call <vscale x 64 x i8> @llvm.riscv.vssra.nxv64i8(
- <vscale x 64 x i8> undef,
- <vscale x 64 x i8> %0,
- i32 %1,
- i32 %2)
-
- ret <vscale x 64 x i8> %a
-}
-
-declare <vscale x 64 x i8> @llvm.riscv.vssra.mask.nxv64i8(
- <vscale x 64 x i8>,
- <vscale x 64 x i8>,
- i32,
- <vscale x 64 x i1>,
- i32,
- i32);
-
-define <vscale x 64 x i8> @intrinsic_vssra_mask_vx_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, i32 %2, <vscale x 64 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vssra_mask_vx_nxv64i8_nxv64i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu
-; CHECK-NEXT: vssra.vx v8, v16, a0, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 64 x i8> @llvm.riscv.vssra.mask.nxv64i8(
- <vscale x 64 x i8> %0,
- <vscale x 64 x i8> %1,
- i32 %2,
- <vscale x 64 x i1> %3,
- i32 %4, i32 1)
-
- ret <vscale x 64 x i8> %a
+ %0 = call <vscale x 8 x i64> @llvm.riscv.vssra.nxv8i64.i32.i32(<vscale x 8 x i64> poison, <vscale x 8 x i64> %op1, i32 %shift, i32 0, i32 %vl)
+ ret <vscale x 8 x i64> %0
}
-declare <vscale x 1 x i16> @llvm.riscv.vssra.nxv1i16(
- <vscale x 1 x i16>,
- <vscale x 1 x i16>,
- i32,
- i32);
+declare <vscale x 8 x i64> @llvm.riscv.vssra.nxv8i64.i32.i32(<vscale x 8 x i64>, <vscale x 8 x i64>, i32, i32 immarg, i32)
-define <vscale x 1 x i16> @intrinsic_vssra_vx_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, i32 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vssra_vx_nxv1i16_nxv1i16:
+define <vscale x 1 x i8> @test_vssra_vv_i8mf8_m(<vscale x 1 x i1> %mask, <vscale x 1 x i8> %op1, <vscale x 1 x i8> %shift, i32 %vl) {
+; CHECK-LABEL: test_vssra_vv_i8mf8_m:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
-; CHECK-NEXT: vssra.vx v8, v8, a0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i16> @llvm.riscv.vssra.nxv1i16(
- <vscale x 1 x i16> undef,
- <vscale x 1 x i16> %0,
- i32 %1,
- i32 %2)
-
- ret <vscale x 1 x i16> %a
-}
-
-declare <vscale x 1 x i16> @llvm.riscv.vssra.mask.nxv1i16(
- <vscale x 1 x i16>,
- <vscale x 1 x i16>,
- i32,
- <vscale x 1 x i1>,
- i32,
- i32);
-
-define <vscale x 1 x i16> @intrinsic_vssra_mask_vx_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, i32 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vssra_mask_vx_nxv1i16_nxv1i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
-; CHECK-NEXT: vssra.vx v8, v9, a0, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i16> @llvm.riscv.vssra.mask.nxv1i16(
- <vscale x 1 x i16> %0,
- <vscale x 1 x i16> %1,
- i32 %2,
- <vscale x 1 x i1> %3,
- i32 %4, i32 1)
-
- ret <vscale x 1 x i16> %a
-}
-
-declare <vscale x 2 x i16> @llvm.riscv.vssra.nxv2i16(
- <vscale x 2 x i16>,
- <vscale x 2 x i16>,
- i32,
- i32);
-
-define <vscale x 2 x i16> @intrinsic_vssra_vx_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, i32 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vssra_vx_nxv2i16_nxv2i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
-; CHECK-NEXT: vssra.vx v8, v8, a0
+; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
+; CHECK-NEXT: csrwi vxrm, 0
+; CHECK-NEXT: vssra.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
entry:
- %a = call <vscale x 2 x i16> @llvm.riscv.vssra.nxv2i16(
- <vscale x 2 x i16> undef,
- <vscale x 2 x i16> %0,
- i32 %1,
- i32 %2)
-
- ret <vscale x 2 x i16> %a
+ %0 = call <vscale x 1 x i8> @llvm.riscv.vssra.mask.nxv1i8.nxv1i8.i32(<vscale x 1 x i8> poison, <vscale x 1 x i8> %op1, <vscale x 1 x i8> %shift, <vscale x 1 x i1> %mask, i32 0, i32 %vl, i32 3)
+ ret <vscale x 1 x i8> %0
}
-declare <vscale x 2 x i16> @llvm.riscv.vssra.mask.nxv2i16(
- <vscale x 2 x i16>,
- <vscale x 2 x i16>,
- i32,
- <vscale x 2 x i1>,
- i32,
- i32);
+declare <vscale x 1 x i8> @llvm.riscv.vssra.mask.nxv1i8.nxv1i8.i32(<vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i1>, i32 immarg, i32, i32 immarg)
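For the masked tests that follow, the IR now carries both the rounding-mode immediate (0 = rnu) and the trailing policy operand (3 = tail agnostic, mask agnostic). A hedged C sketch of the corresponding `_m` intrinsic call, assuming the post-change masked signature with an explicit vxrm argument:

#include <riscv_vector.h>

// Sketch under the assumption above: masked-off elements are agnostic,
// which matches the trailing "i32 3" policy operand of the masked
// llvm.riscv.vssra call in the test above.
vint8mf8_t shift_round_sat_m(vbool64_t mask, vint8mf8_t op1,
                             vuint8mf8_t shift, size_t vl) {
  return __riscv_vssra_vv_i8mf8_m(mask, op1, shift, 0 /* vxrm = rnu */, vl);
}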
-define <vscale x 2 x i16> @intrinsic_vssra_mask_vx_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, i32 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vssra_mask_vx_nxv2i16_nxv2i16:
+define <vscale x 1 x i8> @test_vssra_vx_i8mf8_m(<vscale x 1 x i1> %mask, <vscale x 1 x i8> %op1, i32 %shift, i32 %vl) {
+; CHECK-LABEL: test_vssra_vx_i8mf8_m:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT: vssra.vx v8, v9, a0, v0.t
+; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
+; CHECK-NEXT: csrwi vxrm, 0
+; CHECK-NEXT: vssra.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
entry:
- %a = call <vscale x 2 x i16> @llvm.riscv.vssra.mask.nxv2i16(
- <vscale x 2 x i16> %0,
- <vscale x 2 x i16> %1,
- i32 %2,
- <vscale x 2 x i1> %3,
- i32 %4, i32 1)
-
- ret <vscale x 2 x i16> %a
+ %0 = call <vscale x 1 x i8> @llvm.riscv.vssra.mask.nxv1i8.i32.i32(<vscale x 1 x i8> poison, <vscale x 1 x i8> %op1, i32 %shift, <vscale x 1 x i1> %mask, i32 0, i32 %vl, i32 3)
+ ret <vscale x 1 x i8> %0
}
-declare <vscale x 4 x i16> @llvm.riscv.vssra.nxv4i16(
- <vscale x 4 x i16>,
- <vscale x 4 x i16>,
- i32,
- i32);
+declare <vscale x 1 x i8> @llvm.riscv.vssra.mask.nxv1i8.i32.i32(<vscale x 1 x i8>, <vscale x 1 x i8>, i32, <vscale x 1 x i1>, i32 immarg, i32, i32 immarg)
-define <vscale x 4 x i16> @intrinsic_vssra_vx_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, i32 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vssra_vx_nxv4i16_nxv4i16:
+define <vscale x 2 x i8> @test_vssra_vv_i8mf4_m(<vscale x 2 x i1> %mask, <vscale x 2 x i8> %op1, <vscale x 2 x i8> %shift, i32 %vl) {
+; CHECK-LABEL: test_vssra_vv_i8mf4_m:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
-; CHECK-NEXT: vssra.vx v8, v8, a0
+; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma
+; CHECK-NEXT: csrwi vxrm, 0
+; CHECK-NEXT: vssra.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
entry:
- %a = call <vscale x 4 x i16> @llvm.riscv.vssra.nxv4i16(
- <vscale x 4 x i16> undef,
- <vscale x 4 x i16> %0,
- i32 %1,
- i32 %2)
-
- ret <vscale x 4 x i16> %a
+ %0 = call <vscale x 2 x i8> @llvm.riscv.vssra.mask.nxv2i8.nxv2i8.i32(<vscale x 2 x i8> poison, <vscale x 2 x i8> %op1, <vscale x 2 x i8> %shift, <vscale x 2 x i1> %mask, i32 0, i32 %vl, i32 3)
+ ret <vscale x 2 x i8> %0
}
-declare <vscale x 4 x i16> @llvm.riscv.vssra.mask.nxv4i16(
- <vscale x 4 x i16>,
- <vscale x 4 x i16>,
- i32,
- <vscale x 4 x i1>,
- i32,
- i32);
+declare <vscale x 2 x i8> @llvm.riscv.vssra.mask.nxv2i8.nxv2i8.i32(<vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i1>, i32 immarg, i32, i32 immarg)
-define <vscale x 4 x i16> @intrinsic_vssra_mask_vx_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, i32 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vssra_mask_vx_nxv4i16_nxv4i16:
+define <vscale x 2 x i8> @test_vssra_vx_i8mf4_m(<vscale x 2 x i1> %mask, <vscale x 2 x i8> %op1, i32 %shift, i32 %vl) {
+; CHECK-LABEL: test_vssra_vx_i8mf4_m:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT: vssra.vx v8, v9, a0, v0.t
+; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
+; CHECK-NEXT: csrwi vxrm, 0
+; CHECK-NEXT: vssra.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
entry:
- %a = call <vscale x 4 x i16> @llvm.riscv.vssra.mask.nxv4i16(
- <vscale x 4 x i16> %0,
- <vscale x 4 x i16> %1,
- i32 %2,
- <vscale x 4 x i1> %3,
- i32 %4, i32 1)
-
- ret <vscale x 4 x i16> %a
+ %0 = call <vscale x 2 x i8> @llvm.riscv.vssra.mask.nxv2i8.i32.i32(<vscale x 2 x i8> poison, <vscale x 2 x i8> %op1, i32 %shift, <vscale x 2 x i1> %mask, i32 0, i32 %vl, i32 3)
+ ret <vscale x 2 x i8> %0
}
-declare <vscale x 8 x i16> @llvm.riscv.vssra.nxv8i16(
- <vscale x 8 x i16>,
- <vscale x 8 x i16>,
- i32,
- i32);
+declare <vscale x 2 x i8> @llvm.riscv.vssra.mask.nxv2i8.i32.i32(<vscale x 2 x i8>, <vscale x 2 x i8>, i32, <vscale x 2 x i1>, i32 immarg, i32, i32 immarg)
-define <vscale x 8 x i16> @intrinsic_vssra_vx_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, i32 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vssra_vx_nxv8i16_nxv8i16:
+define <vscale x 4 x i8> @test_vssra_vv_i8mf2_m(<vscale x 4 x i1> %mask, <vscale x 4 x i8> %op1, <vscale x 4 x i8> %shift, i32 %vl) {
+; CHECK-LABEL: test_vssra_vv_i8mf2_m:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
-; CHECK-NEXT: vssra.vx v8, v8, a0
+; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
+; CHECK-NEXT: csrwi vxrm, 0
+; CHECK-NEXT: vssra.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
entry:
- %a = call <vscale x 8 x i16> @llvm.riscv.vssra.nxv8i16(
- <vscale x 8 x i16> undef,
- <vscale x 8 x i16> %0,
- i32 %1,
- i32 %2)
-
- ret <vscale x 8 x i16> %a
+ %0 = call <vscale x 4 x i8> @llvm.riscv.vssra.mask.nxv4i8.nxv4i8.i32(<vscale x 4 x i8> poison, <vscale x 4 x i8> %op1, <vscale x 4 x i8> %shift, <vscale x 4 x i1> %mask, i32 0, i32 %vl, i32 3)
+ ret <vscale x 4 x i8> %0
}
-declare <vscale x 8 x i16> @llvm.riscv.vssra.mask.nxv8i16(
- <vscale x 8 x i16>,
- <vscale x 8 x i16>,
- i32,
- <vscale x 8 x i1>,
- i32,
- i32);
+declare <vscale x 4 x i8> @llvm.riscv.vssra.mask.nxv4i8.nxv4i8.i32(<vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i1>, i32 immarg, i32, i32 immarg)
-define <vscale x 8 x i16> @intrinsic_vssra_mask_vx_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, i32 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vssra_mask_vx_nxv8i16_nxv8i16:
+define <vscale x 4 x i8> @test_vssra_vx_i8mf2_m(<vscale x 4 x i1> %mask, <vscale x 4 x i8> %op1, i32 %shift, i32 %vl) {
+; CHECK-LABEL: test_vssra_vx_i8mf2_m:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
-; CHECK-NEXT: vssra.vx v8, v10, a0, v0.t
+; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
+; CHECK-NEXT: csrwi vxrm, 0
+; CHECK-NEXT: vssra.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
entry:
- %a = call <vscale x 8 x i16> @llvm.riscv.vssra.mask.nxv8i16(
- <vscale x 8 x i16> %0,
- <vscale x 8 x i16> %1,
- i32 %2,
- <vscale x 8 x i1> %3,
- i32 %4, i32 1)
-
- ret <vscale x 8 x i16> %a
+ %0 = call <vscale x 4 x i8> @llvm.riscv.vssra.mask.nxv4i8.i32.i32(<vscale x 4 x i8> poison, <vscale x 4 x i8> %op1, i32 %shift, <vscale x 4 x i1> %mask, i32 0, i32 %vl, i32 3)
+ ret <vscale x 4 x i8> %0
}
-declare <vscale x 16 x i16> @llvm.riscv.vssra.nxv16i16(
- <vscale x 16 x i16>,
- <vscale x 16 x i16>,
- i32,
- i32);
+declare <vscale x 4 x i8> @llvm.riscv.vssra.mask.nxv4i8.i32.i32(<vscale x 4 x i8>, <vscale x 4 x i8>, i32, <vscale x 4 x i1>, i32 immarg, i32, i32 immarg)
-define <vscale x 16 x i16> @intrinsic_vssra_vx_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, i32 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vssra_vx_nxv16i16_nxv16i16:
+define <vscale x 8 x i8> @test_vssra_vv_i8m1_m(<vscale x 8 x i1> %mask, <vscale x 8 x i8> %op1, <vscale x 8 x i8> %shift, i32 %vl) {
+; CHECK-LABEL: test_vssra_vv_i8m1_m:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma
-; CHECK-NEXT: vssra.vx v8, v8, a0
+; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma
+; CHECK-NEXT: csrwi vxrm, 0
+; CHECK-NEXT: vssra.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
entry:
- %a = call <vscale x 16 x i16> @llvm.riscv.vssra.nxv16i16(
- <vscale x 16 x i16> undef,
- <vscale x 16 x i16> %0,
- i32 %1,
- i32 %2)
-
- ret <vscale x 16 x i16> %a
+ %0 = call <vscale x 8 x i8> @llvm.riscv.vssra.mask.nxv8i8.nxv8i8.i32(<vscale x 8 x i8> poison, <vscale x 8 x i8> %op1, <vscale x 8 x i8> %shift, <vscale x 8 x i1> %mask, i32 0, i32 %vl, i32 3)
+ ret <vscale x 8 x i8> %0
}
-declare <vscale x 16 x i16> @llvm.riscv.vssra.mask.nxv16i16(
- <vscale x 16 x i16>,
- <vscale x 16 x i16>,
- i32,
- <vscale x 16 x i1>,
- i32,
- i32);
+declare <vscale x 8 x i8> @llvm.riscv.vssra.mask.nxv8i8.nxv8i8.i32(<vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i1>, i32 immarg, i32, i32 immarg)
-define <vscale x 16 x i16> @intrinsic_vssra_mask_vx_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, i32 %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vssra_mask_vx_nxv16i16_nxv16i16:
+define <vscale x 8 x i8> @test_vssra_vx_i8m1_m(<vscale x 8 x i1> %mask, <vscale x 8 x i8> %op1, i32 %shift, i32 %vl) {
+; CHECK-LABEL: test_vssra_vx_i8m1_m:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu
-; CHECK-NEXT: vssra.vx v8, v12, a0, v0.t
+; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
+; CHECK-NEXT: csrwi vxrm, 0
+; CHECK-NEXT: vssra.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
entry:
- %a = call <vscale x 16 x i16> @llvm.riscv.vssra.mask.nxv16i16(
- <vscale x 16 x i16> %0,
- <vscale x 16 x i16> %1,
- i32 %2,
- <vscale x 16 x i1> %3,
- i32 %4, i32 1)
-
- ret <vscale x 16 x i16> %a
+ %0 = call <vscale x 8 x i8> @llvm.riscv.vssra.mask.nxv8i8.i32.i32(<vscale x 8 x i8> poison, <vscale x 8 x i8> %op1, i32 %shift, <vscale x 8 x i1> %mask, i32 0, i32 %vl, i32 3)
+ ret <vscale x 8 x i8> %0
}
-declare <vscale x 32 x i16> @llvm.riscv.vssra.nxv32i16(
- <vscale x 32 x i16>,
- <vscale x 32 x i16>,
- i32,
- i32);
+declare <vscale x 8 x i8> @llvm.riscv.vssra.mask.nxv8i8.i32.i32(<vscale x 8 x i8>, <vscale x 8 x i8>, i32, <vscale x 8 x i1>, i32 immarg, i32, i32 immarg)
-define <vscale x 32 x i16> @intrinsic_vssra_vx_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, i32 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vssra_vx_nxv32i16_nxv32i16:
+define <vscale x 16 x i8> @test_vssra_vv_i8m2_m(<vscale x 16 x i1> %mask, <vscale x 16 x i8> %op1, <vscale x 16 x i8> %shift, i32 %vl) {
+; CHECK-LABEL: test_vssra_vv_i8m2_m:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma
-; CHECK-NEXT: vssra.vx v8, v8, a0
+; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma
+; CHECK-NEXT: csrwi vxrm, 0
+; CHECK-NEXT: vssra.vv v8, v8, v10, v0.t
; CHECK-NEXT: ret
entry:
- %a = call <vscale x 32 x i16> @llvm.riscv.vssra.nxv32i16(
- <vscale x 32 x i16> undef,
- <vscale x 32 x i16> %0,
- i32 %1,
- i32 %2)
-
- ret <vscale x 32 x i16> %a
+ %0 = call <vscale x 16 x i8> @llvm.riscv.vssra.mask.nxv16i8.nxv16i8.i32(<vscale x 16 x i8> poison, <vscale x 16 x i8> %op1, <vscale x 16 x i8> %shift, <vscale x 16 x i1> %mask, i32 0, i32 %vl, i32 3)
+ ret <vscale x 16 x i8> %0
}
-declare <vscale x 32 x i16> @llvm.riscv.vssra.mask.nxv32i16(
- <vscale x 32 x i16>,
- <vscale x 32 x i16>,
- i32,
- <vscale x 32 x i1>,
- i32,
- i32);
+declare <vscale x 16 x i8> @llvm.riscv.vssra.mask.nxv16i8.nxv16i8.i32(<vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i1>, i32 immarg, i32, i32 immarg)
-define <vscale x 32 x i16> @intrinsic_vssra_mask_vx_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, i32 %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vssra_mask_vx_nxv32i16_nxv32i16:
+define <vscale x 16 x i8> @test_vssra_vx_i8m2_m(<vscale x 16 x i1> %mask, <vscale x 16 x i8> %op1, i32 %shift, i32 %vl) {
+; CHECK-LABEL: test_vssra_vx_i8m2_m:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu
-; CHECK-NEXT: vssra.vx v8, v16, a0, v0.t
+; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma
+; CHECK-NEXT: csrwi vxrm, 0
+; CHECK-NEXT: vssra.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
entry:
- %a = call <vscale x 32 x i16> @llvm.riscv.vssra.mask.nxv32i16(
- <vscale x 32 x i16> %0,
- <vscale x 32 x i16> %1,
- i32 %2,
- <vscale x 32 x i1> %3,
- i32 %4, i32 1)
-
- ret <vscale x 32 x i16> %a
+ %0 = call <vscale x 16 x i8> @llvm.riscv.vssra.mask.nxv16i8.i32.i32(<vscale x 16 x i8> poison, <vscale x 16 x i8> %op1, i32 %shift, <vscale x 16 x i1> %mask, i32 0, i32 %vl, i32 3)
+ ret <vscale x 16 x i8> %0
}
-declare <vscale x 1 x i32> @llvm.riscv.vssra.nxv1i32(
- <vscale x 1 x i32>,
- <vscale x 1 x i32>,
- i32,
- i32);
+declare <vscale x 16 x i8> @llvm.riscv.vssra.mask.nxv16i8.i32.i32(<vscale x 16 x i8>, <vscale x 16 x i8>, i32, <vscale x 16 x i1>, i32 immarg, i32, i32 immarg)
-define <vscale x 1 x i32> @intrinsic_vssra_vx_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, i32 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vssra_vx_nxv1i32_nxv1i32:
+define <vscale x 32 x i8> @test_vssra_vv_i8m4_m(<vscale x 32 x i1> %mask, <vscale x 32 x i8> %op1, <vscale x 32 x i8> %shift, i32 %vl) {
+; CHECK-LABEL: test_vssra_vv_i8m4_m:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
-; CHECK-NEXT: vssra.vx v8, v8, a0
+; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma
+; CHECK-NEXT: csrwi vxrm, 0
+; CHECK-NEXT: vssra.vv v8, v8, v12, v0.t
; CHECK-NEXT: ret
entry:
- %a = call <vscale x 1 x i32> @llvm.riscv.vssra.nxv1i32(
- <vscale x 1 x i32> undef,
- <vscale x 1 x i32> %0,
- i32 %1,
- i32 %2)
-
- ret <vscale x 1 x i32> %a
+ %0 = call <vscale x 32 x i8> @llvm.riscv.vssra.mask.nxv32i8.nxv32i8.i32(<vscale x 32 x i8> poison, <vscale x 32 x i8> %op1, <vscale x 32 x i8> %shift, <vscale x 32 x i1> %mask, i32 0, i32 %vl, i32 3)
+ ret <vscale x 32 x i8> %0
}
-declare <vscale x 1 x i32> @llvm.riscv.vssra.mask.nxv1i32(
- <vscale x 1 x i32>,
- <vscale x 1 x i32>,
- i32,
- <vscale x 1 x i1>,
- i32,
- i32);
+declare <vscale x 32 x i8> @llvm.riscv.vssra.mask.nxv32i8.nxv32i8.i32(<vscale x 32 x i8>, <vscale x 32 x i8>, <vscale x 32 x i8>, <vscale x 32 x i1>, i32 immarg, i32, i32 immarg)
-define <vscale x 1 x i32> @intrinsic_vssra_mask_vx_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, i32 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vssra_mask_vx_nxv1i32_nxv1i32:
+define <vscale x 32 x i8> @test_vssra_vx_i8m4_m(<vscale x 32 x i1> %mask, <vscale x 32 x i8> %op1, i32 %shift, i32 %vl) {
+; CHECK-LABEL: test_vssra_vx_i8m4_m:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT: vssra.vx v8, v9, a0, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i32> @llvm.riscv.vssra.mask.nxv1i32(
- <vscale x 1 x i32> %0,
- <vscale x 1 x i32> %1,
- i32 %2,
- <vscale x 1 x i1> %3,
- i32 %4, i32 1)
-
- ret <vscale x 1 x i32> %a
-}
-
-declare <vscale x 2 x i32> @llvm.riscv.vssra.nxv2i32(
- <vscale x 2 x i32>,
- <vscale x 2 x i32>,
- i32,
- i32);
-
-define <vscale x 2 x i32> @intrinsic_vssra_vx_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, i32 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vssra_vx_nxv2i32_nxv2i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
-; CHECK-NEXT: vssra.vx v8, v8, a0
+; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma
+; CHECK-NEXT: csrwi vxrm, 0
+; CHECK-NEXT: vssra.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
entry:
- %a = call <vscale x 2 x i32> @llvm.riscv.vssra.nxv2i32(
- <vscale x 2 x i32> undef,
- <vscale x 2 x i32> %0,
- i32 %1,
- i32 %2)
-
- ret <vscale x 2 x i32> %a
+ %0 = call <vscale x 32 x i8> @llvm.riscv.vssra.mask.nxv32i8.i32.i32(<vscale x 32 x i8> poison, <vscale x 32 x i8> %op1, i32 %shift, <vscale x 32 x i1> %mask, i32 0, i32 %vl, i32 3)
+ ret <vscale x 32 x i8> %0
}
-declare <vscale x 2 x i32> @llvm.riscv.vssra.mask.nxv2i32(
- <vscale x 2 x i32>,
- <vscale x 2 x i32>,
- i32,
- <vscale x 2 x i1>,
- i32,
- i32);
+declare <vscale x 32 x i8> @llvm.riscv.vssra.mask.nxv32i8.i32.i32(<vscale x 32 x i8>, <vscale x 32 x i8>, i32, <vscale x 32 x i1>, i32 immarg, i32, i32 immarg)
-define <vscale x 2 x i32> @intrinsic_vssra_mask_vx_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, i32 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vssra_mask_vx_nxv2i32_nxv2i32:
+define <vscale x 64 x i8> @test_vssra_vv_i8m8_m(<vscale x 64 x i1> %mask, <vscale x 64 x i8> %op1, <vscale x 64 x i8> %shift, i32 %vl) {
+; CHECK-LABEL: test_vssra_vv_i8m8_m:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
-; CHECK-NEXT: vssra.vx v8, v9, a0, v0.t
+; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma
+; CHECK-NEXT: csrwi vxrm, 0
+; CHECK-NEXT: vssra.vv v8, v8, v16, v0.t
; CHECK-NEXT: ret
entry:
- %a = call <vscale x 2 x i32> @llvm.riscv.vssra.mask.nxv2i32(
- <vscale x 2 x i32> %0,
- <vscale x 2 x i32> %1,
- i32 %2,
- <vscale x 2 x i1> %3,
- i32 %4, i32 1)
-
- ret <vscale x 2 x i32> %a
+ %0 = call <vscale x 64 x i8> @llvm.riscv.vssra.mask.nxv64i8.nxv64i8.i32(<vscale x 64 x i8> poison, <vscale x 64 x i8> %op1, <vscale x 64 x i8> %shift, <vscale x 64 x i1> %mask, i32 0, i32 %vl, i32 3)
+ ret <vscale x 64 x i8> %0
}
-declare <vscale x 4 x i32> @llvm.riscv.vssra.nxv4i32(
- <vscale x 4 x i32>,
- <vscale x 4 x i32>,
- i32,
- i32);
+declare <vscale x 64 x i8> @llvm.riscv.vssra.mask.nxv64i8.nxv64i8.i32(<vscale x 64 x i8>, <vscale x 64 x i8>, <vscale x 64 x i8>, <vscale x 64 x i1>, i32 immarg, i32, i32 immarg)
-define <vscale x 4 x i32> @intrinsic_vssra_vx_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, i32 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vssra_vx_nxv4i32_nxv4i32:
+define <vscale x 64 x i8> @test_vssra_vx_i8m8_m(<vscale x 64 x i1> %mask, <vscale x 64 x i8> %op1, i32 %shift, i32 %vl) {
+; CHECK-LABEL: test_vssra_vx_i8m8_m:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
-; CHECK-NEXT: vssra.vx v8, v8, a0
+; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma
+; CHECK-NEXT: csrwi vxrm, 0
+; CHECK-NEXT: vssra.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
entry:
- %a = call <vscale x 4 x i32> @llvm.riscv.vssra.nxv4i32(
- <vscale x 4 x i32> undef,
- <vscale x 4 x i32> %0,
- i32 %1,
- i32 %2)
-
- ret <vscale x 4 x i32> %a
+ %0 = call <vscale x 64 x i8> @llvm.riscv.vssra.mask.nxv64i8.i32.i32(<vscale x 64 x i8> poison, <vscale x 64 x i8> %op1, i32 %shift, <vscale x 64 x i1> %mask, i32 0, i32 %vl, i32 3)
+ ret <vscale x 64 x i8> %0
}
-declare <vscale x 4 x i32> @llvm.riscv.vssra.mask.nxv4i32(
- <vscale x 4 x i32>,
- <vscale x 4 x i32>,
- i32,
- <vscale x 4 x i1>,
- i32,
- i32);
+declare <vscale x 64 x i8> @llvm.riscv.vssra.mask.nxv64i8.i32.i32(<vscale x 64 x i8>, <vscale x 64 x i8>, i32, <vscale x 64 x i1>, i32 immarg, i32, i32 immarg)
-define <vscale x 4 x i32> @intrinsic_vssra_mask_vx_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, i32 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vssra_mask_vx_nxv4i32_nxv4i32:
+define <vscale x 1 x i16> @test_vssra_vv_i16mf4_m(<vscale x 1 x i1> %mask, <vscale x 1 x i16> %op1, <vscale x 1 x i16> %shift, i32 %vl) {
+; CHECK-LABEL: test_vssra_vv_i16mf4_m:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
-; CHECK-NEXT: vssra.vx v8, v10, a0, v0.t
+; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
+; CHECK-NEXT: csrwi vxrm, 0
+; CHECK-NEXT: vssra.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
entry:
- %a = call <vscale x 4 x i32> @llvm.riscv.vssra.mask.nxv4i32(
- <vscale x 4 x i32> %0,
- <vscale x 4 x i32> %1,
- i32 %2,
- <vscale x 4 x i1> %3,
- i32 %4, i32 1)
-
- ret <vscale x 4 x i32> %a
+ %0 = call <vscale x 1 x i16> @llvm.riscv.vssra.mask.nxv1i16.nxv1i16.i32(<vscale x 1 x i16> poison, <vscale x 1 x i16> %op1, <vscale x 1 x i16> %shift, <vscale x 1 x i1> %mask, i32 0, i32 %vl, i32 3)
+ ret <vscale x 1 x i16> %0
}
-declare <vscale x 8 x i32> @llvm.riscv.vssra.nxv8i32(
- <vscale x 8 x i32>,
- <vscale x 8 x i32>,
- i32,
- i32);
+declare <vscale x 1 x i16> @llvm.riscv.vssra.mask.nxv1i16.nxv1i16.i32(<vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i1>, i32 immarg, i32, i32 immarg)
-define <vscale x 8 x i32> @intrinsic_vssra_vx_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, i32 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vssra_vx_nxv8i32_nxv8i32:
+define <vscale x 1 x i16> @test_vssra_vx_i16mf4_m(<vscale x 1 x i1> %mask, <vscale x 1 x i16> %op1, i32 %shift, i32 %vl) {
+; CHECK-LABEL: test_vssra_vx_i16mf4_m:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma
-; CHECK-NEXT: vssra.vx v8, v8, a0
+; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
+; CHECK-NEXT: csrwi vxrm, 0
+; CHECK-NEXT: vssra.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
entry:
- %a = call <vscale x 8 x i32> @llvm.riscv.vssra.nxv8i32(
- <vscale x 8 x i32> undef,
- <vscale x 8 x i32> %0,
- i32 %1,
- i32 %2)
-
- ret <vscale x 8 x i32> %a
+ %0 = call <vscale x 1 x i16> @llvm.riscv.vssra.mask.nxv1i16.i32.i32(<vscale x 1 x i16> poison, <vscale x 1 x i16> %op1, i32 %shift, <vscale x 1 x i1> %mask, i32 0, i32 %vl, i32 3)
+ ret <vscale x 1 x i16> %0
}
-declare <vscale x 8 x i32> @llvm.riscv.vssra.mask.nxv8i32(
- <vscale x 8 x i32>,
- <vscale x 8 x i32>,
- i32,
- <vscale x 8 x i1>,
- i32,
- i32);
+declare <vscale x 1 x i16> @llvm.riscv.vssra.mask.nxv1i16.i32.i32(<vscale x 1 x i16>, <vscale x 1 x i16>, i32, <vscale x 1 x i1>, i32 immarg, i32, i32 immarg)
-define <vscale x 8 x i32> @intrinsic_vssra_mask_vx_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, i32 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vssra_mask_vx_nxv8i32_nxv8i32:
+define <vscale x 2 x i16> @test_vssra_vv_i16mf2_m(<vscale x 2 x i1> %mask, <vscale x 2 x i16> %op1, <vscale x 2 x i16> %shift, i32 %vl) {
+; CHECK-LABEL: test_vssra_vv_i16mf2_m:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu
-; CHECK-NEXT: vssra.vx v8, v12, a0, v0.t
+; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
+; CHECK-NEXT: csrwi vxrm, 0
+; CHECK-NEXT: vssra.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
entry:
- %a = call <vscale x 8 x i32> @llvm.riscv.vssra.mask.nxv8i32(
- <vscale x 8 x i32> %0,
- <vscale x 8 x i32> %1,
- i32 %2,
- <vscale x 8 x i1> %3,
- i32 %4, i32 1)
-
- ret <vscale x 8 x i32> %a
+ %0 = call <vscale x 2 x i16> @llvm.riscv.vssra.mask.nxv2i16.nxv2i16.i32(<vscale x 2 x i16> poison, <vscale x 2 x i16> %op1, <vscale x 2 x i16> %shift, <vscale x 2 x i1> %mask, i32 0, i32 %vl, i32 3)
+ ret <vscale x 2 x i16> %0
}
-declare <vscale x 16 x i32> @llvm.riscv.vssra.nxv16i32(
- <vscale x 16 x i32>,
- <vscale x 16 x i32>,
- i32,
- i32);
+declare <vscale x 2 x i16> @llvm.riscv.vssra.mask.nxv2i16.nxv2i16.i32(<vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i1>, i32 immarg, i32, i32 immarg)
-define <vscale x 16 x i32> @intrinsic_vssra_vx_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, i32 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vssra_vx_nxv16i32_nxv16i32:
+define <vscale x 2 x i16> @test_vssra_vx_i16mf2_m(<vscale x 2 x i1> %mask, <vscale x 2 x i16> %op1, i32 %shift, i32 %vl) {
+; CHECK-LABEL: test_vssra_vx_i16mf2_m:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma
-; CHECK-NEXT: vssra.vx v8, v8, a0
+; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
+; CHECK-NEXT: csrwi vxrm, 0
+; CHECK-NEXT: vssra.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
entry:
- %a = call <vscale x 16 x i32> @llvm.riscv.vssra.nxv16i32(
- <vscale x 16 x i32> undef,
- <vscale x 16 x i32> %0,
- i32 %1,
- i32 %2)
-
- ret <vscale x 16 x i32> %a
+ %0 = call <vscale x 2 x i16> @llvm.riscv.vssra.mask.nxv2i16.i32.i32(<vscale x 2 x i16> poison, <vscale x 2 x i16> %op1, i32 %shift, <vscale x 2 x i1> %mask, i32 0, i32 %vl, i32 3)
+ ret <vscale x 2 x i16> %0
}
-declare <vscale x 16 x i32> @llvm.riscv.vssra.mask.nxv16i32(
- <vscale x 16 x i32>,
- <vscale x 16 x i32>,
- i32,
- <vscale x 16 x i1>,
- i32,
- i32);
+declare <vscale x 2 x i16> @llvm.riscv.vssra.mask.nxv2i16.i32.i32(<vscale x 2 x i16>, <vscale x 2 x i16>, i32, <vscale x 2 x i1>, i32 immarg, i32, i32 immarg)
-define <vscale x 16 x i32> @intrinsic_vssra_mask_vx_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, i32 %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vssra_mask_vx_nxv16i32_nxv16i32:
+define <vscale x 4 x i16> @test_vssra_vv_i16m1_m(<vscale x 4 x i1> %mask, <vscale x 4 x i16> %op1, <vscale x 4 x i16> %shift, i32 %vl) {
+; CHECK-LABEL: test_vssra_vv_i16m1_m:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu
-; CHECK-NEXT: vssra.vx v8, v16, a0, v0.t
+; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
+; CHECK-NEXT: csrwi vxrm, 0
+; CHECK-NEXT: vssra.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
entry:
- %a = call <vscale x 16 x i32> @llvm.riscv.vssra.mask.nxv16i32(
- <vscale x 16 x i32> %0,
- <vscale x 16 x i32> %1,
- i32 %2,
- <vscale x 16 x i1> %3,
- i32 %4, i32 1)
-
- ret <vscale x 16 x i32> %a
+ %0 = call <vscale x 4 x i16> @llvm.riscv.vssra.mask.nxv4i16.nxv4i16.i32(<vscale x 4 x i16> poison, <vscale x 4 x i16> %op1, <vscale x 4 x i16> %shift, <vscale x 4 x i1> %mask, i32 0, i32 %vl, i32 3)
+ ret <vscale x 4 x i16> %0
}
-declare <vscale x 1 x i64> @llvm.riscv.vssra.nxv1i64(
- <vscale x 1 x i64>,
- <vscale x 1 x i64>,
- i32,
- i32);
+declare <vscale x 4 x i16> @llvm.riscv.vssra.mask.nxv4i16.nxv4i16.i32(<vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i1>, i32 immarg, i32, i32 immarg)
-define <vscale x 1 x i64> @intrinsic_vssra_vx_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, i32 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vssra_vx_nxv1i64_nxv1i64:
+define <vscale x 4 x i16> @test_vssra_vx_i16m1_m(<vscale x 4 x i1> %mask, <vscale x 4 x i16> %op1, i32 %shift, i32 %vl) {
+; CHECK-LABEL: test_vssra_vx_i16m1_m:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
-; CHECK-NEXT: vssra.vx v8, v8, a0
+; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
+; CHECK-NEXT: csrwi vxrm, 0
+; CHECK-NEXT: vssra.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
entry:
- %a = call <vscale x 1 x i64> @llvm.riscv.vssra.nxv1i64(
- <vscale x 1 x i64> undef,
- <vscale x 1 x i64> %0,
- i32 %1,
- i32 %2)
-
- ret <vscale x 1 x i64> %a
+ %0 = call <vscale x 4 x i16> @llvm.riscv.vssra.mask.nxv4i16.i32.i32(<vscale x 4 x i16> poison, <vscale x 4 x i16> %op1, i32 %shift, <vscale x 4 x i1> %mask, i32 0, i32 %vl, i32 3)
+ ret <vscale x 4 x i16> %0
}
-declare <vscale x 1 x i64> @llvm.riscv.vssra.mask.nxv1i64(
- <vscale x 1 x i64>,
- <vscale x 1 x i64>,
- i32,
- <vscale x 1 x i1>,
- i32,
- i32);
+declare <vscale x 4 x i16> @llvm.riscv.vssra.mask.nxv4i16.i32.i32(<vscale x 4 x i16>, <vscale x 4 x i16>, i32, <vscale x 4 x i1>, i32 immarg, i32, i32 immarg)
-define <vscale x 1 x i64> @intrinsic_vssra_mask_vx_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i32 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vssra_mask_vx_nxv1i64_nxv1i64:
+define <vscale x 8 x i16> @test_vssra_vv_i16m2_m(<vscale x 8 x i1> %mask, <vscale x 8 x i16> %op1, <vscale x 8 x i16> %shift, i32 %vl) {
+; CHECK-LABEL: test_vssra_vv_i16m2_m:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
-; CHECK-NEXT: vssra.vx v8, v9, a0, v0.t
+; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
+; CHECK-NEXT: csrwi vxrm, 0
+; CHECK-NEXT: vssra.vv v8, v8, v10, v0.t
; CHECK-NEXT: ret
entry:
- %a = call <vscale x 1 x i64> @llvm.riscv.vssra.mask.nxv1i64(
- <vscale x 1 x i64> %0,
- <vscale x 1 x i64> %1,
- i32 %2,
- <vscale x 1 x i1> %3,
- i32 %4, i32 1)
-
- ret <vscale x 1 x i64> %a
+ %0 = call <vscale x 8 x i16> @llvm.riscv.vssra.mask.nxv8i16.nxv8i16.i32(<vscale x 8 x i16> poison, <vscale x 8 x i16> %op1, <vscale x 8 x i16> %shift, <vscale x 8 x i1> %mask, i32 0, i32 %vl, i32 3)
+ ret <vscale x 8 x i16> %0
}
-declare <vscale x 2 x i64> @llvm.riscv.vssra.nxv2i64(
- <vscale x 2 x i64>,
- <vscale x 2 x i64>,
- i32,
- i32);
+declare <vscale x 8 x i16> @llvm.riscv.vssra.mask.nxv8i16.nxv8i16.i32(<vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i1>, i32 immarg, i32, i32 immarg)
-define <vscale x 2 x i64> @intrinsic_vssra_vx_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, i32 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vssra_vx_nxv2i64_nxv2i64:
+define <vscale x 8 x i16> @test_vssra_vx_i16m2_m(<vscale x 8 x i1> %mask, <vscale x 8 x i16> %op1, i32 %shift, i32 %vl) {
+; CHECK-LABEL: test_vssra_vx_i16m2_m:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma
-; CHECK-NEXT: vssra.vx v8, v8, a0
+; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
+; CHECK-NEXT: csrwi vxrm, 0
+; CHECK-NEXT: vssra.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
entry:
- %a = call <vscale x 2 x i64> @llvm.riscv.vssra.nxv2i64(
- <vscale x 2 x i64> undef,
- <vscale x 2 x i64> %0,
- i32 %1,
- i32 %2)
-
- ret <vscale x 2 x i64> %a
+ %0 = call <vscale x 8 x i16> @llvm.riscv.vssra.mask.nxv8i16.i32.i32(<vscale x 8 x i16> poison, <vscale x 8 x i16> %op1, i32 %shift, <vscale x 8 x i1> %mask, i32 0, i32 %vl, i32 3)
+ ret <vscale x 8 x i16> %0
}
-declare <vscale x 2 x i64> @llvm.riscv.vssra.mask.nxv2i64(
- <vscale x 2 x i64>,
- <vscale x 2 x i64>,
- i32,
- <vscale x 2 x i1>,
- i32,
- i32);
+declare <vscale x 8 x i16> @llvm.riscv.vssra.mask.nxv8i16.i32.i32(<vscale x 8 x i16>, <vscale x 8 x i16>, i32, <vscale x 8 x i1>, i32 immarg, i32, i32 immarg)
-define <vscale x 2 x i64> @intrinsic_vssra_mask_vx_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, i32 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vssra_mask_vx_nxv2i64_nxv2i64:
+define <vscale x 16 x i16> @test_vssra_vv_i16m4_m(<vscale x 16 x i1> %mask, <vscale x 16 x i16> %op1, <vscale x 16 x i16> %shift, i32 %vl) {
+; CHECK-LABEL: test_vssra_vv_i16m4_m:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu
-; CHECK-NEXT: vssra.vx v8, v10, a0, v0.t
+; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma
+; CHECK-NEXT: csrwi vxrm, 0
+; CHECK-NEXT: vssra.vv v8, v8, v12, v0.t
; CHECK-NEXT: ret
entry:
- %a = call <vscale x 2 x i64> @llvm.riscv.vssra.mask.nxv2i64(
- <vscale x 2 x i64> %0,
- <vscale x 2 x i64> %1,
- i32 %2,
- <vscale x 2 x i1> %3,
- i32 %4, i32 1)
-
- ret <vscale x 2 x i64> %a
+ %0 = call <vscale x 16 x i16> @llvm.riscv.vssra.mask.nxv16i16.nxv16i16.i32(<vscale x 16 x i16> poison, <vscale x 16 x i16> %op1, <vscale x 16 x i16> %shift, <vscale x 16 x i1> %mask, i32 0, i32 %vl, i32 3)
+ ret <vscale x 16 x i16> %0
}
-declare <vscale x 4 x i64> @llvm.riscv.vssra.nxv4i64(
- <vscale x 4 x i64>,
- <vscale x 4 x i64>,
- i32,
- i32);
+declare <vscale x 16 x i16> @llvm.riscv.vssra.mask.nxv16i16.nxv16i16.i32(<vscale x 16 x i16>, <vscale x 16 x i16>, <vscale x 16 x i16>, <vscale x 16 x i1>, i32 immarg, i32, i32 immarg)
-define <vscale x 4 x i64> @intrinsic_vssra_vx_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, i32 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vssra_vx_nxv4i64_nxv4i64:
+define <vscale x 16 x i16> @test_vssra_vx_i16m4_m(<vscale x 16 x i1> %mask, <vscale x 16 x i16> %op1, i32 %shift, i32 %vl) {
+; CHECK-LABEL: test_vssra_vx_i16m4_m:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma
-; CHECK-NEXT: vssra.vx v8, v8, a0
+; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma
+; CHECK-NEXT: csrwi vxrm, 0
+; CHECK-NEXT: vssra.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
entry:
- %a = call <vscale x 4 x i64> @llvm.riscv.vssra.nxv4i64(
- <vscale x 4 x i64> undef,
- <vscale x 4 x i64> %0,
- i32 %1,
- i32 %2)
-
- ret <vscale x 4 x i64> %a
+ %0 = call <vscale x 16 x i16> @llvm.riscv.vssra.mask.nxv16i16.i32.i32(<vscale x 16 x i16> poison, <vscale x 16 x i16> %op1, i32 %shift, <vscale x 16 x i1> %mask, i32 0, i32 %vl, i32 3)
+ ret <vscale x 16 x i16> %0
}
-declare <vscale x 4 x i64> @llvm.riscv.vssra.mask.nxv4i64(
- <vscale x 4 x i64>,
- <vscale x 4 x i64>,
- i32,
- <vscale x 4 x i1>,
- i32,
- i32);
+declare <vscale x 16 x i16> @llvm.riscv.vssra.mask.nxv16i16.i32.i32(<vscale x 16 x i16>, <vscale x 16 x i16>, i32, <vscale x 16 x i1>, i32 immarg, i32, i32 immarg)
-define <vscale x 4 x i64> @intrinsic_vssra_mask_vx_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, i32 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vssra_mask_vx_nxv4i64_nxv4i64:
+define <vscale x 32 x i16> @test_vssra_vv_i16m8_m(<vscale x 32 x i1> %mask, <vscale x 32 x i16> %op1, <vscale x 32 x i16> %shift, i32 %vl) {
+; CHECK-LABEL: test_vssra_vv_i16m8_m:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu
-; CHECK-NEXT: vssra.vx v8, v12, a0, v0.t
+; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma
+; CHECK-NEXT: csrwi vxrm, 0
+; CHECK-NEXT: vssra.vv v8, v8, v16, v0.t
; CHECK-NEXT: ret
entry:
- %a = call <vscale x 4 x i64> @llvm.riscv.vssra.mask.nxv4i64(
- <vscale x 4 x i64> %0,
- <vscale x 4 x i64> %1,
- i32 %2,
- <vscale x 4 x i1> %3,
- i32 %4, i32 1)
-
- ret <vscale x 4 x i64> %a
+ %0 = call <vscale x 32 x i16> @llvm.riscv.vssra.mask.nxv32i16.nxv32i16.i32(<vscale x 32 x i16> poison, <vscale x 32 x i16> %op1, <vscale x 32 x i16> %shift, <vscale x 32 x i1> %mask, i32 0, i32 %vl, i32 3)
+ ret <vscale x 32 x i16> %0
}
-declare <vscale x 8 x i64> @llvm.riscv.vssra.nxv8i64(
- <vscale x 8 x i64>,
- <vscale x 8 x i64>,
- i32,
- i32);
+declare <vscale x 32 x i16> @llvm.riscv.vssra.mask.nxv32i16.nxv32i16.i32(<vscale x 32 x i16>, <vscale x 32 x i16>, <vscale x 32 x i16>, <vscale x 32 x i1>, i32 immarg, i32, i32 immarg)
-define <vscale x 8 x i64> @intrinsic_vssra_vx_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, i32 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vssra_vx_nxv8i64_nxv8i64:
+define <vscale x 32 x i16> @test_vssra_vx_i16m8_m(<vscale x 32 x i1> %mask, <vscale x 32 x i16> %op1, i32 %shift, i32 %vl) {
+; CHECK-LABEL: test_vssra_vx_i16m8_m:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, ma
-; CHECK-NEXT: vssra.vx v8, v8, a0
+; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma
+; CHECK-NEXT: csrwi vxrm, 0
+; CHECK-NEXT: vssra.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
entry:
- %a = call <vscale x 8 x i64> @llvm.riscv.vssra.nxv8i64(
- <vscale x 8 x i64> undef,
- <vscale x 8 x i64> %0,
- i32 %1,
- i32 %2)
-
- ret <vscale x 8 x i64> %a
+ %0 = call <vscale x 32 x i16> @llvm.riscv.vssra.mask.nxv32i16.i32.i32(<vscale x 32 x i16> poison, <vscale x 32 x i16> %op1, i32 %shift, <vscale x 32 x i1> %mask, i32 0, i32 %vl, i32 3)
+ ret <vscale x 32 x i16> %0
}
-declare <vscale x 8 x i64> @llvm.riscv.vssra.mask.nxv8i64(
- <vscale x 8 x i64>,
- <vscale x 8 x i64>,
- i32,
- <vscale x 8 x i1>,
- i32,
- i32);
+declare <vscale x 32 x i16> @llvm.riscv.vssra.mask.nxv32i16.i32.i32(<vscale x 32 x i16>, <vscale x 32 x i16>, i32, <vscale x 32 x i1>, i32 immarg, i32, i32 immarg)
-define <vscale x 8 x i64> @intrinsic_vssra_mask_vx_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, i32 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vssra_mask_vx_nxv8i64_nxv8i64:
+define <vscale x 1 x i32> @test_vssra_vv_i32mf2_m(<vscale x 1 x i1> %mask, <vscale x 1 x i32> %op1, <vscale x 1 x i32> %shift, i32 %vl) {
+; CHECK-LABEL: test_vssra_vv_i32mf2_m:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu
-; CHECK-NEXT: vssra.vx v8, v16, a0, v0.t
+; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
+; CHECK-NEXT: csrwi vxrm, 0
+; CHECK-NEXT: vssra.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
entry:
- %a = call <vscale x 8 x i64> @llvm.riscv.vssra.mask.nxv8i64(
- <vscale x 8 x i64> %0,
- <vscale x 8 x i64> %1,
- i32 %2,
- <vscale x 8 x i1> %3,
- i32 %4, i32 1)
-
- ret <vscale x 8 x i64> %a
+ %0 = call <vscale x 1 x i32> @llvm.riscv.vssra.mask.nxv1i32.nxv1i32.i32(<vscale x 1 x i32> poison, <vscale x 1 x i32> %op1, <vscale x 1 x i32> %shift, <vscale x 1 x i1> %mask, i32 0, i32 %vl, i32 3)
+ ret <vscale x 1 x i32> %0
}
-define <vscale x 1 x i8> @intrinsic_vssra_vi_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, i32 %1) nounwind {
-; CHECK-LABEL: intrinsic_vssra_vi_nxv1i8_nxv1i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
-; CHECK-NEXT: vssra.vi v8, v8, 9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i8> @llvm.riscv.vssra.nxv1i8(
- <vscale x 1 x i8> undef,
- <vscale x 1 x i8> %0,
- i32 9,
- i32 %1)
-
- ret <vscale x 1 x i8> %a
-}
+declare <vscale x 1 x i32> @llvm.riscv.vssra.mask.nxv1i32.nxv1i32.i32(<vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i1>, i32 immarg, i32, i32 immarg)
-define <vscale x 1 x i8> @intrinsic_vssra_mask_vi_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vssra_mask_vi_nxv1i8_nxv1i8_i8:
+define <vscale x 1 x i32> @test_vssra_vx_i32mf2_m(<vscale x 1 x i1> %mask, <vscale x 1 x i32> %op1, i32 %shift, i32 %vl) {
+; CHECK-LABEL: test_vssra_vx_i32mf2_m:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu
-; CHECK-NEXT: vssra.vi v8, v9, 9, v0.t
+; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
+; CHECK-NEXT: csrwi vxrm, 0
+; CHECK-NEXT: vssra.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
entry:
- %a = call <vscale x 1 x i8> @llvm.riscv.vssra.mask.nxv1i8(
- <vscale x 1 x i8> %0,
- <vscale x 1 x i8> %1,
- i32 9,
- <vscale x 1 x i1> %2,
- i32 %3, i32 1)
-
- ret <vscale x 1 x i8> %a
+ %0 = call <vscale x 1 x i32> @llvm.riscv.vssra.mask.nxv1i32.i32.i32(<vscale x 1 x i32> poison, <vscale x 1 x i32> %op1, i32 %shift, <vscale x 1 x i1> %mask, i32 0, i32 %vl, i32 3)
+ ret <vscale x 1 x i32> %0
}
-define <vscale x 2 x i8> @intrinsic_vssra_vi_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, i32 %1) nounwind {
-; CHECK-LABEL: intrinsic_vssra_vi_nxv2i8_nxv2i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma
-; CHECK-NEXT: vssra.vi v8, v8, 9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i8> @llvm.riscv.vssra.nxv2i8(
- <vscale x 2 x i8> undef,
- <vscale x 2 x i8> %0,
- i32 9,
- i32 %1)
-
- ret <vscale x 2 x i8> %a
-}
+declare <vscale x 1 x i32> @llvm.riscv.vssra.mask.nxv1i32.i32.i32(<vscale x 1 x i32>, <vscale x 1 x i32>, i32, <vscale x 1 x i1>, i32 immarg, i32, i32 immarg)
-define <vscale x 2 x i8> @intrinsic_vssra_mask_vi_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vssra_mask_vi_nxv2i8_nxv2i8_i8:
+define <vscale x 2 x i32> @test_vssra_vv_i32m1_m(<vscale x 2 x i1> %mask, <vscale x 2 x i32> %op1, <vscale x 2 x i32> %shift, i32 %vl) {
+; CHECK-LABEL: test_vssra_vv_i32m1_m:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu
-; CHECK-NEXT: vssra.vi v8, v9, 9, v0.t
+; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
+; CHECK-NEXT: csrwi vxrm, 0
+; CHECK-NEXT: vssra.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
entry:
- %a = call <vscale x 2 x i8> @llvm.riscv.vssra.mask.nxv2i8(
- <vscale x 2 x i8> %0,
- <vscale x 2 x i8> %1,
- i32 9,
- <vscale x 2 x i1> %2,
- i32 %3, i32 1)
-
- ret <vscale x 2 x i8> %a
+ %0 = call <vscale x 2 x i32> @llvm.riscv.vssra.mask.nxv2i32.nxv2i32.i32(<vscale x 2 x i32> poison, <vscale x 2 x i32> %op1, <vscale x 2 x i32> %shift, <vscale x 2 x i1> %mask, i32 0, i32 %vl, i32 3)
+ ret <vscale x 2 x i32> %0
}
-define <vscale x 4 x i8> @intrinsic_vssra_vi_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, i32 %1) nounwind {
-; CHECK-LABEL: intrinsic_vssra_vi_nxv4i8_nxv4i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
-; CHECK-NEXT: vssra.vi v8, v8, 9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i8> @llvm.riscv.vssra.nxv4i8(
- <vscale x 4 x i8> undef,
- <vscale x 4 x i8> %0,
- i32 9,
- i32 %1)
-
- ret <vscale x 4 x i8> %a
-}
+declare <vscale x 2 x i32> @llvm.riscv.vssra.mask.nxv2i32.nxv2i32.i32(<vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i1>, i32 immarg, i32, i32 immarg)
-define <vscale x 4 x i8> @intrinsic_vssra_mask_vi_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vssra_mask_vi_nxv4i8_nxv4i8_i8:
+define <vscale x 2 x i32> @test_vssra_vx_i32m1_m(<vscale x 2 x i1> %mask, <vscale x 2 x i32> %op1, i32 %shift, i32 %vl) {
+; CHECK-LABEL: test_vssra_vx_i32m1_m:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu
-; CHECK-NEXT: vssra.vi v8, v9, 9, v0.t
+; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
+; CHECK-NEXT: csrwi vxrm, 0
+; CHECK-NEXT: vssra.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
entry:
- %a = call <vscale x 4 x i8> @llvm.riscv.vssra.mask.nxv4i8(
- <vscale x 4 x i8> %0,
- <vscale x 4 x i8> %1,
- i32 9,
- <vscale x 4 x i1> %2,
- i32 %3, i32 1)
-
- ret <vscale x 4 x i8> %a
+ %0 = call <vscale x 2 x i32> @llvm.riscv.vssra.mask.nxv2i32.i32.i32(<vscale x 2 x i32> poison, <vscale x 2 x i32> %op1, i32 %shift, <vscale x 2 x i1> %mask, i32 0, i32 %vl, i32 3)
+ ret <vscale x 2 x i32> %0
}
-define <vscale x 8 x i8> @intrinsic_vssra_vi_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, i32 %1) nounwind {
-; CHECK-LABEL: intrinsic_vssra_vi_nxv8i8_nxv8i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma
-; CHECK-NEXT: vssra.vi v8, v8, 9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i8> @llvm.riscv.vssra.nxv8i8(
- <vscale x 8 x i8> undef,
- <vscale x 8 x i8> %0,
- i32 9,
- i32 %1)
-
- ret <vscale x 8 x i8> %a
-}
+declare <vscale x 2 x i32> @llvm.riscv.vssra.mask.nxv2i32.i32.i32(<vscale x 2 x i32>, <vscale x 2 x i32>, i32, <vscale x 2 x i1>, i32 immarg, i32, i32 immarg)
-define <vscale x 8 x i8> @intrinsic_vssra_mask_vi_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vssra_mask_vi_nxv8i8_nxv8i8_i8:
+define <vscale x 4 x i32> @test_vssra_vv_i32m2_m(<vscale x 4 x i1> %mask, <vscale x 4 x i32> %op1, <vscale x 4 x i32> %shift, i32 %vl) {
+; CHECK-LABEL: test_vssra_vv_i32m2_m:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu
-; CHECK-NEXT: vssra.vi v8, v9, 9, v0.t
+; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
+; CHECK-NEXT: csrwi vxrm, 0
+; CHECK-NEXT: vssra.vv v8, v8, v10, v0.t
; CHECK-NEXT: ret
entry:
- %a = call <vscale x 8 x i8> @llvm.riscv.vssra.mask.nxv8i8(
- <vscale x 8 x i8> %0,
- <vscale x 8 x i8> %1,
- i32 9,
- <vscale x 8 x i1> %2,
- i32 %3, i32 1)
-
- ret <vscale x 8 x i8> %a
+ %0 = call <vscale x 4 x i32> @llvm.riscv.vssra.mask.nxv4i32.nxv4i32.i32(<vscale x 4 x i32> poison, <vscale x 4 x i32> %op1, <vscale x 4 x i32> %shift, <vscale x 4 x i1> %mask, i32 0, i32 %vl, i32 3)
+ ret <vscale x 4 x i32> %0
}
-define <vscale x 16 x i8> @intrinsic_vssra_vi_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, i32 %1) nounwind {
-; CHECK-LABEL: intrinsic_vssra_vi_nxv16i8_nxv16i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma
-; CHECK-NEXT: vssra.vi v8, v8, 9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 16 x i8> @llvm.riscv.vssra.nxv16i8(
- <vscale x 16 x i8> undef,
- <vscale x 16 x i8> %0,
- i32 9,
- i32 %1)
-
- ret <vscale x 16 x i8> %a
-}
+declare <vscale x 4 x i32> @llvm.riscv.vssra.mask.nxv4i32.nxv4i32.i32(<vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i1>, i32 immarg, i32, i32 immarg)
-define <vscale x 16 x i8> @intrinsic_vssra_mask_vi_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, <vscale x 16 x i1> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vssra_mask_vi_nxv16i8_nxv16i8_i8:
+define <vscale x 4 x i32> @test_vssra_vx_i32m2_m(<vscale x 4 x i1> %mask, <vscale x 4 x i32> %op1, i32 %shift, i32 %vl) {
+; CHECK-LABEL: test_vssra_vx_i32m2_m:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu
-; CHECK-NEXT: vssra.vi v8, v10, 9, v0.t
+; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
+; CHECK-NEXT: csrwi vxrm, 0
+; CHECK-NEXT: vssra.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
entry:
- %a = call <vscale x 16 x i8> @llvm.riscv.vssra.mask.nxv16i8(
- <vscale x 16 x i8> %0,
- <vscale x 16 x i8> %1,
- i32 9,
- <vscale x 16 x i1> %2,
- i32 %3, i32 1)
-
- ret <vscale x 16 x i8> %a
+ %0 = call <vscale x 4 x i32> @llvm.riscv.vssra.mask.nxv4i32.i32.i32(<vscale x 4 x i32> poison, <vscale x 4 x i32> %op1, i32 %shift, <vscale x 4 x i1> %mask, i32 0, i32 %vl, i32 3)
+ ret <vscale x 4 x i32> %0
}
-define <vscale x 32 x i8> @intrinsic_vssra_vi_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, i32 %1) nounwind {
-; CHECK-LABEL: intrinsic_vssra_vi_nxv32i8_nxv32i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma
-; CHECK-NEXT: vssra.vi v8, v8, 9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 32 x i8> @llvm.riscv.vssra.nxv32i8(
- <vscale x 32 x i8> undef,
- <vscale x 32 x i8> %0,
- i32 9,
- i32 %1)
-
- ret <vscale x 32 x i8> %a
-}
+declare <vscale x 4 x i32> @llvm.riscv.vssra.mask.nxv4i32.i32.i32(<vscale x 4 x i32>, <vscale x 4 x i32>, i32, <vscale x 4 x i1>, i32 immarg, i32, i32 immarg)
-define <vscale x 32 x i8> @intrinsic_vssra_mask_vi_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, <vscale x 32 x i1> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vssra_mask_vi_nxv32i8_nxv32i8_i8:
+define <vscale x 8 x i32> @test_vssra_vv_i32m4_m(<vscale x 8 x i1> %mask, <vscale x 8 x i32> %op1, <vscale x 8 x i32> %shift, i32 %vl) {
+; CHECK-LABEL: test_vssra_vv_i32m4_m:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu
-; CHECK-NEXT: vssra.vi v8, v12, 9, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 32 x i8> @llvm.riscv.vssra.mask.nxv32i8(
- <vscale x 32 x i8> %0,
- <vscale x 32 x i8> %1,
- i32 9,
- <vscale x 32 x i1> %2,
- i32 %3, i32 1)
-
- ret <vscale x 32 x i8> %a
-}
-
-define <vscale x 64 x i8> @intrinsic_vssra_vi_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, i32 %1) nounwind {
-; CHECK-LABEL: intrinsic_vssra_vi_nxv64i8_nxv64i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma
-; CHECK-NEXT: vssra.vi v8, v8, 9
+; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
+; CHECK-NEXT: csrwi vxrm, 0
+; CHECK-NEXT: vssra.vv v8, v8, v12, v0.t
; CHECK-NEXT: ret
entry:
- %a = call <vscale x 64 x i8> @llvm.riscv.vssra.nxv64i8(
- <vscale x 64 x i8> undef,
- <vscale x 64 x i8> %0,
- i32 9,
- i32 %1)
-
- ret <vscale x 64 x i8> %a
+ %0 = call <vscale x 8 x i32> @llvm.riscv.vssra.mask.nxv8i32.nxv8i32.i32(<vscale x 8 x i32> poison, <vscale x 8 x i32> %op1, <vscale x 8 x i32> %shift, <vscale x 8 x i1> %mask, i32 0, i32 %vl, i32 3)
+ ret <vscale x 8 x i32> %0
}
-define <vscale x 64 x i8> @intrinsic_vssra_mask_vi_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i1> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vssra_mask_vi_nxv64i8_nxv64i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, mu
-; CHECK-NEXT: vssra.vi v8, v16, 9, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 64 x i8> @llvm.riscv.vssra.mask.nxv64i8(
- <vscale x 64 x i8> %0,
- <vscale x 64 x i8> %1,
- i32 9,
- <vscale x 64 x i1> %2,
- i32 %3, i32 1)
-
- ret <vscale x 64 x i8> %a
-}
+declare <vscale x 8 x i32> @llvm.riscv.vssra.mask.nxv8i32.nxv8i32.i32(<vscale x 8 x i32>, <vscale x 8 x i32>, <vscale x 8 x i32>, <vscale x 8 x i1>, i32 immarg, i32, i32 immarg)
-define <vscale x 1 x i16> @intrinsic_vssra_vi_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, i32 %1) nounwind {
-; CHECK-LABEL: intrinsic_vssra_vi_nxv1i16_nxv1i16_i16:
+define <vscale x 8 x i32> @test_vssra_vx_i32m4_m(<vscale x 8 x i1> %mask, <vscale x 8 x i32> %op1, i32 %shift, i32 %vl) {
+; CHECK-LABEL: test_vssra_vx_i32m4_m:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
-; CHECK-NEXT: vssra.vi v8, v8, 9
+; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma
+; CHECK-NEXT: csrwi vxrm, 0
+; CHECK-NEXT: vssra.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
entry:
- %a = call <vscale x 1 x i16> @llvm.riscv.vssra.nxv1i16(
- <vscale x 1 x i16> undef,
- <vscale x 1 x i16> %0,
- i32 9,
- i32 %1)
-
- ret <vscale x 1 x i16> %a
+ %0 = call <vscale x 8 x i32> @llvm.riscv.vssra.mask.nxv8i32.i32.i32(<vscale x 8 x i32> poison, <vscale x 8 x i32> %op1, i32 %shift, <vscale x 8 x i1> %mask, i32 0, i32 %vl, i32 3)
+ ret <vscale x 8 x i32> %0
}
-define <vscale x 1 x i16> @intrinsic_vssra_mask_vi_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vssra_mask_vi_nxv1i16_nxv1i16_i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
-; CHECK-NEXT: vssra.vi v8, v9, 9, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i16> @llvm.riscv.vssra.mask.nxv1i16(
- <vscale x 1 x i16> %0,
- <vscale x 1 x i16> %1,
- i32 9,
- <vscale x 1 x i1> %2,
- i32 %3, i32 1)
-
- ret <vscale x 1 x i16> %a
-}
+declare <vscale x 8 x i32> @llvm.riscv.vssra.mask.nxv8i32.i32.i32(<vscale x 8 x i32>, <vscale x 8 x i32>, i32, <vscale x 8 x i1>, i32 immarg, i32, i32 immarg)
-define <vscale x 2 x i16> @intrinsic_vssra_vi_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, i32 %1) nounwind {
-; CHECK-LABEL: intrinsic_vssra_vi_nxv2i16_nxv2i16_i16:
+define <vscale x 16 x i32> @test_vssra_vv_i32m8_m(<vscale x 16 x i1> %mask, <vscale x 16 x i32> %op1, <vscale x 16 x i32> %shift, i32 %vl) {
+; CHECK-LABEL: test_vssra_vv_i32m8_m:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
-; CHECK-NEXT: vssra.vi v8, v8, 9
+; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma
+; CHECK-NEXT: csrwi vxrm, 0
+; CHECK-NEXT: vssra.vv v8, v8, v16, v0.t
; CHECK-NEXT: ret
entry:
- %a = call <vscale x 2 x i16> @llvm.riscv.vssra.nxv2i16(
- <vscale x 2 x i16> undef,
- <vscale x 2 x i16> %0,
- i32 9,
- i32 %1)
-
- ret <vscale x 2 x i16> %a
+ %0 = call <vscale x 16 x i32> @llvm.riscv.vssra.mask.nxv16i32.nxv16i32.i32(<vscale x 16 x i32> poison, <vscale x 16 x i32> %op1, <vscale x 16 x i32> %shift, <vscale x 16 x i1> %mask, i32 0, i32 %vl, i32 3)
+ ret <vscale x 16 x i32> %0
}
-define <vscale x 2 x i16> @intrinsic_vssra_mask_vi_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vssra_mask_vi_nxv2i16_nxv2i16_i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu
-; CHECK-NEXT: vssra.vi v8, v9, 9, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i16> @llvm.riscv.vssra.mask.nxv2i16(
- <vscale x 2 x i16> %0,
- <vscale x 2 x i16> %1,
- i32 9,
- <vscale x 2 x i1> %2,
- i32 %3, i32 1)
-
- ret <vscale x 2 x i16> %a
-}
+declare <vscale x 16 x i32> @llvm.riscv.vssra.mask.nxv16i32.nxv16i32.i32(<vscale x 16 x i32>, <vscale x 16 x i32>, <vscale x 16 x i32>, <vscale x 16 x i1>, i32 immarg, i32, i32 immarg)
-define <vscale x 4 x i16> @intrinsic_vssra_vi_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, i32 %1) nounwind {
-; CHECK-LABEL: intrinsic_vssra_vi_nxv4i16_nxv4i16_i16:
+define <vscale x 16 x i32> @test_vssra_vx_i32m8_m(<vscale x 16 x i1> %mask, <vscale x 16 x i32> %op1, i32 %shift, i32 %vl) {
+; CHECK-LABEL: test_vssra_vx_i32m8_m:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
-; CHECK-NEXT: vssra.vi v8, v8, 9
+; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma
+; CHECK-NEXT: csrwi vxrm, 0
+; CHECK-NEXT: vssra.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
entry:
- %a = call <vscale x 4 x i16> @llvm.riscv.vssra.nxv4i16(
- <vscale x 4 x i16> undef,
- <vscale x 4 x i16> %0,
- i32 9,
- i32 %1)
-
- ret <vscale x 4 x i16> %a
+ %0 = call <vscale x 16 x i32> @llvm.riscv.vssra.mask.nxv16i32.i32.i32(<vscale x 16 x i32> poison, <vscale x 16 x i32> %op1, i32 %shift, <vscale x 16 x i1> %mask, i32 0, i32 %vl, i32 3)
+ ret <vscale x 16 x i32> %0
}
-define <vscale x 4 x i16> @intrinsic_vssra_mask_vi_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vssra_mask_vi_nxv4i16_nxv4i16_i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu
-; CHECK-NEXT: vssra.vi v8, v9, 9, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i16> @llvm.riscv.vssra.mask.nxv4i16(
- <vscale x 4 x i16> %0,
- <vscale x 4 x i16> %1,
- i32 9,
- <vscale x 4 x i1> %2,
- i32 %3, i32 1)
-
- ret <vscale x 4 x i16> %a
-}
+declare <vscale x 16 x i32> @llvm.riscv.vssra.mask.nxv16i32.i32.i32(<vscale x 16 x i32>, <vscale x 16 x i32>, i32, <vscale x 16 x i1>, i32 immarg, i32, i32 immarg)
-define <vscale x 8 x i16> @intrinsic_vssra_vi_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, i32 %1) nounwind {
-; CHECK-LABEL: intrinsic_vssra_vi_nxv8i16_nxv8i16_i16:
+define <vscale x 1 x i64> @test_vssra_vv_i64m1_m(<vscale x 1 x i1> %mask, <vscale x 1 x i64> %op1, <vscale x 1 x i64> %shift, i32 %vl) {
+; CHECK-LABEL: test_vssra_vv_i64m1_m:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
-; CHECK-NEXT: vssra.vi v8, v8, 9
+; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
+; CHECK-NEXT: csrwi vxrm, 0
+; CHECK-NEXT: vssra.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
entry:
- %a = call <vscale x 8 x i16> @llvm.riscv.vssra.nxv8i16(
- <vscale x 8 x i16> undef,
- <vscale x 8 x i16> %0,
- i32 9,
- i32 %1)
-
- ret <vscale x 8 x i16> %a
+ %0 = call <vscale x 1 x i64> @llvm.riscv.vssra.mask.nxv1i64.nxv1i64.i32(<vscale x 1 x i64> poison, <vscale x 1 x i64> %op1, <vscale x 1 x i64> %shift, <vscale x 1 x i1> %mask, i32 0, i32 %vl, i32 3)
+ ret <vscale x 1 x i64> %0
}
-define <vscale x 8 x i16> @intrinsic_vssra_mask_vi_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vssra_mask_vi_nxv8i16_nxv8i16_i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu
-; CHECK-NEXT: vssra.vi v8, v10, 9, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i16> @llvm.riscv.vssra.mask.nxv8i16(
- <vscale x 8 x i16> %0,
- <vscale x 8 x i16> %1,
- i32 9,
- <vscale x 8 x i1> %2,
- i32 %3, i32 1)
-
- ret <vscale x 8 x i16> %a
-}
+declare <vscale x 1 x i64> @llvm.riscv.vssra.mask.nxv1i64.nxv1i64.i32(<vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i1>, i32 immarg, i32, i32 immarg)
-define <vscale x 16 x i16> @intrinsic_vssra_vi_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, i32 %1) nounwind {
-; CHECK-LABEL: intrinsic_vssra_vi_nxv16i16_nxv16i16_i16:
+define <vscale x 1 x i64> @test_vssra_vx_i64m1_m(<vscale x 1 x i1> %mask, <vscale x 1 x i64> %op1, i32 %shift, i32 %vl) {
+; CHECK-LABEL: test_vssra_vx_i64m1_m:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma
-; CHECK-NEXT: vssra.vi v8, v8, 9
+; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
+; CHECK-NEXT: csrwi vxrm, 0
+; CHECK-NEXT: vssra.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
entry:
- %a = call <vscale x 16 x i16> @llvm.riscv.vssra.nxv16i16(
- <vscale x 16 x i16> undef,
- <vscale x 16 x i16> %0,
- i32 9,
- i32 %1)
-
- ret <vscale x 16 x i16> %a
+ %0 = call <vscale x 1 x i64> @llvm.riscv.vssra.mask.nxv1i64.i32.i32(<vscale x 1 x i64> poison, <vscale x 1 x i64> %op1, i32 %shift, <vscale x 1 x i1> %mask, i32 0, i32 %vl, i32 3)
+ ret <vscale x 1 x i64> %0
}
-define <vscale x 16 x i16> @intrinsic_vssra_mask_vi_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, <vscale x 16 x i1> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vssra_mask_vi_nxv16i16_nxv16i16_i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu
-; CHECK-NEXT: vssra.vi v8, v12, 9, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 16 x i16> @llvm.riscv.vssra.mask.nxv16i16(
- <vscale x 16 x i16> %0,
- <vscale x 16 x i16> %1,
- i32 9,
- <vscale x 16 x i1> %2,
- i32 %3, i32 1)
-
- ret <vscale x 16 x i16> %a
-}
+declare <vscale x 1 x i64> @llvm.riscv.vssra.mask.nxv1i64.i32.i32(<vscale x 1 x i64>, <vscale x 1 x i64>, i32, <vscale x 1 x i1>, i32 immarg, i32, i32 immarg)
-define <vscale x 32 x i16> @intrinsic_vssra_vi_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, i32 %1) nounwind {
-; CHECK-LABEL: intrinsic_vssra_vi_nxv32i16_nxv32i16_i16:
+define <vscale x 2 x i64> @test_vssra_vv_i64m2_m(<vscale x 2 x i1> %mask, <vscale x 2 x i64> %op1, <vscale x 2 x i64> %shift, i32 %vl) {
+; CHECK-LABEL: test_vssra_vv_i64m2_m:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma
-; CHECK-NEXT: vssra.vi v8, v8, 9
+; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
+; CHECK-NEXT: csrwi vxrm, 0
+; CHECK-NEXT: vssra.vv v8, v8, v10, v0.t
; CHECK-NEXT: ret
entry:
- %a = call <vscale x 32 x i16> @llvm.riscv.vssra.nxv32i16(
- <vscale x 32 x i16> undef,
- <vscale x 32 x i16> %0,
- i32 9,
- i32 %1)
-
- ret <vscale x 32 x i16> %a
+ %0 = call <vscale x 2 x i64> @llvm.riscv.vssra.mask.nxv2i64.nxv2i64.i32(<vscale x 2 x i64> poison, <vscale x 2 x i64> %op1, <vscale x 2 x i64> %shift, <vscale x 2 x i1> %mask, i32 0, i32 %vl, i32 3)
+ ret <vscale x 2 x i64> %0
}
-define <vscale x 32 x i16> @intrinsic_vssra_mask_vi_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, <vscale x 32 x i1> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vssra_mask_vi_nxv32i16_nxv32i16_i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu
-; CHECK-NEXT: vssra.vi v8, v16, 9, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 32 x i16> @llvm.riscv.vssra.mask.nxv32i16(
- <vscale x 32 x i16> %0,
- <vscale x 32 x i16> %1,
- i32 9,
- <vscale x 32 x i1> %2,
- i32 %3, i32 1)
-
- ret <vscale x 32 x i16> %a
-}
+declare <vscale x 2 x i64> @llvm.riscv.vssra.mask.nxv2i64.nxv2i64.i32(<vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i1>, i32 immarg, i32, i32 immarg)
-define <vscale x 1 x i32> @intrinsic_vssra_vi_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, i32 %1) nounwind {
-; CHECK-LABEL: intrinsic_vssra_vi_nxv1i32_nxv1i32_i32:
+define <vscale x 2 x i64> @test_vssra_vx_i64m2_m(<vscale x 2 x i1> %mask, <vscale x 2 x i64> %op1, i32 %shift, i32 %vl) {
+; CHECK-LABEL: test_vssra_vx_i64m2_m:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
-; CHECK-NEXT: vssra.vi v8, v8, 9
+; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma
+; CHECK-NEXT: csrwi vxrm, 0
+; CHECK-NEXT: vssra.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
entry:
- %a = call <vscale x 1 x i32> @llvm.riscv.vssra.nxv1i32(
- <vscale x 1 x i32> undef,
- <vscale x 1 x i32> %0,
- i32 9,
- i32 %1)
-
- ret <vscale x 1 x i32> %a
+ %0 = call <vscale x 2 x i64> @llvm.riscv.vssra.mask.nxv2i64.i32.i32(<vscale x 2 x i64> poison, <vscale x 2 x i64> %op1, i32 %shift, <vscale x 2 x i1> %mask, i32 0, i32 %vl, i32 3)
+ ret <vscale x 2 x i64> %0
}
-define <vscale x 1 x i32> @intrinsic_vssra_mask_vi_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vssra_mask_vi_nxv1i32_nxv1i32_i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu
-; CHECK-NEXT: vssra.vi v8, v9, 9, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i32> @llvm.riscv.vssra.mask.nxv1i32(
- <vscale x 1 x i32> %0,
- <vscale x 1 x i32> %1,
- i32 9,
- <vscale x 1 x i1> %2,
- i32 %3, i32 1)
-
- ret <vscale x 1 x i32> %a
-}
+declare <vscale x 2 x i64> @llvm.riscv.vssra.mask.nxv2i64.i32.i32(<vscale x 2 x i64>, <vscale x 2 x i64>, i32, <vscale x 2 x i1>, i32 immarg, i32, i32 immarg)
-define <vscale x 2 x i32> @intrinsic_vssra_vi_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, i32 %1) nounwind {
-; CHECK-LABEL: intrinsic_vssra_vi_nxv2i32_nxv2i32_i32:
+define <vscale x 4 x i64> @test_vssra_vv_i64m4_m(<vscale x 4 x i1> %mask, <vscale x 4 x i64> %op1, <vscale x 4 x i64> %shift, i32 %vl) {
+; CHECK-LABEL: test_vssra_vv_i64m4_m:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
-; CHECK-NEXT: vssra.vi v8, v8, 9
+; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
+; CHECK-NEXT: csrwi vxrm, 0
+; CHECK-NEXT: vssra.vv v8, v8, v12, v0.t
; CHECK-NEXT: ret
entry:
- %a = call <vscale x 2 x i32> @llvm.riscv.vssra.nxv2i32(
- <vscale x 2 x i32> undef,
- <vscale x 2 x i32> %0,
- i32 9,
- i32 %1)
-
- ret <vscale x 2 x i32> %a
+ %0 = call <vscale x 4 x i64> @llvm.riscv.vssra.mask.nxv4i64.nxv4i64.i32(<vscale x 4 x i64> poison, <vscale x 4 x i64> %op1, <vscale x 4 x i64> %shift, <vscale x 4 x i1> %mask, i32 0, i32 %vl, i32 3)
+ ret <vscale x 4 x i64> %0
}
-define <vscale x 2 x i32> @intrinsic_vssra_mask_vi_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vssra_mask_vi_nxv2i32_nxv2i32_i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
-; CHECK-NEXT: vssra.vi v8, v9, 9, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i32> @llvm.riscv.vssra.mask.nxv2i32(
- <vscale x 2 x i32> %0,
- <vscale x 2 x i32> %1,
- i32 9,
- <vscale x 2 x i1> %2,
- i32 %3, i32 1)
-
- ret <vscale x 2 x i32> %a
-}
+declare <vscale x 4 x i64> @llvm.riscv.vssra.mask.nxv4i64.nxv4i64.i32(<vscale x 4 x i64>, <vscale x 4 x i64>, <vscale x 4 x i64>, <vscale x 4 x i1>, i32 immarg, i32, i32 immarg)
-define <vscale x 4 x i32> @intrinsic_vssra_vi_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, i32 %1) nounwind {
-; CHECK-LABEL: intrinsic_vssra_vi_nxv4i32_nxv4i32_i32:
+define <vscale x 4 x i64> @test_vssra_vx_i64m4_m(<vscale x 4 x i1> %mask, <vscale x 4 x i64> %op1, i32 %shift, i32 %vl) {
+; CHECK-LABEL: test_vssra_vx_i64m4_m:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
-; CHECK-NEXT: vssra.vi v8, v8, 9
+; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma
+; CHECK-NEXT: csrwi vxrm, 0
+; CHECK-NEXT: vssra.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
entry:
- %a = call <vscale x 4 x i32> @llvm.riscv.vssra.nxv4i32(
- <vscale x 4 x i32> undef,
- <vscale x 4 x i32> %0,
- i32 9,
- i32 %1)
-
- ret <vscale x 4 x i32> %a
+ %0 = call <vscale x 4 x i64> @llvm.riscv.vssra.mask.nxv4i64.i32.i32(<vscale x 4 x i64> poison, <vscale x 4 x i64> %op1, i32 %shift, <vscale x 4 x i1> %mask, i32 0, i32 %vl, i32 3)
+ ret <vscale x 4 x i64> %0
}
-define <vscale x 4 x i32> @intrinsic_vssra_mask_vi_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vssra_mask_vi_nxv4i32_nxv4i32_i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu
-; CHECK-NEXT: vssra.vi v8, v10, 9, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i32> @llvm.riscv.vssra.mask.nxv4i32(
- <vscale x 4 x i32> %0,
- <vscale x 4 x i32> %1,
- i32 9,
- <vscale x 4 x i1> %2,
- i32 %3, i32 1)
-
- ret <vscale x 4 x i32> %a
-}
+declare <vscale x 4 x i64> @llvm.riscv.vssra.mask.nxv4i64.i32.i32(<vscale x 4 x i64>, <vscale x 4 x i64>, i32, <vscale x 4 x i1>, i32 immarg, i32, i32 immarg)
-define <vscale x 8 x i32> @intrinsic_vssra_vi_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, i32 %1) nounwind {
-; CHECK-LABEL: intrinsic_vssra_vi_nxv8i32_nxv8i32_i32:
+define <vscale x 8 x i64> @test_vssra_vv_i64m8_m(<vscale x 8 x i1> %mask, <vscale x 8 x i64> %op1, <vscale x 8 x i64> %shift, i32 %vl) {
+; CHECK-LABEL: test_vssra_vv_i64m8_m:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
-; CHECK-NEXT: vssra.vi v8, v8, 9
+; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; CHECK-NEXT: csrwi vxrm, 0
+; CHECK-NEXT: vssra.vv v8, v8, v16, v0.t
; CHECK-NEXT: ret
entry:
- %a = call <vscale x 8 x i32> @llvm.riscv.vssra.nxv8i32(
- <vscale x 8 x i32> undef,
- <vscale x 8 x i32> %0,
- i32 9,
- i32 %1)
-
- ret <vscale x 8 x i32> %a
+ %0 = call <vscale x 8 x i64> @llvm.riscv.vssra.mask.nxv8i64.nxv8i64.i32(<vscale x 8 x i64> poison, <vscale x 8 x i64> %op1, <vscale x 8 x i64> %shift, <vscale x 8 x i1> %mask, i32 0, i32 %vl, i32 3)
+ ret <vscale x 8 x i64> %0
}
-define <vscale x 8 x i32> @intrinsic_vssra_mask_vi_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vssra_mask_vi_nxv8i32_nxv8i32_i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu
-; CHECK-NEXT: vssra.vi v8, v12, 9, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i32> @llvm.riscv.vssra.mask.nxv8i32(
- <vscale x 8 x i32> %0,
- <vscale x 8 x i32> %1,
- i32 9,
- <vscale x 8 x i1> %2,
- i32 %3, i32 1)
-
- ret <vscale x 8 x i32> %a
-}
+declare <vscale x 8 x i64> @llvm.riscv.vssra.mask.nxv8i64.nxv8i64.i32(<vscale x 8 x i64>, <vscale x 8 x i64>, <vscale x 8 x i64>, <vscale x 8 x i1>, i32 immarg, i32, i32 immarg)
-define <vscale x 16 x i32> @intrinsic_vssra_vi_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, i32 %1) nounwind {
-; CHECK-LABEL: intrinsic_vssra_vi_nxv16i32_nxv16i32_i32:
+define <vscale x 8 x i64> @test_vssra_vx_i64m8_m(<vscale x 8 x i1> %mask, <vscale x 8 x i64> %op1, i32 %shift, i32 %vl) {
+; CHECK-LABEL: test_vssra_vx_i64m8_m:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma
-; CHECK-NEXT: vssra.vi v8, v8, 9
+; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, ma
+; CHECK-NEXT: csrwi vxrm, 0
+; CHECK-NEXT: vssra.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
entry:
- %a = call <vscale x 16 x i32> @llvm.riscv.vssra.nxv16i32(
- <vscale x 16 x i32> undef,
- <vscale x 16 x i32> %0,
- i32 9,
- i32 %1)
-
- ret <vscale x 16 x i32> %a
+ %0 = call <vscale x 8 x i64> @llvm.riscv.vssra.mask.nxv8i64.i32.i32(<vscale x 8 x i64> poison, <vscale x 8 x i64> %op1, i32 %shift, <vscale x 8 x i1> %mask, i32 0, i32 %vl, i32 3)
+ ret <vscale x 8 x i64> %0
}
-define <vscale x 16 x i32> @intrinsic_vssra_mask_vi_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, <vscale x 16 x i1> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vssra_mask_vi_nxv16i32_nxv16i32_i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu
-; CHECK-NEXT: vssra.vi v8, v16, 9, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 16 x i32> @llvm.riscv.vssra.mask.nxv16i32(
- <vscale x 16 x i32> %0,
- <vscale x 16 x i32> %1,
- i32 9,
- <vscale x 16 x i1> %2,
- i32 %3, i32 1)
-
- ret <vscale x 16 x i32> %a
-}
+declare <vscale x 8 x i64> @llvm.riscv.vssra.mask.nxv8i64.i32.i32(<vscale x 8 x i64>, <vscale x 8 x i64>, i32, <vscale x 8 x i1>, i32 immarg, i32, i32 immarg)
-; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 2
; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs \
; RUN: < %s | FileCheck %s
-declare <vscale x 1 x i8> @llvm.riscv.vssra.nxv1i8.nxv1i8(
- <vscale x 1 x i8>,
- <vscale x 1 x i8>,
- <vscale x 1 x i8>,
- i64);
-define <vscale x 1 x i8> @intrinsic_vssra_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vssra_vv_nxv1i8_nxv1i8_nxv1i8:
+define <vscale x 1 x i8> @test_vssra_vv_i8mf8(<vscale x 1 x i8> %op1, <vscale x 1 x i8> %shift, i64 %vl) {
+; CHECK-LABEL: test_vssra_vv_i8mf8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
+; CHECK-NEXT: csrwi vxrm, 0
; CHECK-NEXT: vssra.vv v8, v8, v9
; CHECK-NEXT: ret
entry:
- %a = call <vscale x 1 x i8> @llvm.riscv.vssra.nxv1i8.nxv1i8(
- <vscale x 1 x i8> undef,
- <vscale x 1 x i8> %0,
- <vscale x 1 x i8> %1,
- i64 %2)
-
- ret <vscale x 1 x i8> %a
+ %0 = call <vscale x 1 x i8> @llvm.riscv.vssra.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> poison, <vscale x 1 x i8> %op1, <vscale x 1 x i8> %shift, i64 0, i64 %vl)
+ ret <vscale x 1 x i8> %0
}
-declare <vscale x 1 x i8> @llvm.riscv.vssra.mask.nxv1i8.nxv1i8(
- <vscale x 1 x i8>,
- <vscale x 1 x i8>,
- <vscale x 1 x i8>,
- <vscale x 1 x i1>,
- i64,
- i64);
+declare <vscale x 1 x i8> @llvm.riscv.vssra.nxv1i8.nxv1i8.i64(<vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, i64 immarg, i64)
-define <vscale x 1 x i8> @intrinsic_vssra_mask_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vssra_mask_vv_nxv1i8_nxv1i8_nxv1i8:
+define <vscale x 1 x i8> @test_vssra_vx_i8mf8(<vscale x 1 x i8> %op1, i64 %shift, i64 %vl) {
+; CHECK-LABEL: test_vssra_vx_i8mf8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu
-; CHECK-NEXT: vssra.vv v8, v9, v10, v0.t
+; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
+; CHECK-NEXT: csrwi vxrm, 0
+; CHECK-NEXT: vssra.vx v8, v8, a0
; CHECK-NEXT: ret
entry:
- %a = call <vscale x 1 x i8> @llvm.riscv.vssra.mask.nxv1i8.nxv1i8(
- <vscale x 1 x i8> %0,
- <vscale x 1 x i8> %1,
- <vscale x 1 x i8> %2,
- <vscale x 1 x i1> %3,
- i64 %4, i64 1)
-
- ret <vscale x 1 x i8> %a
+ %0 = call <vscale x 1 x i8> @llvm.riscv.vssra.nxv1i8.i64.i64(<vscale x 1 x i8> poison, <vscale x 1 x i8> %op1, i64 %shift, i64 0, i64 %vl)
+ ret <vscale x 1 x i8> %0
}
-declare <vscale x 2 x i8> @llvm.riscv.vssra.nxv2i8.nxv2i8(
- <vscale x 2 x i8>,
- <vscale x 2 x i8>,
- <vscale x 2 x i8>,
- i64);
+declare <vscale x 1 x i8> @llvm.riscv.vssra.nxv1i8.i64.i64(<vscale x 1 x i8>, <vscale x 1 x i8>, i64, i64 immarg, i64)
-define <vscale x 2 x i8> @intrinsic_vssra_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vssra_vv_nxv2i8_nxv2i8_nxv2i8:
+define <vscale x 2 x i8> @test_vssra_vv_i8mf4(<vscale x 2 x i8> %op1, <vscale x 2 x i8> %shift, i64 %vl) {
+; CHECK-LABEL: test_vssra_vv_i8mf4:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma
+; CHECK-NEXT: csrwi vxrm, 0
; CHECK-NEXT: vssra.vv v8, v8, v9
; CHECK-NEXT: ret
entry:
- %a = call <vscale x 2 x i8> @llvm.riscv.vssra.nxv2i8.nxv2i8(
- <vscale x 2 x i8> undef,
- <vscale x 2 x i8> %0,
- <vscale x 2 x i8> %1,
- i64 %2)
-
- ret <vscale x 2 x i8> %a
+ %0 = call <vscale x 2 x i8> @llvm.riscv.vssra.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> poison, <vscale x 2 x i8> %op1, <vscale x 2 x i8> %shift, i64 0, i64 %vl)
+ ret <vscale x 2 x i8> %0
}
-declare <vscale x 2 x i8> @llvm.riscv.vssra.mask.nxv2i8.nxv2i8(
- <vscale x 2 x i8>,
- <vscale x 2 x i8>,
- <vscale x 2 x i8>,
- <vscale x 2 x i1>,
- i64,
- i64);
+declare <vscale x 2 x i8> @llvm.riscv.vssra.nxv2i8.nxv2i8.i64(<vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, i64 immarg, i64)
-define <vscale x 2 x i8> @intrinsic_vssra_mask_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vssra_mask_vv_nxv2i8_nxv2i8_nxv2i8:
+define <vscale x 2 x i8> @test_vssra_vx_i8mf4(<vscale x 2 x i8> %op1, i64 %shift, i64 %vl) {
+; CHECK-LABEL: test_vssra_vx_i8mf4:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu
-; CHECK-NEXT: vssra.vv v8, v9, v10, v0.t
+; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
+; CHECK-NEXT: csrwi vxrm, 0
+; CHECK-NEXT: vssra.vx v8, v8, a0
; CHECK-NEXT: ret
entry:
- %a = call <vscale x 2 x i8> @llvm.riscv.vssra.mask.nxv2i8.nxv2i8(
- <vscale x 2 x i8> %0,
- <vscale x 2 x i8> %1,
- <vscale x 2 x i8> %2,
- <vscale x 2 x i1> %3,
- i64 %4, i64 1)
-
- ret <vscale x 2 x i8> %a
+ %0 = call <vscale x 2 x i8> @llvm.riscv.vssra.nxv2i8.i64.i64(<vscale x 2 x i8> poison, <vscale x 2 x i8> %op1, i64 %shift, i64 0, i64 %vl)
+ ret <vscale x 2 x i8> %0
}
-declare <vscale x 4 x i8> @llvm.riscv.vssra.nxv4i8.nxv4i8(
- <vscale x 4 x i8>,
- <vscale x 4 x i8>,
- <vscale x 4 x i8>,
- i64);
+declare <vscale x 2 x i8> @llvm.riscv.vssra.nxv2i8.i64.i64(<vscale x 2 x i8>, <vscale x 2 x i8>, i64, i64 immarg, i64)
-define <vscale x 4 x i8> @intrinsic_vssra_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vssra_vv_nxv4i8_nxv4i8_nxv4i8:
+define <vscale x 4 x i8> @test_vssra_vv_i8mf2(<vscale x 4 x i8> %op1, <vscale x 4 x i8> %shift, i64 %vl) {
+; CHECK-LABEL: test_vssra_vv_i8mf2:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
+; CHECK-NEXT: csrwi vxrm, 0
; CHECK-NEXT: vssra.vv v8, v8, v9
; CHECK-NEXT: ret
entry:
- %a = call <vscale x 4 x i8> @llvm.riscv.vssra.nxv4i8.nxv4i8(
- <vscale x 4 x i8> undef,
- <vscale x 4 x i8> %0,
- <vscale x 4 x i8> %1,
- i64 %2)
-
- ret <vscale x 4 x i8> %a
+ %0 = call <vscale x 4 x i8> @llvm.riscv.vssra.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> poison, <vscale x 4 x i8> %op1, <vscale x 4 x i8> %shift, i64 0, i64 %vl)
+ ret <vscale x 4 x i8> %0
}
-declare <vscale x 4 x i8> @llvm.riscv.vssra.mask.nxv4i8.nxv4i8(
- <vscale x 4 x i8>,
- <vscale x 4 x i8>,
- <vscale x 4 x i8>,
- <vscale x 4 x i1>,
- i64,
- i64);
+declare <vscale x 4 x i8> @llvm.riscv.vssra.nxv4i8.nxv4i8.i64(<vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, i64 immarg, i64)
-define <vscale x 4 x i8> @intrinsic_vssra_mask_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vssra_mask_vv_nxv4i8_nxv4i8_nxv4i8:
+define <vscale x 4 x i8> @test_vssra_vx_i8mf2(<vscale x 4 x i8> %op1, i64 %shift, i64 %vl) {
+; CHECK-LABEL: test_vssra_vx_i8mf2:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu
-; CHECK-NEXT: vssra.vv v8, v9, v10, v0.t
+; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
+; CHECK-NEXT: csrwi vxrm, 0
+; CHECK-NEXT: vssra.vx v8, v8, a0
; CHECK-NEXT: ret
entry:
- %a = call <vscale x 4 x i8> @llvm.riscv.vssra.mask.nxv4i8.nxv4i8(
- <vscale x 4 x i8> %0,
- <vscale x 4 x i8> %1,
- <vscale x 4 x i8> %2,
- <vscale x 4 x i1> %3,
- i64 %4, i64 1)
-
- ret <vscale x 4 x i8> %a
+ %0 = call <vscale x 4 x i8> @llvm.riscv.vssra.nxv4i8.i64.i64(<vscale x 4 x i8> poison, <vscale x 4 x i8> %op1, i64 %shift, i64 0, i64 %vl)
+ ret <vscale x 4 x i8> %0
}
-declare <vscale x 8 x i8> @llvm.riscv.vssra.nxv8i8.nxv8i8(
- <vscale x 8 x i8>,
- <vscale x 8 x i8>,
- <vscale x 8 x i8>,
- i64);
+declare <vscale x 4 x i8> @llvm.riscv.vssra.nxv4i8.i64.i64(<vscale x 4 x i8>, <vscale x 4 x i8>, i64, i64 immarg, i64)
-define <vscale x 8 x i8> @intrinsic_vssra_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vssra_vv_nxv8i8_nxv8i8_nxv8i8:
+define <vscale x 8 x i8> @test_vssra_vv_i8m1(<vscale x 8 x i8> %op1, <vscale x 8 x i8> %shift, i64 %vl) {
+; CHECK-LABEL: test_vssra_vv_i8m1:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma
+; CHECK-NEXT: csrwi vxrm, 0
; CHECK-NEXT: vssra.vv v8, v8, v9
; CHECK-NEXT: ret
entry:
- %a = call <vscale x 8 x i8> @llvm.riscv.vssra.nxv8i8.nxv8i8(
- <vscale x 8 x i8> undef,
- <vscale x 8 x i8> %0,
- <vscale x 8 x i8> %1,
- i64 %2)
-
- ret <vscale x 8 x i8> %a
+ %0 = call <vscale x 8 x i8> @llvm.riscv.vssra.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> poison, <vscale x 8 x i8> %op1, <vscale x 8 x i8> %shift, i64 0, i64 %vl)
+ ret <vscale x 8 x i8> %0
}
-declare <vscale x 8 x i8> @llvm.riscv.vssra.mask.nxv8i8.nxv8i8(
- <vscale x 8 x i8>,
- <vscale x 8 x i8>,
- <vscale x 8 x i8>,
- <vscale x 8 x i1>,
- i64,
- i64);
+declare <vscale x 8 x i8> @llvm.riscv.vssra.nxv8i8.nxv8i8.i64(<vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, i64 immarg, i64)
-define <vscale x 8 x i8> @intrinsic_vssra_mask_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vssra_mask_vv_nxv8i8_nxv8i8_nxv8i8:
+define <vscale x 8 x i8> @test_vssra_vx_i8m1(<vscale x 8 x i8> %op1, i64 %shift, i64 %vl) {
+; CHECK-LABEL: test_vssra_vx_i8m1:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu
-; CHECK-NEXT: vssra.vv v8, v9, v10, v0.t
+; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
+; CHECK-NEXT: csrwi vxrm, 0
+; CHECK-NEXT: vssra.vx v8, v8, a0
; CHECK-NEXT: ret
entry:
- %a = call <vscale x 8 x i8> @llvm.riscv.vssra.mask.nxv8i8.nxv8i8(
- <vscale x 8 x i8> %0,
- <vscale x 8 x i8> %1,
- <vscale x 8 x i8> %2,
- <vscale x 8 x i1> %3,
- i64 %4, i64 1)
-
- ret <vscale x 8 x i8> %a
+ %0 = call <vscale x 8 x i8> @llvm.riscv.vssra.nxv8i8.i64.i64(<vscale x 8 x i8> poison, <vscale x 8 x i8> %op1, i64 %shift, i64 0, i64 %vl)
+ ret <vscale x 8 x i8> %0
}
-declare <vscale x 16 x i8> @llvm.riscv.vssra.nxv16i8.nxv16i8(
- <vscale x 16 x i8>,
- <vscale x 16 x i8>,
- <vscale x 16 x i8>,
- i64);
+declare <vscale x 8 x i8> @llvm.riscv.vssra.nxv8i8.i64.i64(<vscale x 8 x i8>, <vscale x 8 x i8>, i64, i64 immarg, i64)
-define <vscale x 16 x i8> @intrinsic_vssra_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vssra_vv_nxv16i8_nxv16i8_nxv16i8:
+define <vscale x 16 x i8> @test_vssra_vv_i8m2(<vscale x 16 x i8> %op1, <vscale x 16 x i8> %shift, i64 %vl) {
+; CHECK-LABEL: test_vssra_vv_i8m2:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma
+; CHECK-NEXT: csrwi vxrm, 0
; CHECK-NEXT: vssra.vv v8, v8, v10
; CHECK-NEXT: ret
entry:
- %a = call <vscale x 16 x i8> @llvm.riscv.vssra.nxv16i8.nxv16i8(
- <vscale x 16 x i8> undef,
- <vscale x 16 x i8> %0,
- <vscale x 16 x i8> %1,
- i64 %2)
-
- ret <vscale x 16 x i8> %a
+ %0 = call <vscale x 16 x i8> @llvm.riscv.vssra.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> poison, <vscale x 16 x i8> %op1, <vscale x 16 x i8> %shift, i64 0, i64 %vl)
+ ret <vscale x 16 x i8> %0
}
-declare <vscale x 16 x i8> @llvm.riscv.vssra.mask.nxv16i8.nxv16i8(
- <vscale x 16 x i8>,
- <vscale x 16 x i8>,
- <vscale x 16 x i8>,
- <vscale x 16 x i1>,
- i64,
- i64);
+declare <vscale x 16 x i8> @llvm.riscv.vssra.nxv16i8.nxv16i8.i64(<vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, i64 immarg, i64)
-define <vscale x 16 x i8> @intrinsic_vssra_mask_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vssra_mask_vv_nxv16i8_nxv16i8_nxv16i8:
+define <vscale x 16 x i8> @test_vssra_vx_i8m2(<vscale x 16 x i8> %op1, i64 %shift, i64 %vl) {
+; CHECK-LABEL: test_vssra_vx_i8m2:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu
-; CHECK-NEXT: vssra.vv v8, v10, v12, v0.t
+; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma
+; CHECK-NEXT: csrwi vxrm, 0
+; CHECK-NEXT: vssra.vx v8, v8, a0
; CHECK-NEXT: ret
entry:
- %a = call <vscale x 16 x i8> @llvm.riscv.vssra.mask.nxv16i8.nxv16i8(
- <vscale x 16 x i8> %0,
- <vscale x 16 x i8> %1,
- <vscale x 16 x i8> %2,
- <vscale x 16 x i1> %3,
- i64 %4, i64 1)
-
- ret <vscale x 16 x i8> %a
+ %0 = call <vscale x 16 x i8> @llvm.riscv.vssra.nxv16i8.i64.i64(<vscale x 16 x i8> poison, <vscale x 16 x i8> %op1, i64 %shift, i64 0, i64 %vl)
+ ret <vscale x 16 x i8> %0
}
-declare <vscale x 32 x i8> @llvm.riscv.vssra.nxv32i8.nxv32i8(
- <vscale x 32 x i8>,
- <vscale x 32 x i8>,
- <vscale x 32 x i8>,
- i64);
+declare <vscale x 16 x i8> @llvm.riscv.vssra.nxv16i8.i64.i64(<vscale x 16 x i8>, <vscale x 16 x i8>, i64, i64 immarg, i64)
-define <vscale x 32 x i8> @intrinsic_vssra_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vssra_vv_nxv32i8_nxv32i8_nxv32i8:
+define <vscale x 32 x i8> @test_vssra_vv_i8m4(<vscale x 32 x i8> %op1, <vscale x 32 x i8> %shift, i64 %vl) {
+; CHECK-LABEL: test_vssra_vv_i8m4:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma
+; CHECK-NEXT: csrwi vxrm, 0
; CHECK-NEXT: vssra.vv v8, v8, v12
; CHECK-NEXT: ret
entry:
- %a = call <vscale x 32 x i8> @llvm.riscv.vssra.nxv32i8.nxv32i8(
- <vscale x 32 x i8> undef,
- <vscale x 32 x i8> %0,
- <vscale x 32 x i8> %1,
- i64 %2)
-
- ret <vscale x 32 x i8> %a
+ %0 = call <vscale x 32 x i8> @llvm.riscv.vssra.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> poison, <vscale x 32 x i8> %op1, <vscale x 32 x i8> %shift, i64 0, i64 %vl)
+ ret <vscale x 32 x i8> %0
}
-declare <vscale x 32 x i8> @llvm.riscv.vssra.mask.nxv32i8.nxv32i8(
- <vscale x 32 x i8>,
- <vscale x 32 x i8>,
- <vscale x 32 x i8>,
- <vscale x 32 x i1>,
- i64,
- i64);
+declare <vscale x 32 x i8> @llvm.riscv.vssra.nxv32i8.nxv32i8.i64(<vscale x 32 x i8>, <vscale x 32 x i8>, <vscale x 32 x i8>, i64 immarg, i64)
-define <vscale x 32 x i8> @intrinsic_vssra_mask_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vssra_mask_vv_nxv32i8_nxv32i8_nxv32i8:
+define <vscale x 32 x i8> @test_vssra_vx_i8m4(<vscale x 32 x i8> %op1, i64 %shift, i64 %vl) {
+; CHECK-LABEL: test_vssra_vx_i8m4:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu
-; CHECK-NEXT: vssra.vv v8, v12, v16, v0.t
+; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma
+; CHECK-NEXT: csrwi vxrm, 0
+; CHECK-NEXT: vssra.vx v8, v8, a0
; CHECK-NEXT: ret
entry:
- %a = call <vscale x 32 x i8> @llvm.riscv.vssra.mask.nxv32i8.nxv32i8(
- <vscale x 32 x i8> %0,
- <vscale x 32 x i8> %1,
- <vscale x 32 x i8> %2,
- <vscale x 32 x i1> %3,
- i64 %4, i64 1)
-
- ret <vscale x 32 x i8> %a
+ %0 = call <vscale x 32 x i8> @llvm.riscv.vssra.nxv32i8.i64.i64(<vscale x 32 x i8> poison, <vscale x 32 x i8> %op1, i64 %shift, i64 0, i64 %vl)
+ ret <vscale x 32 x i8> %0
}
-declare <vscale x 64 x i8> @llvm.riscv.vssra.nxv64i8.nxv64i8(
- <vscale x 64 x i8>,
- <vscale x 64 x i8>,
- <vscale x 64 x i8>,
- i64);
+declare <vscale x 32 x i8> @llvm.riscv.vssra.nxv32i8.i64.i64(<vscale x 32 x i8>, <vscale x 32 x i8>, i64, i64 immarg, i64)
-define <vscale x 64 x i8> @intrinsic_vssra_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vssra_vv_nxv64i8_nxv64i8_nxv64i8:
+define <vscale x 64 x i8> @test_vssra_vv_i8m8(<vscale x 64 x i8> %op1, <vscale x 64 x i8> %shift, i64 %vl) {
+; CHECK-LABEL: test_vssra_vv_i8m8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma
+; CHECK-NEXT: csrwi vxrm, 0
; CHECK-NEXT: vssra.vv v8, v8, v16
; CHECK-NEXT: ret
entry:
- %a = call <vscale x 64 x i8> @llvm.riscv.vssra.nxv64i8.nxv64i8(
- <vscale x 64 x i8> undef,
- <vscale x 64 x i8> %0,
- <vscale x 64 x i8> %1,
- i64 %2)
-
- ret <vscale x 64 x i8> %a
+ %0 = call <vscale x 64 x i8> @llvm.riscv.vssra.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> poison, <vscale x 64 x i8> %op1, <vscale x 64 x i8> %shift, i64 0, i64 %vl)
+ ret <vscale x 64 x i8> %0
}
-declare <vscale x 64 x i8> @llvm.riscv.vssra.mask.nxv64i8.nxv64i8(
- <vscale x 64 x i8>,
- <vscale x 64 x i8>,
- <vscale x 64 x i8>,
- <vscale x 64 x i1>,
- i64,
- i64);
+declare <vscale x 64 x i8> @llvm.riscv.vssra.nxv64i8.nxv64i8.i64(<vscale x 64 x i8>, <vscale x 64 x i8>, <vscale x 64 x i8>, i64 immarg, i64)
-define <vscale x 64 x i8> @intrinsic_vssra_mask_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i8> %2, <vscale x 64 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vssra_mask_vv_nxv64i8_nxv64i8_nxv64i8:
+define <vscale x 64 x i8> @test_vssra_vx_i8m8(<vscale x 64 x i8> %op1, i64 %shift, i64 %vl) {
+; CHECK-LABEL: test_vssra_vx_i8m8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vl8r.v v24, (a0)
-; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu
-; CHECK-NEXT: vssra.vv v8, v16, v24, v0.t
+; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma
+; CHECK-NEXT: csrwi vxrm, 0
+; CHECK-NEXT: vssra.vx v8, v8, a0
; CHECK-NEXT: ret
entry:
- %a = call <vscale x 64 x i8> @llvm.riscv.vssra.mask.nxv64i8.nxv64i8(
- <vscale x 64 x i8> %0,
- <vscale x 64 x i8> %1,
- <vscale x 64 x i8> %2,
- <vscale x 64 x i1> %3,
- i64 %4, i64 1)
-
- ret <vscale x 64 x i8> %a
+ %0 = call <vscale x 64 x i8> @llvm.riscv.vssra.nxv64i8.i64.i64(<vscale x 64 x i8> poison, <vscale x 64 x i8> %op1, i64 %shift, i64 0, i64 %vl)
+ ret <vscale x 64 x i8> %0
}
-declare <vscale x 1 x i16> @llvm.riscv.vssra.nxv1i16.nxv1i16(
- <vscale x 1 x i16>,
- <vscale x 1 x i16>,
- <vscale x 1 x i16>,
- i64);
+declare <vscale x 64 x i8> @llvm.riscv.vssra.nxv64i8.i64.i64(<vscale x 64 x i8>, <vscale x 64 x i8>, i64, i64 immarg, i64)
-define <vscale x 1 x i16> @intrinsic_vssra_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vssra_vv_nxv1i16_nxv1i16_nxv1i16:
+define <vscale x 1 x i16> @test_vssra_vv_i16mf4(<vscale x 1 x i16> %op1, <vscale x 1 x i16> %shift, i64 %vl) {
+; CHECK-LABEL: test_vssra_vv_i16mf4:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
+; CHECK-NEXT: csrwi vxrm, 0
; CHECK-NEXT: vssra.vv v8, v8, v9
; CHECK-NEXT: ret
entry:
- %a = call <vscale x 1 x i16> @llvm.riscv.vssra.nxv1i16.nxv1i16(
- <vscale x 1 x i16> undef,
- <vscale x 1 x i16> %0,
- <vscale x 1 x i16> %1,
- i64 %2)
-
- ret <vscale x 1 x i16> %a
+ %0 = call <vscale x 1 x i16> @llvm.riscv.vssra.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> poison, <vscale x 1 x i16> %op1, <vscale x 1 x i16> %shift, i64 0, i64 %vl)
+ ret <vscale x 1 x i16> %0
}
-declare <vscale x 1 x i16> @llvm.riscv.vssra.mask.nxv1i16.nxv1i16(
- <vscale x 1 x i16>,
- <vscale x 1 x i16>,
- <vscale x 1 x i16>,
- <vscale x 1 x i1>,
- i64,
- i64);
+declare <vscale x 1 x i16> @llvm.riscv.vssra.nxv1i16.nxv1i16.i64(<vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, i64 immarg, i64)
-define <vscale x 1 x i16> @intrinsic_vssra_mask_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vssra_mask_vv_nxv1i16_nxv1i16_nxv1i16:
+define <vscale x 1 x i16> @test_vssra_vx_i16mf4(<vscale x 1 x i16> %op1, i64 %shift, i64 %vl) {
+; CHECK-LABEL: test_vssra_vx_i16mf4:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
-; CHECK-NEXT: vssra.vv v8, v9, v10, v0.t
+; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
+; CHECK-NEXT: csrwi vxrm, 0
+; CHECK-NEXT: vssra.vx v8, v8, a0
; CHECK-NEXT: ret
entry:
- %a = call <vscale x 1 x i16> @llvm.riscv.vssra.mask.nxv1i16.nxv1i16(
- <vscale x 1 x i16> %0,
- <vscale x 1 x i16> %1,
- <vscale x 1 x i16> %2,
- <vscale x 1 x i1> %3,
- i64 %4, i64 1)
-
- ret <vscale x 1 x i16> %a
+ %0 = call <vscale x 1 x i16> @llvm.riscv.vssra.nxv1i16.i64.i64(<vscale x 1 x i16> poison, <vscale x 1 x i16> %op1, i64 %shift, i64 0, i64 %vl)
+ ret <vscale x 1 x i16> %0
}
-declare <vscale x 2 x i16> @llvm.riscv.vssra.nxv2i16.nxv2i16(
- <vscale x 2 x i16>,
- <vscale x 2 x i16>,
- <vscale x 2 x i16>,
- i64);
+declare <vscale x 1 x i16> @llvm.riscv.vssra.nxv1i16.i64.i64(<vscale x 1 x i16>, <vscale x 1 x i16>, i64, i64 immarg, i64)
-define <vscale x 2 x i16> @intrinsic_vssra_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vssra_vv_nxv2i16_nxv2i16_nxv2i16:
+define <vscale x 2 x i16> @test_vssra_vv_i16mf2(<vscale x 2 x i16> %op1, <vscale x 2 x i16> %shift, i64 %vl) {
+; CHECK-LABEL: test_vssra_vv_i16mf2:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
+; CHECK-NEXT: csrwi vxrm, 0
; CHECK-NEXT: vssra.vv v8, v8, v9
; CHECK-NEXT: ret
entry:
- %a = call <vscale x 2 x i16> @llvm.riscv.vssra.nxv2i16.nxv2i16(
- <vscale x 2 x i16> undef,
- <vscale x 2 x i16> %0,
- <vscale x 2 x i16> %1,
- i64 %2)
-
- ret <vscale x 2 x i16> %a
+ %0 = call <vscale x 2 x i16> @llvm.riscv.vssra.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> poison, <vscale x 2 x i16> %op1, <vscale x 2 x i16> %shift, i64 0, i64 %vl)
+ ret <vscale x 2 x i16> %0
}
-declare <vscale x 2 x i16> @llvm.riscv.vssra.mask.nxv2i16.nxv2i16(
- <vscale x 2 x i16>,
- <vscale x 2 x i16>,
- <vscale x 2 x i16>,
- <vscale x 2 x i1>,
- i64,
- i64);
+declare <vscale x 2 x i16> @llvm.riscv.vssra.nxv2i16.nxv2i16.i64(<vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, i64 immarg, i64)
-define <vscale x 2 x i16> @intrinsic_vssra_mask_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vssra_mask_vv_nxv2i16_nxv2i16_nxv2i16:
+define <vscale x 2 x i16> @test_vssra_vx_i16mf2(<vscale x 2 x i16> %op1, i64 %shift, i64 %vl) {
+; CHECK-LABEL: test_vssra_vx_i16mf2:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu
-; CHECK-NEXT: vssra.vv v8, v9, v10, v0.t
+; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
+; CHECK-NEXT: csrwi vxrm, 0
+; CHECK-NEXT: vssra.vx v8, v8, a0
; CHECK-NEXT: ret
entry:
- %a = call <vscale x 2 x i16> @llvm.riscv.vssra.mask.nxv2i16.nxv2i16(
- <vscale x 2 x i16> %0,
- <vscale x 2 x i16> %1,
- <vscale x 2 x i16> %2,
- <vscale x 2 x i1> %3,
- i64 %4, i64 1)
-
- ret <vscale x 2 x i16> %a
+ %0 = call <vscale x 2 x i16> @llvm.riscv.vssra.nxv2i16.i64.i64(<vscale x 2 x i16> poison, <vscale x 2 x i16> %op1, i64 %shift, i64 0, i64 %vl)
+ ret <vscale x 2 x i16> %0
}
-declare <vscale x 4 x i16> @llvm.riscv.vssra.nxv4i16.nxv4i16(
- <vscale x 4 x i16>,
- <vscale x 4 x i16>,
- <vscale x 4 x i16>,
- i64);
+declare <vscale x 2 x i16> @llvm.riscv.vssra.nxv2i16.i64.i64(<vscale x 2 x i16>, <vscale x 2 x i16>, i64, i64 immarg, i64)
-define <vscale x 4 x i16> @intrinsic_vssra_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vssra_vv_nxv4i16_nxv4i16_nxv4i16:
+define <vscale x 4 x i16> @test_vssra_vv_i16m1(<vscale x 4 x i16> %op1, <vscale x 4 x i16> %shift, i64 %vl) {
+; CHECK-LABEL: test_vssra_vv_i16m1:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
+; CHECK-NEXT: csrwi vxrm, 0
; CHECK-NEXT: vssra.vv v8, v8, v9
; CHECK-NEXT: ret
entry:
- %a = call <vscale x 4 x i16> @llvm.riscv.vssra.nxv4i16.nxv4i16(
- <vscale x 4 x i16> undef,
- <vscale x 4 x i16> %0,
- <vscale x 4 x i16> %1,
- i64 %2)
-
- ret <vscale x 4 x i16> %a
+ %0 = call <vscale x 4 x i16> @llvm.riscv.vssra.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> poison, <vscale x 4 x i16> %op1, <vscale x 4 x i16> %shift, i64 0, i64 %vl)
+ ret <vscale x 4 x i16> %0
}
-declare <vscale x 4 x i16> @llvm.riscv.vssra.mask.nxv4i16.nxv4i16(
- <vscale x 4 x i16>,
- <vscale x 4 x i16>,
- <vscale x 4 x i16>,
- <vscale x 4 x i1>,
- i64,
- i64);
+declare <vscale x 4 x i16> @llvm.riscv.vssra.nxv4i16.nxv4i16.i64(<vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, i64 immarg, i64)
-define <vscale x 4 x i16> @intrinsic_vssra_mask_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vssra_mask_vv_nxv4i16_nxv4i16_nxv4i16:
+define <vscale x 4 x i16> @test_vssra_vx_i16m1(<vscale x 4 x i16> %op1, i64 %shift, i64 %vl) {
+; CHECK-LABEL: test_vssra_vx_i16m1:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu
-; CHECK-NEXT: vssra.vv v8, v9, v10, v0.t
+; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
+; CHECK-NEXT: csrwi vxrm, 0
+; CHECK-NEXT: vssra.vx v8, v8, a0
; CHECK-NEXT: ret
entry:
- %a = call <vscale x 4 x i16> @llvm.riscv.vssra.mask.nxv4i16.nxv4i16(
- <vscale x 4 x i16> %0,
- <vscale x 4 x i16> %1,
- <vscale x 4 x i16> %2,
- <vscale x 4 x i1> %3,
- i64 %4, i64 1)
-
- ret <vscale x 4 x i16> %a
+ %0 = call <vscale x 4 x i16> @llvm.riscv.vssra.nxv4i16.i64.i64(<vscale x 4 x i16> poison, <vscale x 4 x i16> %op1, i64 %shift, i64 0, i64 %vl)
+ ret <vscale x 4 x i16> %0
}
-declare <vscale x 8 x i16> @llvm.riscv.vssra.nxv8i16.nxv8i16(
- <vscale x 8 x i16>,
- <vscale x 8 x i16>,
- <vscale x 8 x i16>,
- i64);
+declare <vscale x 4 x i16> @llvm.riscv.vssra.nxv4i16.i64.i64(<vscale x 4 x i16>, <vscale x 4 x i16>, i64, i64 immarg, i64)
-define <vscale x 8 x i16> @intrinsic_vssra_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vssra_vv_nxv8i16_nxv8i16_nxv8i16:
+define <vscale x 8 x i16> @test_vssra_vv_i16m2(<vscale x 8 x i16> %op1, <vscale x 8 x i16> %shift, i64 %vl) {
+; CHECK-LABEL: test_vssra_vv_i16m2:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
+; CHECK-NEXT: csrwi vxrm, 0
; CHECK-NEXT: vssra.vv v8, v8, v10
; CHECK-NEXT: ret
entry:
- %a = call <vscale x 8 x i16> @llvm.riscv.vssra.nxv8i16.nxv8i16(
- <vscale x 8 x i16> undef,
- <vscale x 8 x i16> %0,
- <vscale x 8 x i16> %1,
- i64 %2)
-
- ret <vscale x 8 x i16> %a
+ %0 = call <vscale x 8 x i16> @llvm.riscv.vssra.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> poison, <vscale x 8 x i16> %op1, <vscale x 8 x i16> %shift, i64 0, i64 %vl)
+ ret <vscale x 8 x i16> %0
}
-declare <vscale x 8 x i16> @llvm.riscv.vssra.mask.nxv8i16.nxv8i16(
- <vscale x 8 x i16>,
- <vscale x 8 x i16>,
- <vscale x 8 x i16>,
- <vscale x 8 x i1>,
- i64,
- i64);
+declare <vscale x 8 x i16> @llvm.riscv.vssra.nxv8i16.nxv8i16.i64(<vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, i64 immarg, i64)
-define <vscale x 8 x i16> @intrinsic_vssra_mask_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vssra_mask_vv_nxv8i16_nxv8i16_nxv8i16:
+define <vscale x 8 x i16> @test_vssra_vx_i16m2(<vscale x 8 x i16> %op1, i64 %shift, i64 %vl) {
+; CHECK-LABEL: test_vssra_vx_i16m2:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu
-; CHECK-NEXT: vssra.vv v8, v10, v12, v0.t
+; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
+; CHECK-NEXT: csrwi vxrm, 0
+; CHECK-NEXT: vssra.vx v8, v8, a0
; CHECK-NEXT: ret
entry:
- %a = call <vscale x 8 x i16> @llvm.riscv.vssra.mask.nxv8i16.nxv8i16(
- <vscale x 8 x i16> %0,
- <vscale x 8 x i16> %1,
- <vscale x 8 x i16> %2,
- <vscale x 8 x i1> %3,
- i64 %4, i64 1)
-
- ret <vscale x 8 x i16> %a
+ %0 = call <vscale x 8 x i16> @llvm.riscv.vssra.nxv8i16.i64.i64(<vscale x 8 x i16> poison, <vscale x 8 x i16> %op1, i64 %shift, i64 0, i64 %vl)
+ ret <vscale x 8 x i16> %0
}
-declare <vscale x 16 x i16> @llvm.riscv.vssra.nxv16i16.nxv16i16(
- <vscale x 16 x i16>,
- <vscale x 16 x i16>,
- <vscale x 16 x i16>,
- i64);
+declare <vscale x 8 x i16> @llvm.riscv.vssra.nxv8i16.i64.i64(<vscale x 8 x i16>, <vscale x 8 x i16>, i64, i64 immarg, i64)
-define <vscale x 16 x i16> @intrinsic_vssra_vv_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vssra_vv_nxv16i16_nxv16i16_nxv16i16:
+define <vscale x 16 x i16> @test_vssra_vv_i16m4(<vscale x 16 x i16> %op1, <vscale x 16 x i16> %shift, i64 %vl) {
+; CHECK-LABEL: test_vssra_vv_i16m4:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma
+; CHECK-NEXT: csrwi vxrm, 0
; CHECK-NEXT: vssra.vv v8, v8, v12
; CHECK-NEXT: ret
entry:
- %a = call <vscale x 16 x i16> @llvm.riscv.vssra.nxv16i16.nxv16i16(
- <vscale x 16 x i16> undef,
- <vscale x 16 x i16> %0,
- <vscale x 16 x i16> %1,
- i64 %2)
-
- ret <vscale x 16 x i16> %a
+ %0 = call <vscale x 16 x i16> @llvm.riscv.vssra.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> poison, <vscale x 16 x i16> %op1, <vscale x 16 x i16> %shift, i64 0, i64 %vl)
+ ret <vscale x 16 x i16> %0
}
-declare <vscale x 16 x i16> @llvm.riscv.vssra.mask.nxv16i16.nxv16i16(
- <vscale x 16 x i16>,
- <vscale x 16 x i16>,
- <vscale x 16 x i16>,
- <vscale x 16 x i1>,
- i64,
- i64);
+declare <vscale x 16 x i16> @llvm.riscv.vssra.nxv16i16.nxv16i16.i64(<vscale x 16 x i16>, <vscale x 16 x i16>, <vscale x 16 x i16>, i64 immarg, i64)
-define <vscale x 16 x i16> @intrinsic_vssra_mask_vv_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vssra_mask_vv_nxv16i16_nxv16i16_nxv16i16:
+define <vscale x 16 x i16> @test_vssra_vx_i16m4(<vscale x 16 x i16> %op1, i64 %shift, i64 %vl) {
+; CHECK-LABEL: test_vssra_vx_i16m4:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu
-; CHECK-NEXT: vssra.vv v8, v12, v16, v0.t
+; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma
+; CHECK-NEXT: csrwi vxrm, 0
+; CHECK-NEXT: vssra.vx v8, v8, a0
; CHECK-NEXT: ret
entry:
- %a = call <vscale x 16 x i16> @llvm.riscv.vssra.mask.nxv16i16.nxv16i16(
- <vscale x 16 x i16> %0,
- <vscale x 16 x i16> %1,
- <vscale x 16 x i16> %2,
- <vscale x 16 x i1> %3,
- i64 %4, i64 1)
-
- ret <vscale x 16 x i16> %a
+ %0 = call <vscale x 16 x i16> @llvm.riscv.vssra.nxv16i16.i64.i64(<vscale x 16 x i16> poison, <vscale x 16 x i16> %op1, i64 %shift, i64 0, i64 %vl)
+ ret <vscale x 16 x i16> %0
}
-declare <vscale x 32 x i16> @llvm.riscv.vssra.nxv32i16.nxv32i16(
- <vscale x 32 x i16>,
- <vscale x 32 x i16>,
- <vscale x 32 x i16>,
- i64);
+declare <vscale x 16 x i16> @llvm.riscv.vssra.nxv16i16.i64.i64(<vscale x 16 x i16>, <vscale x 16 x i16>, i64, i64 immarg, i64)
-define <vscale x 32 x i16> @intrinsic_vssra_vv_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vssra_vv_nxv32i16_nxv32i16_nxv32i16:
+define <vscale x 32 x i16> @test_vssra_vv_i16m8(<vscale x 32 x i16> %op1, <vscale x 32 x i16> %shift, i64 %vl) {
+; CHECK-LABEL: test_vssra_vv_i16m8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma
+; CHECK-NEXT: csrwi vxrm, 0
; CHECK-NEXT: vssra.vv v8, v8, v16
; CHECK-NEXT: ret
entry:
- %a = call <vscale x 32 x i16> @llvm.riscv.vssra.nxv32i16.nxv32i16(
- <vscale x 32 x i16> undef,
- <vscale x 32 x i16> %0,
- <vscale x 32 x i16> %1,
- i64 %2)
-
- ret <vscale x 32 x i16> %a
+ %0 = call <vscale x 32 x i16> @llvm.riscv.vssra.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> poison, <vscale x 32 x i16> %op1, <vscale x 32 x i16> %shift, i64 0, i64 %vl)
+ ret <vscale x 32 x i16> %0
}
-declare <vscale x 32 x i16> @llvm.riscv.vssra.mask.nxv32i16.nxv32i16(
- <vscale x 32 x i16>,
- <vscale x 32 x i16>,
- <vscale x 32 x i16>,
- <vscale x 32 x i1>,
- i64,
- i64);
+declare <vscale x 32 x i16> @llvm.riscv.vssra.nxv32i16.nxv32i16.i64(<vscale x 32 x i16>, <vscale x 32 x i16>, <vscale x 32 x i16>, i64 immarg, i64)
-define <vscale x 32 x i16> @intrinsic_vssra_mask_vv_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, <vscale x 32 x i16> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vssra_mask_vv_nxv32i16_nxv32i16_nxv32i16:
+define <vscale x 32 x i16> @test_vssra_vx_i16m8(<vscale x 32 x i16> %op1, i64 %shift, i64 %vl) {
+; CHECK-LABEL: test_vssra_vx_i16m8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vl8re16.v v24, (a0)
-; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu
-; CHECK-NEXT: vssra.vv v8, v16, v24, v0.t
+; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma
+; CHECK-NEXT: csrwi vxrm, 0
+; CHECK-NEXT: vssra.vx v8, v8, a0
; CHECK-NEXT: ret
entry:
- %a = call <vscale x 32 x i16> @llvm.riscv.vssra.mask.nxv32i16.nxv32i16(
- <vscale x 32 x i16> %0,
- <vscale x 32 x i16> %1,
- <vscale x 32 x i16> %2,
- <vscale x 32 x i1> %3,
- i64 %4, i64 1)
-
- ret <vscale x 32 x i16> %a
+ %0 = call <vscale x 32 x i16> @llvm.riscv.vssra.nxv32i16.i64.i64(<vscale x 32 x i16> poison, <vscale x 32 x i16> %op1, i64 %shift, i64 0, i64 %vl)
+ ret <vscale x 32 x i16> %0
}
-declare <vscale x 1 x i32> @llvm.riscv.vssra.nxv1i32.nxv1i32(
- <vscale x 1 x i32>,
- <vscale x 1 x i32>,
- <vscale x 1 x i32>,
- i64);
+declare <vscale x 32 x i16> @llvm.riscv.vssra.nxv32i16.i64.i64(<vscale x 32 x i16>, <vscale x 32 x i16>, i64, i64 immarg, i64)
-define <vscale x 1 x i32> @intrinsic_vssra_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vssra_vv_nxv1i32_nxv1i32_nxv1i32:
+define <vscale x 1 x i32> @test_vssra_vv_i32mf2(<vscale x 1 x i32> %op1, <vscale x 1 x i32> %shift, i64 %vl) {
+; CHECK-LABEL: test_vssra_vv_i32mf2:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
+; CHECK-NEXT: csrwi vxrm, 0
; CHECK-NEXT: vssra.vv v8, v8, v9
; CHECK-NEXT: ret
entry:
- %a = call <vscale x 1 x i32> @llvm.riscv.vssra.nxv1i32.nxv1i32(
- <vscale x 1 x i32> undef,
- <vscale x 1 x i32> %0,
- <vscale x 1 x i32> %1,
- i64 %2)
-
- ret <vscale x 1 x i32> %a
+ %0 = call <vscale x 1 x i32> @llvm.riscv.vssra.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> %op1, <vscale x 1 x i32> %shift, i64 0, i64 %vl)
+ ret <vscale x 1 x i32> %0
}
-declare <vscale x 1 x i32> @llvm.riscv.vssra.mask.nxv1i32.nxv1i32(
- <vscale x 1 x i32>,
- <vscale x 1 x i32>,
- <vscale x 1 x i32>,
- <vscale x 1 x i1>,
- i64,
- i64);
+declare <vscale x 1 x i32> @llvm.riscv.vssra.nxv1i32.nxv1i32.i64(<vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, i64 immarg, i64)
-define <vscale x 1 x i32> @intrinsic_vssra_mask_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vssra_mask_vv_nxv1i32_nxv1i32_nxv1i32:
+define <vscale x 1 x i32> @test_vssra_vx_i32mf2(<vscale x 1 x i32> %op1, i64 %shift, i64 %vl) {
+; CHECK-LABEL: test_vssra_vx_i32mf2:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu
-; CHECK-NEXT: vssra.vv v8, v9, v10, v0.t
+; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
+; CHECK-NEXT: csrwi vxrm, 0
+; CHECK-NEXT: vssra.vx v8, v8, a0
; CHECK-NEXT: ret
entry:
- %a = call <vscale x 1 x i32> @llvm.riscv.vssra.mask.nxv1i32.nxv1i32(
- <vscale x 1 x i32> %0,
- <vscale x 1 x i32> %1,
- <vscale x 1 x i32> %2,
- <vscale x 1 x i1> %3,
- i64 %4, i64 1)
-
- ret <vscale x 1 x i32> %a
+ %0 = call <vscale x 1 x i32> @llvm.riscv.vssra.nxv1i32.i64.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> %op1, i64 %shift, i64 0, i64 %vl)
+ ret <vscale x 1 x i32> %0
}
-declare <vscale x 2 x i32> @llvm.riscv.vssra.nxv2i32.nxv2i32(
- <vscale x 2 x i32>,
- <vscale x 2 x i32>,
- <vscale x 2 x i32>,
- i64);
+declare <vscale x 1 x i32> @llvm.riscv.vssra.nxv1i32.i64.i64(<vscale x 1 x i32>, <vscale x 1 x i32>, i64, i64 immarg, i64)
-define <vscale x 2 x i32> @intrinsic_vssra_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vssra_vv_nxv2i32_nxv2i32_nxv2i32:
+define <vscale x 2 x i32> @test_vssra_vv_i32m1(<vscale x 2 x i32> %op1, <vscale x 2 x i32> %shift, i64 %vl) {
+; CHECK-LABEL: test_vssra_vv_i32m1:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
+; CHECK-NEXT: csrwi vxrm, 0
; CHECK-NEXT: vssra.vv v8, v8, v9
; CHECK-NEXT: ret
entry:
- %a = call <vscale x 2 x i32> @llvm.riscv.vssra.nxv2i32.nxv2i32(
- <vscale x 2 x i32> undef,
- <vscale x 2 x i32> %0,
- <vscale x 2 x i32> %1,
- i64 %2)
-
- ret <vscale x 2 x i32> %a
+ %0 = call <vscale x 2 x i32> @llvm.riscv.vssra.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> poison, <vscale x 2 x i32> %op1, <vscale x 2 x i32> %shift, i64 0, i64 %vl)
+ ret <vscale x 2 x i32> %0
}
-declare <vscale x 2 x i32> @llvm.riscv.vssra.mask.nxv2i32.nxv2i32(
- <vscale x 2 x i32>,
- <vscale x 2 x i32>,
- <vscale x 2 x i32>,
- <vscale x 2 x i1>,
- i64,
- i64);
+declare <vscale x 2 x i32> @llvm.riscv.vssra.nxv2i32.nxv2i32.i64(<vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, i64 immarg, i64)
-define <vscale x 2 x i32> @intrinsic_vssra_mask_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vssra_mask_vv_nxv2i32_nxv2i32_nxv2i32:
+define <vscale x 2 x i32> @test_vssra_vx_i32m1(<vscale x 2 x i32> %op1, i64 %shift, i64 %vl) {
+; CHECK-LABEL: test_vssra_vx_i32m1:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
-; CHECK-NEXT: vssra.vv v8, v9, v10, v0.t
+; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
+; CHECK-NEXT: csrwi vxrm, 0
+; CHECK-NEXT: vssra.vx v8, v8, a0
; CHECK-NEXT: ret
entry:
- %a = call <vscale x 2 x i32> @llvm.riscv.vssra.mask.nxv2i32.nxv2i32(
- <vscale x 2 x i32> %0,
- <vscale x 2 x i32> %1,
- <vscale x 2 x i32> %2,
- <vscale x 2 x i1> %3,
- i64 %4, i64 1)
-
- ret <vscale x 2 x i32> %a
+ %0 = call <vscale x 2 x i32> @llvm.riscv.vssra.nxv2i32.i64.i64(<vscale x 2 x i32> poison, <vscale x 2 x i32> %op1, i64 %shift, i64 0, i64 %vl)
+ ret <vscale x 2 x i32> %0
}
-declare <vscale x 4 x i32> @llvm.riscv.vssra.nxv4i32.nxv4i32(
- <vscale x 4 x i32>,
- <vscale x 4 x i32>,
- <vscale x 4 x i32>,
- i64);
+declare <vscale x 2 x i32> @llvm.riscv.vssra.nxv2i32.i64.i64(<vscale x 2 x i32>, <vscale x 2 x i32>, i64, i64 immarg, i64)
-define <vscale x 4 x i32> @intrinsic_vssra_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vssra_vv_nxv4i32_nxv4i32_nxv4i32:
+define <vscale x 4 x i32> @test_vssra_vv_i32m2(<vscale x 4 x i32> %op1, <vscale x 4 x i32> %shift, i64 %vl) {
+; CHECK-LABEL: test_vssra_vv_i32m2:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
+; CHECK-NEXT: csrwi vxrm, 0
; CHECK-NEXT: vssra.vv v8, v8, v10
; CHECK-NEXT: ret
entry:
- %a = call <vscale x 4 x i32> @llvm.riscv.vssra.nxv4i32.nxv4i32(
- <vscale x 4 x i32> undef,
- <vscale x 4 x i32> %0,
- <vscale x 4 x i32> %1,
- i64 %2)
-
- ret <vscale x 4 x i32> %a
+ %0 = call <vscale x 4 x i32> @llvm.riscv.vssra.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> poison, <vscale x 4 x i32> %op1, <vscale x 4 x i32> %shift, i64 0, i64 %vl)
+ ret <vscale x 4 x i32> %0
}
-declare <vscale x 4 x i32> @llvm.riscv.vssra.mask.nxv4i32.nxv4i32(
- <vscale x 4 x i32>,
- <vscale x 4 x i32>,
- <vscale x 4 x i32>,
- <vscale x 4 x i1>,
- i64,
- i64);
+declare <vscale x 4 x i32> @llvm.riscv.vssra.nxv4i32.nxv4i32.i64(<vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, i64 immarg, i64)
-define <vscale x 4 x i32> @intrinsic_vssra_mask_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vssra_mask_vv_nxv4i32_nxv4i32_nxv4i32:
+define <vscale x 4 x i32> @test_vssra_vx_i32m2(<vscale x 4 x i32> %op1, i64 %shift, i64 %vl) {
+; CHECK-LABEL: test_vssra_vx_i32m2:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu
-; CHECK-NEXT: vssra.vv v8, v10, v12, v0.t
+; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
+; CHECK-NEXT: csrwi vxrm, 0
+; CHECK-NEXT: vssra.vx v8, v8, a0
; CHECK-NEXT: ret
entry:
- %a = call <vscale x 4 x i32> @llvm.riscv.vssra.mask.nxv4i32.nxv4i32(
- <vscale x 4 x i32> %0,
- <vscale x 4 x i32> %1,
- <vscale x 4 x i32> %2,
- <vscale x 4 x i1> %3,
- i64 %4, i64 1)
-
- ret <vscale x 4 x i32> %a
+ %0 = call <vscale x 4 x i32> @llvm.riscv.vssra.nxv4i32.i64.i64(<vscale x 4 x i32> poison, <vscale x 4 x i32> %op1, i64 %shift, i64 0, i64 %vl)
+ ret <vscale x 4 x i32> %0
}
-declare <vscale x 8 x i32> @llvm.riscv.vssra.nxv8i32.nxv8i32(
- <vscale x 8 x i32>,
- <vscale x 8 x i32>,
- <vscale x 8 x i32>,
- i64);
+declare <vscale x 4 x i32> @llvm.riscv.vssra.nxv4i32.i64.i64(<vscale x 4 x i32>, <vscale x 4 x i32>, i64, i64 immarg, i64)
-define <vscale x 8 x i32> @intrinsic_vssra_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vssra_vv_nxv8i32_nxv8i32_nxv8i32:
+define <vscale x 8 x i32> @test_vssra_vv_i32m4(<vscale x 8 x i32> %op1, <vscale x 8 x i32> %shift, i64 %vl) {
+; CHECK-LABEL: test_vssra_vv_i32m4:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
+; CHECK-NEXT: csrwi vxrm, 0
; CHECK-NEXT: vssra.vv v8, v8, v12
; CHECK-NEXT: ret
entry:
- %a = call <vscale x 8 x i32> @llvm.riscv.vssra.nxv8i32.nxv8i32(
- <vscale x 8 x i32> undef,
- <vscale x 8 x i32> %0,
- <vscale x 8 x i32> %1,
- i64 %2)
-
- ret <vscale x 8 x i32> %a
+ %0 = call <vscale x 8 x i32> @llvm.riscv.vssra.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> poison, <vscale x 8 x i32> %op1, <vscale x 8 x i32> %shift, i64 0, i64 %vl)
+ ret <vscale x 8 x i32> %0
}
-declare <vscale x 8 x i32> @llvm.riscv.vssra.mask.nxv8i32.nxv8i32(
- <vscale x 8 x i32>,
- <vscale x 8 x i32>,
- <vscale x 8 x i32>,
- <vscale x 8 x i1>,
- i64,
- i64);
+declare <vscale x 8 x i32> @llvm.riscv.vssra.nxv8i32.nxv8i32.i64(<vscale x 8 x i32>, <vscale x 8 x i32>, <vscale x 8 x i32>, i64 immarg, i64)
-define <vscale x 8 x i32> @intrinsic_vssra_mask_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vssra_mask_vv_nxv8i32_nxv8i32_nxv8i32:
+define <vscale x 8 x i32> @test_vssra_vx_i32m4(<vscale x 8 x i32> %op1, i64 %shift, i64 %vl) {
+; CHECK-LABEL: test_vssra_vx_i32m4:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu
-; CHECK-NEXT: vssra.vv v8, v12, v16, v0.t
+; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma
+; CHECK-NEXT: csrwi vxrm, 0
+; CHECK-NEXT: vssra.vx v8, v8, a0
; CHECK-NEXT: ret
entry:
- %a = call <vscale x 8 x i32> @llvm.riscv.vssra.mask.nxv8i32.nxv8i32(
- <vscale x 8 x i32> %0,
- <vscale x 8 x i32> %1,
- <vscale x 8 x i32> %2,
- <vscale x 8 x i1> %3,
- i64 %4, i64 1)
-
- ret <vscale x 8 x i32> %a
+ %0 = call <vscale x 8 x i32> @llvm.riscv.vssra.nxv8i32.i64.i64(<vscale x 8 x i32> poison, <vscale x 8 x i32> %op1, i64 %shift, i64 0, i64 %vl)
+ ret <vscale x 8 x i32> %0
}
-declare <vscale x 16 x i32> @llvm.riscv.vssra.nxv16i32.nxv16i32(
- <vscale x 16 x i32>,
- <vscale x 16 x i32>,
- <vscale x 16 x i32>,
- i64);
+declare <vscale x 8 x i32> @llvm.riscv.vssra.nxv8i32.i64.i64(<vscale x 8 x i32>, <vscale x 8 x i32>, i64, i64 immarg, i64)
-define <vscale x 16 x i32> @intrinsic_vssra_vv_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vssra_vv_nxv16i32_nxv16i32_nxv16i32:
+define <vscale x 16 x i32> @test_vssra_vv_i32m8(<vscale x 16 x i32> %op1, <vscale x 16 x i32> %shift, i64 %vl) {
+; CHECK-LABEL: test_vssra_vv_i32m8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma
+; CHECK-NEXT: csrwi vxrm, 0
; CHECK-NEXT: vssra.vv v8, v8, v16
; CHECK-NEXT: ret
entry:
- %a = call <vscale x 16 x i32> @llvm.riscv.vssra.nxv16i32.nxv16i32(
- <vscale x 16 x i32> undef,
- <vscale x 16 x i32> %0,
- <vscale x 16 x i32> %1,
- i64 %2)
-
- ret <vscale x 16 x i32> %a
+ %0 = call <vscale x 16 x i32> @llvm.riscv.vssra.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> poison, <vscale x 16 x i32> %op1, <vscale x 16 x i32> %shift, i64 0, i64 %vl)
+ ret <vscale x 16 x i32> %0
}
-declare <vscale x 16 x i32> @llvm.riscv.vssra.mask.nxv16i32.nxv16i32(
- <vscale x 16 x i32>,
- <vscale x 16 x i32>,
- <vscale x 16 x i32>,
- <vscale x 16 x i1>,
- i64,
- i64);
+declare <vscale x 16 x i32> @llvm.riscv.vssra.nxv16i32.nxv16i32.i64(<vscale x 16 x i32>, <vscale x 16 x i32>, <vscale x 16 x i32>, i64 immarg, i64)
-define <vscale x 16 x i32> @intrinsic_vssra_mask_vv_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vssra_mask_vv_nxv16i32_nxv16i32_nxv16i32:
+define <vscale x 16 x i32> @test_vssra_vx_i32m8(<vscale x 16 x i32> %op1, i64 %shift, i64 %vl) {
+; CHECK-LABEL: test_vssra_vx_i32m8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vl8re32.v v24, (a0)
-; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu
-; CHECK-NEXT: vssra.vv v8, v16, v24, v0.t
+; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma
+; CHECK-NEXT: csrwi vxrm, 0
+; CHECK-NEXT: vssra.vx v8, v8, a0
; CHECK-NEXT: ret
entry:
- %a = call <vscale x 16 x i32> @llvm.riscv.vssra.mask.nxv16i32.nxv16i32(
- <vscale x 16 x i32> %0,
- <vscale x 16 x i32> %1,
- <vscale x 16 x i32> %2,
- <vscale x 16 x i1> %3,
- i64 %4, i64 1)
-
- ret <vscale x 16 x i32> %a
+ %0 = call <vscale x 16 x i32> @llvm.riscv.vssra.nxv16i32.i64.i64(<vscale x 16 x i32> poison, <vscale x 16 x i32> %op1, i64 %shift, i64 0, i64 %vl)
+ ret <vscale x 16 x i32> %0
}
-declare <vscale x 1 x i64> @llvm.riscv.vssra.nxv1i64.nxv1i64(
- <vscale x 1 x i64>,
- <vscale x 1 x i64>,
- <vscale x 1 x i64>,
- i64);
+declare <vscale x 16 x i32> @llvm.riscv.vssra.nxv16i32.i64.i64(<vscale x 16 x i32>, <vscale x 16 x i32>, i64, i64 immarg, i64)
-define <vscale x 1 x i64> @intrinsic_vssra_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vssra_vv_nxv1i64_nxv1i64_nxv1i64:
+define <vscale x 1 x i64> @test_vssra_vv_i64m1(<vscale x 1 x i64> %op1, <vscale x 1 x i64> %shift, i64 %vl) {
+; CHECK-LABEL: test_vssra_vv_i64m1:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
+; CHECK-NEXT: csrwi vxrm, 0
; CHECK-NEXT: vssra.vv v8, v8, v9
; CHECK-NEXT: ret
entry:
- %a = call <vscale x 1 x i64> @llvm.riscv.vssra.nxv1i64.nxv1i64(
- <vscale x 1 x i64> undef,
- <vscale x 1 x i64> %0,
- <vscale x 1 x i64> %1,
- i64 %2)
-
- ret <vscale x 1 x i64> %a
+ %0 = call <vscale x 1 x i64> @llvm.riscv.vssra.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> poison, <vscale x 1 x i64> %op1, <vscale x 1 x i64> %shift, i64 0, i64 %vl)
+ ret <vscale x 1 x i64> %0
}
-declare <vscale x 1 x i64> @llvm.riscv.vssra.mask.nxv1i64.nxv1i64(
- <vscale x 1 x i64>,
- <vscale x 1 x i64>,
- <vscale x 1 x i64>,
- <vscale x 1 x i1>,
- i64,
- i64);
+declare <vscale x 1 x i64> @llvm.riscv.vssra.nxv1i64.nxv1i64.i64(<vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, i64 immarg, i64)
-define <vscale x 1 x i64> @intrinsic_vssra_mask_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vssra_mask_vv_nxv1i64_nxv1i64_nxv1i64:
+define <vscale x 1 x i64> @test_vssra_vx_i64m1(<vscale x 1 x i64> %op1, i64 %shift, i64 %vl) {
+; CHECK-LABEL: test_vssra_vx_i64m1:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu
-; CHECK-NEXT: vssra.vv v8, v9, v10, v0.t
+; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
+; CHECK-NEXT: csrwi vxrm, 0
+; CHECK-NEXT: vssra.vx v8, v8, a0
; CHECK-NEXT: ret
entry:
- %a = call <vscale x 1 x i64> @llvm.riscv.vssra.mask.nxv1i64.nxv1i64(
- <vscale x 1 x i64> %0,
- <vscale x 1 x i64> %1,
- <vscale x 1 x i64> %2,
- <vscale x 1 x i1> %3,
- i64 %4, i64 1)
-
- ret <vscale x 1 x i64> %a
+ %0 = call <vscale x 1 x i64> @llvm.riscv.vssra.nxv1i64.i64.i64(<vscale x 1 x i64> poison, <vscale x 1 x i64> %op1, i64 %shift, i64 0, i64 %vl)
+ ret <vscale x 1 x i64> %0
}
-declare <vscale x 2 x i64> @llvm.riscv.vssra.nxv2i64.nxv2i64(
- <vscale x 2 x i64>,
- <vscale x 2 x i64>,
- <vscale x 2 x i64>,
- i64);
+declare <vscale x 1 x i64> @llvm.riscv.vssra.nxv1i64.i64.i64(<vscale x 1 x i64>, <vscale x 1 x i64>, i64, i64 immarg, i64)
-define <vscale x 2 x i64> @intrinsic_vssra_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vssra_vv_nxv2i64_nxv2i64_nxv2i64:
+define <vscale x 2 x i64> @test_vssra_vv_i64m2(<vscale x 2 x i64> %op1, <vscale x 2 x i64> %shift, i64 %vl) {
+; CHECK-LABEL: test_vssra_vv_i64m2:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
+; CHECK-NEXT: csrwi vxrm, 0
; CHECK-NEXT: vssra.vv v8, v8, v10
; CHECK-NEXT: ret
entry:
- %a = call <vscale x 2 x i64> @llvm.riscv.vssra.nxv2i64.nxv2i64(
- <vscale x 2 x i64> undef,
- <vscale x 2 x i64> %0,
- <vscale x 2 x i64> %1,
- i64 %2)
-
- ret <vscale x 2 x i64> %a
+ %0 = call <vscale x 2 x i64> @llvm.riscv.vssra.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> poison, <vscale x 2 x i64> %op1, <vscale x 2 x i64> %shift, i64 0, i64 %vl)
+ ret <vscale x 2 x i64> %0
}
-declare <vscale x 2 x i64> @llvm.riscv.vssra.mask.nxv2i64.nxv2i64(
- <vscale x 2 x i64>,
- <vscale x 2 x i64>,
- <vscale x 2 x i64>,
- <vscale x 2 x i1>,
- i64,
- i64);
+declare <vscale x 2 x i64> @llvm.riscv.vssra.nxv2i64.nxv2i64.i64(<vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, i64 immarg, i64)
-define <vscale x 2 x i64> @intrinsic_vssra_mask_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vssra_mask_vv_nxv2i64_nxv2i64_nxv2i64:
+define <vscale x 2 x i64> @test_vssra_vx_i64m2(<vscale x 2 x i64> %op1, i64 %shift, i64 %vl) {
+; CHECK-LABEL: test_vssra_vx_i64m2:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu
-; CHECK-NEXT: vssra.vv v8, v10, v12, v0.t
+; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma
+; CHECK-NEXT: csrwi vxrm, 0
+; CHECK-NEXT: vssra.vx v8, v8, a0
; CHECK-NEXT: ret
entry:
- %a = call <vscale x 2 x i64> @llvm.riscv.vssra.mask.nxv2i64.nxv2i64(
- <vscale x 2 x i64> %0,
- <vscale x 2 x i64> %1,
- <vscale x 2 x i64> %2,
- <vscale x 2 x i1> %3,
- i64 %4, i64 1)
-
- ret <vscale x 2 x i64> %a
+ %0 = call <vscale x 2 x i64> @llvm.riscv.vssra.nxv2i64.i64.i64(<vscale x 2 x i64> poison, <vscale x 2 x i64> %op1, i64 %shift, i64 0, i64 %vl)
+ ret <vscale x 2 x i64> %0
}
-declare <vscale x 4 x i64> @llvm.riscv.vssra.nxv4i64.nxv4i64(
- <vscale x 4 x i64>,
- <vscale x 4 x i64>,
- <vscale x 4 x i64>,
- i64);
+declare <vscale x 2 x i64> @llvm.riscv.vssra.nxv2i64.i64.i64(<vscale x 2 x i64>, <vscale x 2 x i64>, i64, i64 immarg, i64)
-define <vscale x 4 x i64> @intrinsic_vssra_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vssra_vv_nxv4i64_nxv4i64_nxv4i64:
+define <vscale x 4 x i64> @test_vssra_vv_i64m4(<vscale x 4 x i64> %op1, <vscale x 4 x i64> %shift, i64 %vl) {
+; CHECK-LABEL: test_vssra_vv_i64m4:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
+; CHECK-NEXT: csrwi vxrm, 0
; CHECK-NEXT: vssra.vv v8, v8, v12
; CHECK-NEXT: ret
entry:
- %a = call <vscale x 4 x i64> @llvm.riscv.vssra.nxv4i64.nxv4i64(
- <vscale x 4 x i64> undef,
- <vscale x 4 x i64> %0,
- <vscale x 4 x i64> %1,
- i64 %2)
-
- ret <vscale x 4 x i64> %a
+ %0 = call <vscale x 4 x i64> @llvm.riscv.vssra.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> poison, <vscale x 4 x i64> %op1, <vscale x 4 x i64> %shift, i64 0, i64 %vl)
+ ret <vscale x 4 x i64> %0
}
-declare <vscale x 4 x i64> @llvm.riscv.vssra.mask.nxv4i64.nxv4i64(
- <vscale x 4 x i64>,
- <vscale x 4 x i64>,
- <vscale x 4 x i64>,
- <vscale x 4 x i1>,
- i64,
- i64);
+declare <vscale x 4 x i64> @llvm.riscv.vssra.nxv4i64.nxv4i64.i64(<vscale x 4 x i64>, <vscale x 4 x i64>, <vscale x 4 x i64>, i64 immarg, i64)
-define <vscale x 4 x i64> @intrinsic_vssra_mask_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vssra_mask_vv_nxv4i64_nxv4i64_nxv4i64:
+define <vscale x 4 x i64> @test_vssra_vx_i64m4(<vscale x 4 x i64> %op1, i64 %shift, i64 %vl) {
+; CHECK-LABEL: test_vssra_vx_i64m4:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu
-; CHECK-NEXT: vssra.vv v8, v12, v16, v0.t
+; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma
+; CHECK-NEXT: csrwi vxrm, 0
+; CHECK-NEXT: vssra.vx v8, v8, a0
; CHECK-NEXT: ret
entry:
- %a = call <vscale x 4 x i64> @llvm.riscv.vssra.mask.nxv4i64.nxv4i64(
- <vscale x 4 x i64> %0,
- <vscale x 4 x i64> %1,
- <vscale x 4 x i64> %2,
- <vscale x 4 x i1> %3,
- i64 %4, i64 1)
-
- ret <vscale x 4 x i64> %a
+ %0 = call <vscale x 4 x i64> @llvm.riscv.vssra.nxv4i64.i64.i64(<vscale x 4 x i64> poison, <vscale x 4 x i64> %op1, i64 %shift, i64 0, i64 %vl)
+ ret <vscale x 4 x i64> %0
}
-declare <vscale x 8 x i64> @llvm.riscv.vssra.nxv8i64.nxv8i64(
- <vscale x 8 x i64>,
- <vscale x 8 x i64>,
- <vscale x 8 x i64>,
- i64);
+declare <vscale x 4 x i64> @llvm.riscv.vssra.nxv4i64.i64.i64(<vscale x 4 x i64>, <vscale x 4 x i64>, i64, i64 immarg, i64)
-define <vscale x 8 x i64> @intrinsic_vssra_vv_nxv8i64_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vssra_vv_nxv8i64_nxv8i64_nxv8i64:
+define <vscale x 8 x i64> @test_vssra_vv_i64m8(<vscale x 8 x i64> %op1, <vscale x 8 x i64> %shift, i64 %vl) {
+; CHECK-LABEL: test_vssra_vv_i64m8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; CHECK-NEXT: csrwi vxrm, 0
; CHECK-NEXT: vssra.vv v8, v8, v16
; CHECK-NEXT: ret
entry:
- %a = call <vscale x 8 x i64> @llvm.riscv.vssra.nxv8i64.nxv8i64(
- <vscale x 8 x i64> undef,
- <vscale x 8 x i64> %0,
- <vscale x 8 x i64> %1,
- i64 %2)
-
- ret <vscale x 8 x i64> %a
+ %0 = call <vscale x 8 x i64> @llvm.riscv.vssra.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> poison, <vscale x 8 x i64> %op1, <vscale x 8 x i64> %shift, i64 0, i64 %vl)
+ ret <vscale x 8 x i64> %0
}
-declare <vscale x 8 x i64> @llvm.riscv.vssra.mask.nxv8i64.nxv8i64(
- <vscale x 8 x i64>,
- <vscale x 8 x i64>,
- <vscale x 8 x i64>,
- <vscale x 8 x i1>,
- i64,
- i64);
+declare <vscale x 8 x i64> @llvm.riscv.vssra.nxv8i64.nxv8i64.i64(<vscale x 8 x i64>, <vscale x 8 x i64>, <vscale x 8 x i64>, i64 immarg, i64)
-define <vscale x 8 x i64> @intrinsic_vssra_mask_vv_nxv8i64_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vssra_mask_vv_nxv8i64_nxv8i64_nxv8i64:
+define <vscale x 8 x i64> @test_vssra_vx_i64m8(<vscale x 8 x i64> %op1, i64 %shift, i64 %vl) {
+; CHECK-LABEL: test_vssra_vx_i64m8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vl8re64.v v24, (a0)
-; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu
-; CHECK-NEXT: vssra.vv v8, v16, v24, v0.t
+; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, ma
+; CHECK-NEXT: csrwi vxrm, 0
+; CHECK-NEXT: vssra.vx v8, v8, a0
; CHECK-NEXT: ret
entry:
- %a = call <vscale x 8 x i64> @llvm.riscv.vssra.mask.nxv8i64.nxv8i64(
- <vscale x 8 x i64> %0,
- <vscale x 8 x i64> %1,
- <vscale x 8 x i64> %2,
- <vscale x 8 x i1> %3,
- i64 %4, i64 1)
-
- ret <vscale x 8 x i64> %a
+ %0 = call <vscale x 8 x i64> @llvm.riscv.vssra.nxv8i64.i64.i64(<vscale x 8 x i64> poison, <vscale x 8 x i64> %op1, i64 %shift, i64 0, i64 %vl)
+ ret <vscale x 8 x i64> %0
+}
+
+declare <vscale x 8 x i64> @llvm.riscv.vssra.nxv8i64.i64.i64(<vscale x 8 x i64>, <vscale x 8 x i64>, i64, i64 immarg, i64)
+define <vscale x 1 x i8> @test_vssra_vv_i8mf8_m(<vscale x 1 x i1> %mask, <vscale x 1 x i8> %op1, <vscale x 1 x i8> %shift, i64 %vl) {
+; CHECK-LABEL: test_vssra_vv_i8mf8_m:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
+; CHECK-NEXT: csrwi vxrm, 0
+; CHECK-NEXT: vssra.vv v8, v8, v9, v0.t
+; CHECK-NEXT: ret
+entry:
+ %0 = call <vscale x 1 x i8> @llvm.riscv.vssra.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> poison, <vscale x 1 x i8> %op1, <vscale x 1 x i8> %shift, <vscale x 1 x i1> %mask, i64 0, i64 %vl, i64 3)
+ ret <vscale x 1 x i8> %0
}
-declare <vscale x 1 x i8> @llvm.riscv.vssra.nxv1i8(
- <vscale x 1 x i8>,
- <vscale x 1 x i8>,
- i64,
- i64);
+declare <vscale x 1 x i8> @llvm.riscv.vssra.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i1>, i64 immarg, i64, i64 immarg)
-define <vscale x 1 x i8> @intrinsic_vssra_vx_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, i64 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vssra_vx_nxv1i8_nxv1i8:
+define <vscale x 1 x i8> @test_vssra_vx_i8mf8_m(<vscale x 1 x i1> %mask, <vscale x 1 x i8> %op1, i64 %shift, i64 %vl) {
+; CHECK-LABEL: test_vssra_vx_i8mf8_m:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
-; CHECK-NEXT: vssra.vx v8, v8, a0
+; CHECK-NEXT: csrwi vxrm, 0
+; CHECK-NEXT: vssra.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
entry:
- %a = call <vscale x 1 x i8> @llvm.riscv.vssra.nxv1i8(
- <vscale x 1 x i8> undef,
- <vscale x 1 x i8> %0,
- i64 %1,
- i64 %2)
-
- ret <vscale x 1 x i8> %a
+ %0 = call <vscale x 1 x i8> @llvm.riscv.vssra.mask.nxv1i8.i64.i64(<vscale x 1 x i8> poison, <vscale x 1 x i8> %op1, i64 %shift, <vscale x 1 x i1> %mask, i64 0, i64 %vl, i64 3)
+ ret <vscale x 1 x i8> %0
}
-declare <vscale x 1 x i8> @llvm.riscv.vssra.mask.nxv1i8(
- <vscale x 1 x i8>,
- <vscale x 1 x i8>,
- i64,
- <vscale x 1 x i1>,
- i64,
- i64);
+declare <vscale x 1 x i8> @llvm.riscv.vssra.mask.nxv1i8.i64.i64(<vscale x 1 x i8>, <vscale x 1 x i8>, i64, <vscale x 1 x i1>, i64 immarg, i64, i64 immarg)
-define <vscale x 1 x i8> @intrinsic_vssra_mask_vx_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i64 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vssra_mask_vx_nxv1i8_nxv1i8:
+define <vscale x 2 x i8> @test_vssra_vv_i8mf4_m(<vscale x 2 x i1> %mask, <vscale x 2 x i8> %op1, <vscale x 2 x i8> %shift, i64 %vl) {
+; CHECK-LABEL: test_vssra_vv_i8mf4_m:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
-; CHECK-NEXT: vssra.vx v8, v9, a0, v0.t
+; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma
+; CHECK-NEXT: csrwi vxrm, 0
+; CHECK-NEXT: vssra.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
entry:
- %a = call <vscale x 1 x i8> @llvm.riscv.vssra.mask.nxv1i8(
- <vscale x 1 x i8> %0,
- <vscale x 1 x i8> %1,
- i64 %2,
- <vscale x 1 x i1> %3,
- i64 %4, i64 1)
-
- ret <vscale x 1 x i8> %a
+ %0 = call <vscale x 2 x i8> @llvm.riscv.vssra.mask.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> poison, <vscale x 2 x i8> %op1, <vscale x 2 x i8> %shift, <vscale x 2 x i1> %mask, i64 0, i64 %vl, i64 3)
+ ret <vscale x 2 x i8> %0
}
-declare <vscale x 2 x i8> @llvm.riscv.vssra.nxv2i8(
- <vscale x 2 x i8>,
- <vscale x 2 x i8>,
- i64,
- i64);
+declare <vscale x 2 x i8> @llvm.riscv.vssra.mask.nxv2i8.nxv2i8.i64(<vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i1>, i64 immarg, i64, i64 immarg)
-define <vscale x 2 x i8> @intrinsic_vssra_vx_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, i64 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vssra_vx_nxv2i8_nxv2i8:
+define <vscale x 2 x i8> @test_vssra_vx_i8mf4_m(<vscale x 2 x i1> %mask, <vscale x 2 x i8> %op1, i64 %shift, i64 %vl) {
+; CHECK-LABEL: test_vssra_vx_i8mf4_m:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
-; CHECK-NEXT: vssra.vx v8, v8, a0
+; CHECK-NEXT: csrwi vxrm, 0
+; CHECK-NEXT: vssra.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
entry:
- %a = call <vscale x 2 x i8> @llvm.riscv.vssra.nxv2i8(
- <vscale x 2 x i8> undef,
- <vscale x 2 x i8> %0,
- i64 %1,
- i64 %2)
-
- ret <vscale x 2 x i8> %a
+ %0 = call <vscale x 2 x i8> @llvm.riscv.vssra.mask.nxv2i8.i64.i64(<vscale x 2 x i8> poison, <vscale x 2 x i8> %op1, i64 %shift, <vscale x 2 x i1> %mask, i64 0, i64 %vl, i64 3)
+ ret <vscale x 2 x i8> %0
}
-declare <vscale x 2 x i8> @llvm.riscv.vssra.mask.nxv2i8(
- <vscale x 2 x i8>,
- <vscale x 2 x i8>,
- i64,
- <vscale x 2 x i1>,
- i64,
- i64);
+declare <vscale x 2 x i8> @llvm.riscv.vssra.mask.nxv2i8.i64.i64(<vscale x 2 x i8>, <vscale x 2 x i8>, i64, <vscale x 2 x i1>, i64 immarg, i64, i64 immarg)
-define <vscale x 2 x i8> @intrinsic_vssra_mask_vx_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, i64 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vssra_mask_vx_nxv2i8_nxv2i8:
+define <vscale x 4 x i8> @test_vssra_vv_i8mf2_m(<vscale x 4 x i1> %mask, <vscale x 4 x i8> %op1, <vscale x 4 x i8> %shift, i64 %vl) {
+; CHECK-LABEL: test_vssra_vv_i8mf2_m:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
-; CHECK-NEXT: vssra.vx v8, v9, a0, v0.t
+; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
+; CHECK-NEXT: csrwi vxrm, 0
+; CHECK-NEXT: vssra.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
entry:
- %a = call <vscale x 2 x i8> @llvm.riscv.vssra.mask.nxv2i8(
- <vscale x 2 x i8> %0,
- <vscale x 2 x i8> %1,
- i64 %2,
- <vscale x 2 x i1> %3,
- i64 %4, i64 1)
-
- ret <vscale x 2 x i8> %a
+ %0 = call <vscale x 4 x i8> @llvm.riscv.vssra.mask.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> poison, <vscale x 4 x i8> %op1, <vscale x 4 x i8> %shift, <vscale x 4 x i1> %mask, i64 0, i64 %vl, i64 3)
+ ret <vscale x 4 x i8> %0
}
-declare <vscale x 4 x i8> @llvm.riscv.vssra.nxv4i8(
- <vscale x 4 x i8>,
- <vscale x 4 x i8>,
- i64,
- i64);
+declare <vscale x 4 x i8> @llvm.riscv.vssra.mask.nxv4i8.nxv4i8.i64(<vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i1>, i64 immarg, i64, i64 immarg)
-define <vscale x 4 x i8> @intrinsic_vssra_vx_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, i64 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vssra_vx_nxv4i8_nxv4i8:
+define <vscale x 4 x i8> @test_vssra_vx_i8mf2_m(<vscale x 4 x i1> %mask, <vscale x 4 x i8> %op1, i64 %shift, i64 %vl) {
+; CHECK-LABEL: test_vssra_vx_i8mf2_m:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
-; CHECK-NEXT: vssra.vx v8, v8, a0
+; CHECK-NEXT: csrwi vxrm, 0
+; CHECK-NEXT: vssra.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
entry:
- %a = call <vscale x 4 x i8> @llvm.riscv.vssra.nxv4i8(
- <vscale x 4 x i8> undef,
- <vscale x 4 x i8> %0,
- i64 %1,
- i64 %2)
-
- ret <vscale x 4 x i8> %a
+ %0 = call <vscale x 4 x i8> @llvm.riscv.vssra.mask.nxv4i8.i64.i64(<vscale x 4 x i8> poison, <vscale x 4 x i8> %op1, i64 %shift, <vscale x 4 x i1> %mask, i64 0, i64 %vl, i64 3)
+ ret <vscale x 4 x i8> %0
}
-declare <vscale x 4 x i8> @llvm.riscv.vssra.mask.nxv4i8(
- <vscale x 4 x i8>,
- <vscale x 4 x i8>,
- i64,
- <vscale x 4 x i1>,
- i64,
- i64);
+declare <vscale x 4 x i8> @llvm.riscv.vssra.mask.nxv4i8.i64.i64(<vscale x 4 x i8>, <vscale x 4 x i8>, i64, <vscale x 4 x i1>, i64 immarg, i64, i64 immarg)
-define <vscale x 4 x i8> @intrinsic_vssra_mask_vx_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, i64 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vssra_mask_vx_nxv4i8_nxv4i8:
+define <vscale x 8 x i8> @test_vssra_vv_i8m1_m(<vscale x 8 x i1> %mask, <vscale x 8 x i8> %op1, <vscale x 8 x i8> %shift, i64 %vl) {
+; CHECK-LABEL: test_vssra_vv_i8m1_m:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
-; CHECK-NEXT: vssra.vx v8, v9, a0, v0.t
+; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma
+; CHECK-NEXT: csrwi vxrm, 0
+; CHECK-NEXT: vssra.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
entry:
- %a = call <vscale x 4 x i8> @llvm.riscv.vssra.mask.nxv4i8(
- <vscale x 4 x i8> %0,
- <vscale x 4 x i8> %1,
- i64 %2,
- <vscale x 4 x i1> %3,
- i64 %4, i64 1)
-
- ret <vscale x 4 x i8> %a
+ %0 = call <vscale x 8 x i8> @llvm.riscv.vssra.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> poison, <vscale x 8 x i8> %op1, <vscale x 8 x i8> %shift, <vscale x 8 x i1> %mask, i64 0, i64 %vl, i64 3)
+ ret <vscale x 8 x i8> %0
}
-declare <vscale x 8 x i8> @llvm.riscv.vssra.nxv8i8(
- <vscale x 8 x i8>,
- <vscale x 8 x i8>,
- i64,
- i64);
+declare <vscale x 8 x i8> @llvm.riscv.vssra.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i1>, i64 immarg, i64, i64 immarg)
-define <vscale x 8 x i8> @intrinsic_vssra_vx_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, i64 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vssra_vx_nxv8i8_nxv8i8:
+define <vscale x 8 x i8> @test_vssra_vx_i8m1_m(<vscale x 8 x i1> %mask, <vscale x 8 x i8> %op1, i64 %shift, i64 %vl) {
+; CHECK-LABEL: test_vssra_vx_i8m1_m:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
-; CHECK-NEXT: vssra.vx v8, v8, a0
+; CHECK-NEXT: csrwi vxrm, 0
+; CHECK-NEXT: vssra.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
entry:
- %a = call <vscale x 8 x i8> @llvm.riscv.vssra.nxv8i8(
- <vscale x 8 x i8> undef,
- <vscale x 8 x i8> %0,
- i64 %1,
- i64 %2)
-
- ret <vscale x 8 x i8> %a
+ %0 = call <vscale x 8 x i8> @llvm.riscv.vssra.mask.nxv8i8.i64.i64(<vscale x 8 x i8> poison, <vscale x 8 x i8> %op1, i64 %shift, <vscale x 8 x i1> %mask, i64 0, i64 %vl, i64 3)
+ ret <vscale x 8 x i8> %0
}
-declare <vscale x 8 x i8> @llvm.riscv.vssra.mask.nxv8i8(
- <vscale x 8 x i8>,
- <vscale x 8 x i8>,
- i64,
- <vscale x 8 x i1>,
- i64,
- i64);
+declare <vscale x 8 x i8> @llvm.riscv.vssra.mask.nxv8i8.i64.i64(<vscale x 8 x i8>, <vscale x 8 x i8>, i64, <vscale x 8 x i1>, i64 immarg, i64, i64 immarg)
-define <vscale x 8 x i8> @intrinsic_vssra_mask_vx_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, i64 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vssra_mask_vx_nxv8i8_nxv8i8:
+define <vscale x 16 x i8> @test_vssra_vv_i8m2_m(<vscale x 16 x i1> %mask, <vscale x 16 x i8> %op1, <vscale x 16 x i8> %shift, i64 %vl) {
+; CHECK-LABEL: test_vssra_vv_i8m2_m:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
-; CHECK-NEXT: vssra.vx v8, v9, a0, v0.t
+; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma
+; CHECK-NEXT: csrwi vxrm, 0
+; CHECK-NEXT: vssra.vv v8, v8, v10, v0.t
; CHECK-NEXT: ret
entry:
- %a = call <vscale x 8 x i8> @llvm.riscv.vssra.mask.nxv8i8(
- <vscale x 8 x i8> %0,
- <vscale x 8 x i8> %1,
- i64 %2,
- <vscale x 8 x i1> %3,
- i64 %4, i64 1)
-
- ret <vscale x 8 x i8> %a
+ %0 = call <vscale x 16 x i8> @llvm.riscv.vssra.mask.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> poison, <vscale x 16 x i8> %op1, <vscale x 16 x i8> %shift, <vscale x 16 x i1> %mask, i64 0, i64 %vl, i64 3)
+ ret <vscale x 16 x i8> %0
}
-declare <vscale x 16 x i8> @llvm.riscv.vssra.nxv16i8(
- <vscale x 16 x i8>,
- <vscale x 16 x i8>,
- i64,
- i64);
+declare <vscale x 16 x i8> @llvm.riscv.vssra.mask.nxv16i8.nxv16i8.i64(<vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i1>, i64 immarg, i64, i64 immarg)
-define <vscale x 16 x i8> @intrinsic_vssra_vx_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, i64 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vssra_vx_nxv16i8_nxv16i8:
+define <vscale x 16 x i8> @test_vssra_vx_i8m2_m(<vscale x 16 x i1> %mask, <vscale x 16 x i8> %op1, i64 %shift, i64 %vl) {
+; CHECK-LABEL: test_vssra_vx_i8m2_m:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma
-; CHECK-NEXT: vssra.vx v8, v8, a0
+; CHECK-NEXT: csrwi vxrm, 0
+; CHECK-NEXT: vssra.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
entry:
- %a = call <vscale x 16 x i8> @llvm.riscv.vssra.nxv16i8(
- <vscale x 16 x i8> undef,
- <vscale x 16 x i8> %0,
- i64 %1,
- i64 %2)
-
- ret <vscale x 16 x i8> %a
+ %0 = call <vscale x 16 x i8> @llvm.riscv.vssra.mask.nxv16i8.i64.i64(<vscale x 16 x i8> poison, <vscale x 16 x i8> %op1, i64 %shift, <vscale x 16 x i1> %mask, i64 0, i64 %vl, i64 3)
+ ret <vscale x 16 x i8> %0
}
-declare <vscale x 16 x i8> @llvm.riscv.vssra.mask.nxv16i8(
- <vscale x 16 x i8>,
- <vscale x 16 x i8>,
- i64,
- <vscale x 16 x i1>,
- i64,
- i64);
+declare <vscale x 16 x i8> @llvm.riscv.vssra.mask.nxv16i8.i64.i64(<vscale x 16 x i8>, <vscale x 16 x i8>, i64, <vscale x 16 x i1>, i64 immarg, i64, i64 immarg)
-define <vscale x 16 x i8> @intrinsic_vssra_mask_vx_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, i64 %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vssra_mask_vx_nxv16i8_nxv16i8:
+define <vscale x 32 x i8> @test_vssra_vv_i8m4_m(<vscale x 32 x i1> %mask, <vscale x 32 x i8> %op1, <vscale x 32 x i8> %shift, i64 %vl) {
+; CHECK-LABEL: test_vssra_vv_i8m4_m:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu
-; CHECK-NEXT: vssra.vx v8, v10, a0, v0.t
+; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma
+; CHECK-NEXT: csrwi vxrm, 0
+; CHECK-NEXT: vssra.vv v8, v8, v12, v0.t
; CHECK-NEXT: ret
entry:
- %a = call <vscale x 16 x i8> @llvm.riscv.vssra.mask.nxv16i8(
- <vscale x 16 x i8> %0,
- <vscale x 16 x i8> %1,
- i64 %2,
- <vscale x 16 x i1> %3,
- i64 %4, i64 1)
-
- ret <vscale x 16 x i8> %a
+ %0 = call <vscale x 32 x i8> @llvm.riscv.vssra.mask.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> poison, <vscale x 32 x i8> %op1, <vscale x 32 x i8> %shift, <vscale x 32 x i1> %mask, i64 0, i64 %vl, i64 3)
+ ret <vscale x 32 x i8> %0
}
-declare <vscale x 32 x i8> @llvm.riscv.vssra.nxv32i8(
- <vscale x 32 x i8>,
- <vscale x 32 x i8>,
- i64,
- i64);
+declare <vscale x 32 x i8> @llvm.riscv.vssra.mask.nxv32i8.nxv32i8.i64(<vscale x 32 x i8>, <vscale x 32 x i8>, <vscale x 32 x i8>, <vscale x 32 x i1>, i64 immarg, i64, i64 immarg)
-define <vscale x 32 x i8> @intrinsic_vssra_vx_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, i64 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vssra_vx_nxv32i8_nxv32i8:
+define <vscale x 32 x i8> @test_vssra_vx_i8m4_m(<vscale x 32 x i1> %mask, <vscale x 32 x i8> %op1, i64 %shift, i64 %vl) {
+; CHECK-LABEL: test_vssra_vx_i8m4_m:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma
-; CHECK-NEXT: vssra.vx v8, v8, a0
+; CHECK-NEXT: csrwi vxrm, 0
+; CHECK-NEXT: vssra.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
entry:
- %a = call <vscale x 32 x i8> @llvm.riscv.vssra.nxv32i8(
- <vscale x 32 x i8> undef,
- <vscale x 32 x i8> %0,
- i64 %1,
- i64 %2)
-
- ret <vscale x 32 x i8> %a
+ %0 = call <vscale x 32 x i8> @llvm.riscv.vssra.mask.nxv32i8.i64.i64(<vscale x 32 x i8> poison, <vscale x 32 x i8> %op1, i64 %shift, <vscale x 32 x i1> %mask, i64 0, i64 %vl, i64 3)
+ ret <vscale x 32 x i8> %0
}
-declare <vscale x 32 x i8> @llvm.riscv.vssra.mask.nxv32i8(
- <vscale x 32 x i8>,
- <vscale x 32 x i8>,
- i64,
- <vscale x 32 x i1>,
- i64,
- i64);
+declare <vscale x 32 x i8> @llvm.riscv.vssra.mask.nxv32i8.i64.i64(<vscale x 32 x i8>, <vscale x 32 x i8>, i64, <vscale x 32 x i1>, i64 immarg, i64, i64 immarg)
-define <vscale x 32 x i8> @intrinsic_vssra_mask_vx_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, i64 %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vssra_mask_vx_nxv32i8_nxv32i8:
+define <vscale x 64 x i8> @test_vssra_vv_i8m8_m(<vscale x 64 x i1> %mask, <vscale x 64 x i8> %op1, <vscale x 64 x i8> %shift, i64 %vl) {
+; CHECK-LABEL: test_vssra_vv_i8m8_m:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu
-; CHECK-NEXT: vssra.vx v8, v12, a0, v0.t
+; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma
+; CHECK-NEXT: csrwi vxrm, 0
+; CHECK-NEXT: vssra.vv v8, v8, v16, v0.t
; CHECK-NEXT: ret
entry:
- %a = call <vscale x 32 x i8> @llvm.riscv.vssra.mask.nxv32i8(
- <vscale x 32 x i8> %0,
- <vscale x 32 x i8> %1,
- i64 %2,
- <vscale x 32 x i1> %3,
- i64 %4, i64 1)
-
- ret <vscale x 32 x i8> %a
+ %0 = call <vscale x 64 x i8> @llvm.riscv.vssra.mask.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> poison, <vscale x 64 x i8> %op1, <vscale x 64 x i8> %shift, <vscale x 64 x i1> %mask, i64 0, i64 %vl, i64 3)
+ ret <vscale x 64 x i8> %0
}
-declare <vscale x 64 x i8> @llvm.riscv.vssra.nxv64i8(
- <vscale x 64 x i8>,
- <vscale x 64 x i8>,
- i64,
- i64);
+declare <vscale x 64 x i8> @llvm.riscv.vssra.mask.nxv64i8.nxv64i8.i64(<vscale x 64 x i8>, <vscale x 64 x i8>, <vscale x 64 x i8>, <vscale x 64 x i1>, i64 immarg, i64, i64 immarg)
-define <vscale x 64 x i8> @intrinsic_vssra_vx_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, i64 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vssra_vx_nxv64i8_nxv64i8:
+define <vscale x 64 x i8> @test_vssra_vx_i8m8_m(<vscale x 64 x i1> %mask, <vscale x 64 x i8> %op1, i64 %shift, i64 %vl) {
+; CHECK-LABEL: test_vssra_vx_i8m8_m:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma
-; CHECK-NEXT: vssra.vx v8, v8, a0
+; CHECK-NEXT: csrwi vxrm, 0
+; CHECK-NEXT: vssra.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
entry:
- %a = call <vscale x 64 x i8> @llvm.riscv.vssra.nxv64i8(
- <vscale x 64 x i8> undef,
- <vscale x 64 x i8> %0,
- i64 %1,
- i64 %2)
-
- ret <vscale x 64 x i8> %a
+ %0 = call <vscale x 64 x i8> @llvm.riscv.vssra.mask.nxv64i8.i64.i64(<vscale x 64 x i8> poison, <vscale x 64 x i8> %op1, i64 %shift, <vscale x 64 x i1> %mask, i64 0, i64 %vl, i64 3)
+ ret <vscale x 64 x i8> %0
}
-declare <vscale x 64 x i8> @llvm.riscv.vssra.mask.nxv64i8(
- <vscale x 64 x i8>,
- <vscale x 64 x i8>,
- i64,
- <vscale x 64 x i1>,
- i64,
- i64);
+declare <vscale x 64 x i8> @llvm.riscv.vssra.mask.nxv64i8.i64.i64(<vscale x 64 x i8>, <vscale x 64 x i8>, i64, <vscale x 64 x i1>, i64 immarg, i64, i64 immarg)
-define <vscale x 64 x i8> @intrinsic_vssra_mask_vx_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, i64 %2, <vscale x 64 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vssra_mask_vx_nxv64i8_nxv64i8:
+define <vscale x 1 x i16> @test_vssra_vv_i16mf4_m(<vscale x 1 x i1> %mask, <vscale x 1 x i16> %op1, <vscale x 1 x i16> %shift, i64 %vl) {
+; CHECK-LABEL: test_vssra_vv_i16mf4_m:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu
-; CHECK-NEXT: vssra.vx v8, v16, a0, v0.t
+; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
+; CHECK-NEXT: csrwi vxrm, 0
+; CHECK-NEXT: vssra.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
entry:
- %a = call <vscale x 64 x i8> @llvm.riscv.vssra.mask.nxv64i8(
- <vscale x 64 x i8> %0,
- <vscale x 64 x i8> %1,
- i64 %2,
- <vscale x 64 x i1> %3,
- i64 %4, i64 1)
-
- ret <vscale x 64 x i8> %a
+ %0 = call <vscale x 1 x i16> @llvm.riscv.vssra.mask.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> poison, <vscale x 1 x i16> %op1, <vscale x 1 x i16> %shift, <vscale x 1 x i1> %mask, i64 0, i64 %vl, i64 3)
+ ret <vscale x 1 x i16> %0
}
-declare <vscale x 1 x i16> @llvm.riscv.vssra.nxv1i16(
- <vscale x 1 x i16>,
- <vscale x 1 x i16>,
- i64,
- i64);
+declare <vscale x 1 x i16> @llvm.riscv.vssra.mask.nxv1i16.nxv1i16.i64(<vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i1>, i64 immarg, i64, i64 immarg)
-define <vscale x 1 x i16> @intrinsic_vssra_vx_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, i64 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vssra_vx_nxv1i16_nxv1i16:
+define <vscale x 1 x i16> @test_vssra_vx_i16mf4_m(<vscale x 1 x i1> %mask, <vscale x 1 x i16> %op1, i64 %shift, i64 %vl) {
+; CHECK-LABEL: test_vssra_vx_i16mf4_m:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
-; CHECK-NEXT: vssra.vx v8, v8, a0
+; CHECK-NEXT: csrwi vxrm, 0
+; CHECK-NEXT: vssra.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
entry:
- %a = call <vscale x 1 x i16> @llvm.riscv.vssra.nxv1i16(
- <vscale x 1 x i16> undef,
- <vscale x 1 x i16> %0,
- i64 %1,
- i64 %2)
-
- ret <vscale x 1 x i16> %a
+ %0 = call <vscale x 1 x i16> @llvm.riscv.vssra.mask.nxv1i16.i64.i64(<vscale x 1 x i16> poison, <vscale x 1 x i16> %op1, i64 %shift, <vscale x 1 x i1> %mask, i64 0, i64 %vl, i64 3)
+ ret <vscale x 1 x i16> %0
}
-declare <vscale x 1 x i16> @llvm.riscv.vssra.mask.nxv1i16(
- <vscale x 1 x i16>,
- <vscale x 1 x i16>,
- i64,
- <vscale x 1 x i1>,
- i64,
- i64);
+declare <vscale x 1 x i16> @llvm.riscv.vssra.mask.nxv1i16.i64.i64(<vscale x 1 x i16>, <vscale x 1 x i16>, i64, <vscale x 1 x i1>, i64 immarg, i64, i64 immarg)
-define <vscale x 1 x i16> @intrinsic_vssra_mask_vx_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, i64 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vssra_mask_vx_nxv1i16_nxv1i16:
+define <vscale x 2 x i16> @test_vssra_vv_i16mf2_m(<vscale x 2 x i1> %mask, <vscale x 2 x i16> %op1, <vscale x 2 x i16> %shift, i64 %vl) {
+; CHECK-LABEL: test_vssra_vv_i16mf2_m:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
-; CHECK-NEXT: vssra.vx v8, v9, a0, v0.t
+; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
+; CHECK-NEXT: csrwi vxrm, 0
+; CHECK-NEXT: vssra.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
entry:
- %a = call <vscale x 1 x i16> @llvm.riscv.vssra.mask.nxv1i16(
- <vscale x 1 x i16> %0,
- <vscale x 1 x i16> %1,
- i64 %2,
- <vscale x 1 x i1> %3,
- i64 %4, i64 1)
-
- ret <vscale x 1 x i16> %a
+ %0 = call <vscale x 2 x i16> @llvm.riscv.vssra.mask.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> poison, <vscale x 2 x i16> %op1, <vscale x 2 x i16> %shift, <vscale x 2 x i1> %mask, i64 0, i64 %vl, i64 3)
+ ret <vscale x 2 x i16> %0
}
-declare <vscale x 2 x i16> @llvm.riscv.vssra.nxv2i16(
- <vscale x 2 x i16>,
- <vscale x 2 x i16>,
- i64,
- i64);
+declare <vscale x 2 x i16> @llvm.riscv.vssra.mask.nxv2i16.nxv2i16.i64(<vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i1>, i64 immarg, i64, i64 immarg)
-define <vscale x 2 x i16> @intrinsic_vssra_vx_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, i64 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vssra_vx_nxv2i16_nxv2i16:
+define <vscale x 2 x i16> @test_vssra_vx_i16mf2_m(<vscale x 2 x i1> %mask, <vscale x 2 x i16> %op1, i64 %shift, i64 %vl) {
+; CHECK-LABEL: test_vssra_vx_i16mf2_m:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
-; CHECK-NEXT: vssra.vx v8, v8, a0
+; CHECK-NEXT: csrwi vxrm, 0
+; CHECK-NEXT: vssra.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
entry:
- %a = call <vscale x 2 x i16> @llvm.riscv.vssra.nxv2i16(
- <vscale x 2 x i16> undef,
- <vscale x 2 x i16> %0,
- i64 %1,
- i64 %2)
-
- ret <vscale x 2 x i16> %a
+ %0 = call <vscale x 2 x i16> @llvm.riscv.vssra.mask.nxv2i16.i64.i64(<vscale x 2 x i16> poison, <vscale x 2 x i16> %op1, i64 %shift, <vscale x 2 x i1> %mask, i64 0, i64 %vl, i64 3)
+ ret <vscale x 2 x i16> %0
}
-declare <vscale x 2 x i16> @llvm.riscv.vssra.mask.nxv2i16(
- <vscale x 2 x i16>,
- <vscale x 2 x i16>,
- i64,
- <vscale x 2 x i1>,
- i64,
- i64);
+declare <vscale x 2 x i16> @llvm.riscv.vssra.mask.nxv2i16.i64.i64(<vscale x 2 x i16>, <vscale x 2 x i16>, i64, <vscale x 2 x i1>, i64 immarg, i64, i64 immarg)
-define <vscale x 2 x i16> @intrinsic_vssra_mask_vx_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, i64 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vssra_mask_vx_nxv2i16_nxv2i16:
+define <vscale x 4 x i16> @test_vssra_vv_i16m1_m(<vscale x 4 x i1> %mask, <vscale x 4 x i16> %op1, <vscale x 4 x i16> %shift, i64 %vl) {
+; CHECK-LABEL: test_vssra_vv_i16m1_m:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT: vssra.vx v8, v9, a0, v0.t
+; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
+; CHECK-NEXT: csrwi vxrm, 0
+; CHECK-NEXT: vssra.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
entry:
- %a = call <vscale x 2 x i16> @llvm.riscv.vssra.mask.nxv2i16(
- <vscale x 2 x i16> %0,
- <vscale x 2 x i16> %1,
- i64 %2,
- <vscale x 2 x i1> %3,
- i64 %4, i64 1)
-
- ret <vscale x 2 x i16> %a
+ %0 = call <vscale x 4 x i16> @llvm.riscv.vssra.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> poison, <vscale x 4 x i16> %op1, <vscale x 4 x i16> %shift, <vscale x 4 x i1> %mask, i64 0, i64 %vl, i64 3)
+ ret <vscale x 4 x i16> %0
}
-declare <vscale x 4 x i16> @llvm.riscv.vssra.nxv4i16(
- <vscale x 4 x i16>,
- <vscale x 4 x i16>,
- i64,
- i64);
+declare <vscale x 4 x i16> @llvm.riscv.vssra.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i1>, i64 immarg, i64, i64 immarg)
-define <vscale x 4 x i16> @intrinsic_vssra_vx_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, i64 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vssra_vx_nxv4i16_nxv4i16:
+define <vscale x 4 x i16> @test_vssra_vx_i16m1_m(<vscale x 4 x i1> %mask, <vscale x 4 x i16> %op1, i64 %shift, i64 %vl) {
+; CHECK-LABEL: test_vssra_vx_i16m1_m:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
-; CHECK-NEXT: vssra.vx v8, v8, a0
+; CHECK-NEXT: csrwi vxrm, 0
+; CHECK-NEXT: vssra.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
entry:
- %a = call <vscale x 4 x i16> @llvm.riscv.vssra.nxv4i16(
- <vscale x 4 x i16> undef,
- <vscale x 4 x i16> %0,
- i64 %1,
- i64 %2)
-
- ret <vscale x 4 x i16> %a
+ %0 = call <vscale x 4 x i16> @llvm.riscv.vssra.mask.nxv4i16.i64.i64(<vscale x 4 x i16> poison, <vscale x 4 x i16> %op1, i64 %shift, <vscale x 4 x i1> %mask, i64 0, i64 %vl, i64 3)
+ ret <vscale x 4 x i16> %0
}
-declare <vscale x 4 x i16> @llvm.riscv.vssra.mask.nxv4i16(
- <vscale x 4 x i16>,
- <vscale x 4 x i16>,
- i64,
- <vscale x 4 x i1>,
- i64,
- i64);
+declare <vscale x 4 x i16> @llvm.riscv.vssra.mask.nxv4i16.i64.i64(<vscale x 4 x i16>, <vscale x 4 x i16>, i64, <vscale x 4 x i1>, i64 immarg, i64, i64 immarg)
-define <vscale x 4 x i16> @intrinsic_vssra_mask_vx_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, i64 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vssra_mask_vx_nxv4i16_nxv4i16:
+define <vscale x 8 x i16> @test_vssra_vv_i16m2_m(<vscale x 8 x i1> %mask, <vscale x 8 x i16> %op1, <vscale x 8 x i16> %shift, i64 %vl) {
+; CHECK-LABEL: test_vssra_vv_i16m2_m:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT: vssra.vx v8, v9, a0, v0.t
+; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
+; CHECK-NEXT: csrwi vxrm, 0
+; CHECK-NEXT: vssra.vv v8, v8, v10, v0.t
; CHECK-NEXT: ret
entry:
- %a = call <vscale x 4 x i16> @llvm.riscv.vssra.mask.nxv4i16(
- <vscale x 4 x i16> %0,
- <vscale x 4 x i16> %1,
- i64 %2,
- <vscale x 4 x i1> %3,
- i64 %4, i64 1)
-
- ret <vscale x 4 x i16> %a
+ %0 = call <vscale x 8 x i16> @llvm.riscv.vssra.mask.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> poison, <vscale x 8 x i16> %op1, <vscale x 8 x i16> %shift, <vscale x 8 x i1> %mask, i64 0, i64 %vl, i64 3)
+ ret <vscale x 8 x i16> %0
}
-declare <vscale x 8 x i16> @llvm.riscv.vssra.nxv8i16(
- <vscale x 8 x i16>,
- <vscale x 8 x i16>,
- i64,
- i64);
+declare <vscale x 8 x i16> @llvm.riscv.vssra.mask.nxv8i16.nxv8i16.i64(<vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i1>, i64 immarg, i64, i64 immarg)
-define <vscale x 8 x i16> @intrinsic_vssra_vx_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, i64 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vssra_vx_nxv8i16_nxv8i16:
+define <vscale x 8 x i16> @test_vssra_vx_i16m2_m(<vscale x 8 x i1> %mask, <vscale x 8 x i16> %op1, i64 %shift, i64 %vl) {
+; CHECK-LABEL: test_vssra_vx_i16m2_m:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
-; CHECK-NEXT: vssra.vx v8, v8, a0
+; CHECK-NEXT: csrwi vxrm, 0
+; CHECK-NEXT: vssra.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
entry:
- %a = call <vscale x 8 x i16> @llvm.riscv.vssra.nxv8i16(
- <vscale x 8 x i16> undef,
- <vscale x 8 x i16> %0,
- i64 %1,
- i64 %2)
-
- ret <vscale x 8 x i16> %a
+ %0 = call <vscale x 8 x i16> @llvm.riscv.vssra.mask.nxv8i16.i64.i64(<vscale x 8 x i16> poison, <vscale x 8 x i16> %op1, i64 %shift, <vscale x 8 x i1> %mask, i64 0, i64 %vl, i64 3)
+ ret <vscale x 8 x i16> %0
}
-declare <vscale x 8 x i16> @llvm.riscv.vssra.mask.nxv8i16(
- <vscale x 8 x i16>,
- <vscale x 8 x i16>,
- i64,
- <vscale x 8 x i1>,
- i64,
- i64);
+declare <vscale x 8 x i16> @llvm.riscv.vssra.mask.nxv8i16.i64.i64(<vscale x 8 x i16>, <vscale x 8 x i16>, i64, <vscale x 8 x i1>, i64 immarg, i64, i64 immarg)
-define <vscale x 8 x i16> @intrinsic_vssra_mask_vx_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, i64 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vssra_mask_vx_nxv8i16_nxv8i16:
+define <vscale x 16 x i16> @test_vssra_vv_i16m4_m(<vscale x 16 x i1> %mask, <vscale x 16 x i16> %op1, <vscale x 16 x i16> %shift, i64 %vl) {
+; CHECK-LABEL: test_vssra_vv_i16m4_m:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
-; CHECK-NEXT: vssra.vx v8, v10, a0, v0.t
+; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma
+; CHECK-NEXT: csrwi vxrm, 0
+; CHECK-NEXT: vssra.vv v8, v8, v12, v0.t
; CHECK-NEXT: ret
entry:
- %a = call <vscale x 8 x i16> @llvm.riscv.vssra.mask.nxv8i16(
- <vscale x 8 x i16> %0,
- <vscale x 8 x i16> %1,
- i64 %2,
- <vscale x 8 x i1> %3,
- i64 %4, i64 1)
-
- ret <vscale x 8 x i16> %a
+ %0 = call <vscale x 16 x i16> @llvm.riscv.vssra.mask.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> poison, <vscale x 16 x i16> %op1, <vscale x 16 x i16> %shift, <vscale x 16 x i1> %mask, i64 0, i64 %vl, i64 3)
+ ret <vscale x 16 x i16> %0
}
-declare <vscale x 16 x i16> @llvm.riscv.vssra.nxv16i16(
- <vscale x 16 x i16>,
- <vscale x 16 x i16>,
- i64,
- i64);
+declare <vscale x 16 x i16> @llvm.riscv.vssra.mask.nxv16i16.nxv16i16.i64(<vscale x 16 x i16>, <vscale x 16 x i16>, <vscale x 16 x i16>, <vscale x 16 x i1>, i64 immarg, i64, i64 immarg)
-define <vscale x 16 x i16> @intrinsic_vssra_vx_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, i64 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vssra_vx_nxv16i16_nxv16i16:
+define <vscale x 16 x i16> @test_vssra_vx_i16m4_m(<vscale x 16 x i1> %mask, <vscale x 16 x i16> %op1, i64 %shift, i64 %vl) {
+; CHECK-LABEL: test_vssra_vx_i16m4_m:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma
-; CHECK-NEXT: vssra.vx v8, v8, a0
+; CHECK-NEXT: csrwi vxrm, 0
+; CHECK-NEXT: vssra.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
entry:
- %a = call <vscale x 16 x i16> @llvm.riscv.vssra.nxv16i16(
- <vscale x 16 x i16> undef,
- <vscale x 16 x i16> %0,
- i64 %1,
- i64 %2)
-
- ret <vscale x 16 x i16> %a
+ %0 = call <vscale x 16 x i16> @llvm.riscv.vssra.mask.nxv16i16.i64.i64(<vscale x 16 x i16> poison, <vscale x 16 x i16> %op1, i64 %shift, <vscale x 16 x i1> %mask, i64 0, i64 %vl, i64 3)
+ ret <vscale x 16 x i16> %0
}
-declare <vscale x 16 x i16> @llvm.riscv.vssra.mask.nxv16i16(
- <vscale x 16 x i16>,
- <vscale x 16 x i16>,
- i64,
- <vscale x 16 x i1>,
- i64,
- i64);
+declare <vscale x 16 x i16> @llvm.riscv.vssra.mask.nxv16i16.i64.i64(<vscale x 16 x i16>, <vscale x 16 x i16>, i64, <vscale x 16 x i1>, i64 immarg, i64, i64 immarg)
-define <vscale x 16 x i16> @intrinsic_vssra_mask_vx_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, i64 %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vssra_mask_vx_nxv16i16_nxv16i16:
+define <vscale x 32 x i16> @test_vssra_vv_i16m8_m(<vscale x 32 x i1> %mask, <vscale x 32 x i16> %op1, <vscale x 32 x i16> %shift, i64 %vl) {
+; CHECK-LABEL: test_vssra_vv_i16m8_m:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu
-; CHECK-NEXT: vssra.vx v8, v12, a0, v0.t
+; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma
+; CHECK-NEXT: csrwi vxrm, 0
+; CHECK-NEXT: vssra.vv v8, v8, v16, v0.t
; CHECK-NEXT: ret
entry:
- %a = call <vscale x 16 x i16> @llvm.riscv.vssra.mask.nxv16i16(
- <vscale x 16 x i16> %0,
- <vscale x 16 x i16> %1,
- i64 %2,
- <vscale x 16 x i1> %3,
- i64 %4, i64 1)
-
- ret <vscale x 16 x i16> %a
+ %0 = call <vscale x 32 x i16> @llvm.riscv.vssra.mask.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> poison, <vscale x 32 x i16> %op1, <vscale x 32 x i16> %shift, <vscale x 32 x i1> %mask, i64 0, i64 %vl, i64 3)
+ ret <vscale x 32 x i16> %0
}
-declare <vscale x 32 x i16> @llvm.riscv.vssra.nxv32i16(
- <vscale x 32 x i16>,
- <vscale x 32 x i16>,
- i64,
- i64);
+declare <vscale x 32 x i16> @llvm.riscv.vssra.mask.nxv32i16.nxv32i16.i64(<vscale x 32 x i16>, <vscale x 32 x i16>, <vscale x 32 x i16>, <vscale x 32 x i1>, i64 immarg, i64, i64 immarg)
-define <vscale x 32 x i16> @intrinsic_vssra_vx_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, i64 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vssra_vx_nxv32i16_nxv32i16:
+define <vscale x 32 x i16> @test_vssra_vx_i16m8_m(<vscale x 32 x i1> %mask, <vscale x 32 x i16> %op1, i64 %shift, i64 %vl) {
+; CHECK-LABEL: test_vssra_vx_i16m8_m:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma
-; CHECK-NEXT: vssra.vx v8, v8, a0
+; CHECK-NEXT: csrwi vxrm, 0
+; CHECK-NEXT: vssra.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
entry:
- %a = call <vscale x 32 x i16> @llvm.riscv.vssra.nxv32i16(
- <vscale x 32 x i16> undef,
- <vscale x 32 x i16> %0,
- i64 %1,
- i64 %2)
-
- ret <vscale x 32 x i16> %a
+ %0 = call <vscale x 32 x i16> @llvm.riscv.vssra.mask.nxv32i16.i64.i64(<vscale x 32 x i16> poison, <vscale x 32 x i16> %op1, i64 %shift, <vscale x 32 x i1> %mask, i64 0, i64 %vl, i64 3)
+ ret <vscale x 32 x i16> %0
}
-declare <vscale x 32 x i16> @llvm.riscv.vssra.mask.nxv32i16(
- <vscale x 32 x i16>,
- <vscale x 32 x i16>,
- i64,
- <vscale x 32 x i1>,
- i64,
- i64);
+declare <vscale x 32 x i16> @llvm.riscv.vssra.mask.nxv32i16.i64.i64(<vscale x 32 x i16>, <vscale x 32 x i16>, i64, <vscale x 32 x i1>, i64 immarg, i64, i64 immarg)
-define <vscale x 32 x i16> @intrinsic_vssra_mask_vx_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, i64 %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vssra_mask_vx_nxv32i16_nxv32i16:
+define <vscale x 1 x i32> @test_vssra_vv_i32mf2_m(<vscale x 1 x i1> %mask, <vscale x 1 x i32> %op1, <vscale x 1 x i32> %shift, i64 %vl) {
+; CHECK-LABEL: test_vssra_vv_i32mf2_m:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu
-; CHECK-NEXT: vssra.vx v8, v16, a0, v0.t
+; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
+; CHECK-NEXT: csrwi vxrm, 0
+; CHECK-NEXT: vssra.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
entry:
- %a = call <vscale x 32 x i16> @llvm.riscv.vssra.mask.nxv32i16(
- <vscale x 32 x i16> %0,
- <vscale x 32 x i16> %1,
- i64 %2,
- <vscale x 32 x i1> %3,
- i64 %4, i64 1)
-
- ret <vscale x 32 x i16> %a
+ %0 = call <vscale x 1 x i32> @llvm.riscv.vssra.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> %op1, <vscale x 1 x i32> %shift, <vscale x 1 x i1> %mask, i64 0, i64 %vl, i64 3)
+ ret <vscale x 1 x i32> %0
}
-declare <vscale x 1 x i32> @llvm.riscv.vssra.nxv1i32(
- <vscale x 1 x i32>,
- <vscale x 1 x i32>,
- i64,
- i64);
+declare <vscale x 1 x i32> @llvm.riscv.vssra.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i1>, i64 immarg, i64, i64 immarg)
-define <vscale x 1 x i32> @intrinsic_vssra_vx_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, i64 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vssra_vx_nxv1i32_nxv1i32:
+define <vscale x 1 x i32> @test_vssra_vx_i32mf2_m(<vscale x 1 x i1> %mask, <vscale x 1 x i32> %op1, i64 %shift, i64 %vl) {
+; CHECK-LABEL: test_vssra_vx_i32mf2_m:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
-; CHECK-NEXT: vssra.vx v8, v8, a0
+; CHECK-NEXT: csrwi vxrm, 0
+; CHECK-NEXT: vssra.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
entry:
- %a = call <vscale x 1 x i32> @llvm.riscv.vssra.nxv1i32(
- <vscale x 1 x i32> undef,
- <vscale x 1 x i32> %0,
- i64 %1,
- i64 %2)
-
- ret <vscale x 1 x i32> %a
+ %0 = call <vscale x 1 x i32> @llvm.riscv.vssra.mask.nxv1i32.i64.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> %op1, i64 %shift, <vscale x 1 x i1> %mask, i64 0, i64 %vl, i64 3)
+ ret <vscale x 1 x i32> %0
}
-declare <vscale x 1 x i32> @llvm.riscv.vssra.mask.nxv1i32(
- <vscale x 1 x i32>,
- <vscale x 1 x i32>,
- i64,
- <vscale x 1 x i1>,
- i64,
- i64);
+declare <vscale x 1 x i32> @llvm.riscv.vssra.mask.nxv1i32.i64.i64(<vscale x 1 x i32>, <vscale x 1 x i32>, i64, <vscale x 1 x i1>, i64 immarg, i64, i64 immarg)
-define <vscale x 1 x i32> @intrinsic_vssra_mask_vx_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, i64 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vssra_mask_vx_nxv1i32_nxv1i32:
+define <vscale x 2 x i32> @test_vssra_vv_i32m1_m(<vscale x 2 x i1> %mask, <vscale x 2 x i32> %op1, <vscale x 2 x i32> %shift, i64 %vl) {
+; CHECK-LABEL: test_vssra_vv_i32m1_m:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT: vssra.vx v8, v9, a0, v0.t
+; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
+; CHECK-NEXT: csrwi vxrm, 0
+; CHECK-NEXT: vssra.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
entry:
- %a = call <vscale x 1 x i32> @llvm.riscv.vssra.mask.nxv1i32(
- <vscale x 1 x i32> %0,
- <vscale x 1 x i32> %1,
- i64 %2,
- <vscale x 1 x i1> %3,
- i64 %4, i64 1)
-
- ret <vscale x 1 x i32> %a
+ %0 = call <vscale x 2 x i32> @llvm.riscv.vssra.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> poison, <vscale x 2 x i32> %op1, <vscale x 2 x i32> %shift, <vscale x 2 x i1> %mask, i64 0, i64 %vl, i64 3)
+ ret <vscale x 2 x i32> %0
}
-declare <vscale x 2 x i32> @llvm.riscv.vssra.nxv2i32(
- <vscale x 2 x i32>,
- <vscale x 2 x i32>,
- i64,
- i64);
+declare <vscale x 2 x i32> @llvm.riscv.vssra.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i1>, i64 immarg, i64, i64 immarg)
-define <vscale x 2 x i32> @intrinsic_vssra_vx_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, i64 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vssra_vx_nxv2i32_nxv2i32:
+define <vscale x 2 x i32> @test_vssra_vx_i32m1_m(<vscale x 2 x i1> %mask, <vscale x 2 x i32> %op1, i64 %shift, i64 %vl) {
+; CHECK-LABEL: test_vssra_vx_i32m1_m:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
-; CHECK-NEXT: vssra.vx v8, v8, a0
+; CHECK-NEXT: csrwi vxrm, 0
+; CHECK-NEXT: vssra.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
entry:
- %a = call <vscale x 2 x i32> @llvm.riscv.vssra.nxv2i32(
- <vscale x 2 x i32> undef,
- <vscale x 2 x i32> %0,
- i64 %1,
- i64 %2)
-
- ret <vscale x 2 x i32> %a
+ %0 = call <vscale x 2 x i32> @llvm.riscv.vssra.mask.nxv2i32.i64.i64(<vscale x 2 x i32> poison, <vscale x 2 x i32> %op1, i64 %shift, <vscale x 2 x i1> %mask, i64 0, i64 %vl, i64 3)
+ ret <vscale x 2 x i32> %0
}
-declare <vscale x 2 x i32> @llvm.riscv.vssra.mask.nxv2i32(
- <vscale x 2 x i32>,
- <vscale x 2 x i32>,
- i64,
- <vscale x 2 x i1>,
- i64,
- i64);
+declare <vscale x 2 x i32> @llvm.riscv.vssra.mask.nxv2i32.i64.i64(<vscale x 2 x i32>, <vscale x 2 x i32>, i64, <vscale x 2 x i1>, i64 immarg, i64, i64 immarg)
-define <vscale x 2 x i32> @intrinsic_vssra_mask_vx_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, i64 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vssra_mask_vx_nxv2i32_nxv2i32:
+define <vscale x 4 x i32> @test_vssra_vv_i32m2_m(<vscale x 4 x i1> %mask, <vscale x 4 x i32> %op1, <vscale x 4 x i32> %shift, i64 %vl) {
+; CHECK-LABEL: test_vssra_vv_i32m2_m:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
-; CHECK-NEXT: vssra.vx v8, v9, a0, v0.t
+; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
+; CHECK-NEXT: csrwi vxrm, 0
+; CHECK-NEXT: vssra.vv v8, v8, v10, v0.t
; CHECK-NEXT: ret
entry:
- %a = call <vscale x 2 x i32> @llvm.riscv.vssra.mask.nxv2i32(
- <vscale x 2 x i32> %0,
- <vscale x 2 x i32> %1,
- i64 %2,
- <vscale x 2 x i1> %3,
- i64 %4, i64 1)
-
- ret <vscale x 2 x i32> %a
+ %0 = call <vscale x 4 x i32> @llvm.riscv.vssra.mask.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> poison, <vscale x 4 x i32> %op1, <vscale x 4 x i32> %shift, <vscale x 4 x i1> %mask, i64 0, i64 %vl, i64 3)
+ ret <vscale x 4 x i32> %0
}
-declare <vscale x 4 x i32> @llvm.riscv.vssra.nxv4i32(
- <vscale x 4 x i32>,
- <vscale x 4 x i32>,
- i64,
- i64);
+declare <vscale x 4 x i32> @llvm.riscv.vssra.mask.nxv4i32.nxv4i32.i64(<vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i1>, i64 immarg, i64, i64 immarg)
-define <vscale x 4 x i32> @intrinsic_vssra_vx_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, i64 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vssra_vx_nxv4i32_nxv4i32:
+define <vscale x 4 x i32> @test_vssra_vx_i32m2_m(<vscale x 4 x i1> %mask, <vscale x 4 x i32> %op1, i64 %shift, i64 %vl) {
+; CHECK-LABEL: test_vssra_vx_i32m2_m:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
-; CHECK-NEXT: vssra.vx v8, v8, a0
+; CHECK-NEXT: csrwi vxrm, 0
+; CHECK-NEXT: vssra.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
entry:
- %a = call <vscale x 4 x i32> @llvm.riscv.vssra.nxv4i32(
- <vscale x 4 x i32> undef,
- <vscale x 4 x i32> %0,
- i64 %1,
- i64 %2)
-
- ret <vscale x 4 x i32> %a
+ %0 = call <vscale x 4 x i32> @llvm.riscv.vssra.mask.nxv4i32.i64.i64(<vscale x 4 x i32> poison, <vscale x 4 x i32> %op1, i64 %shift, <vscale x 4 x i1> %mask, i64 0, i64 %vl, i64 3)
+ ret <vscale x 4 x i32> %0
}
-declare <vscale x 4 x i32> @llvm.riscv.vssra.mask.nxv4i32(
- <vscale x 4 x i32>,
- <vscale x 4 x i32>,
- i64,
- <vscale x 4 x i1>,
- i64,
- i64);
+declare <vscale x 4 x i32> @llvm.riscv.vssra.mask.nxv4i32.i64.i64(<vscale x 4 x i32>, <vscale x 4 x i32>, i64, <vscale x 4 x i1>, i64 immarg, i64, i64 immarg)
-define <vscale x 4 x i32> @intrinsic_vssra_mask_vx_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, i64 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vssra_mask_vx_nxv4i32_nxv4i32:
+define <vscale x 8 x i32> @test_vssra_vv_i32m4_m(<vscale x 8 x i1> %mask, <vscale x 8 x i32> %op1, <vscale x 8 x i32> %shift, i64 %vl) {
+; CHECK-LABEL: test_vssra_vv_i32m4_m:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
-; CHECK-NEXT: vssra.vx v8, v10, a0, v0.t
+; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
+; CHECK-NEXT: csrwi vxrm, 0
+; CHECK-NEXT: vssra.vv v8, v8, v12, v0.t
; CHECK-NEXT: ret
entry:
- %a = call <vscale x 4 x i32> @llvm.riscv.vssra.mask.nxv4i32(
- <vscale x 4 x i32> %0,
- <vscale x 4 x i32> %1,
- i64 %2,
- <vscale x 4 x i1> %3,
- i64 %4, i64 1)
-
- ret <vscale x 4 x i32> %a
+ %0 = call <vscale x 8 x i32> @llvm.riscv.vssra.mask.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> poison, <vscale x 8 x i32> %op1, <vscale x 8 x i32> %shift, <vscale x 8 x i1> %mask, i64 0, i64 %vl, i64 3)
+ ret <vscale x 8 x i32> %0
}
-declare <vscale x 8 x i32> @llvm.riscv.vssra.nxv8i32(
- <vscale x 8 x i32>,
- <vscale x 8 x i32>,
- i64,
- i64);
+declare <vscale x 8 x i32> @llvm.riscv.vssra.mask.nxv8i32.nxv8i32.i64(<vscale x 8 x i32>, <vscale x 8 x i32>, <vscale x 8 x i32>, <vscale x 8 x i1>, i64 immarg, i64, i64 immarg)
-define <vscale x 8 x i32> @intrinsic_vssra_vx_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, i64 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vssra_vx_nxv8i32_nxv8i32:
+define <vscale x 8 x i32> @test_vssra_vx_i32m4_m(<vscale x 8 x i1> %mask, <vscale x 8 x i32> %op1, i64 %shift, i64 %vl) {
+; CHECK-LABEL: test_vssra_vx_i32m4_m:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma
-; CHECK-NEXT: vssra.vx v8, v8, a0
+; CHECK-NEXT: csrwi vxrm, 0
+; CHECK-NEXT: vssra.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
entry:
- %a = call <vscale x 8 x i32> @llvm.riscv.vssra.nxv8i32(
- <vscale x 8 x i32> undef,
- <vscale x 8 x i32> %0,
- i64 %1,
- i64 %2)
-
- ret <vscale x 8 x i32> %a
+ %0 = call <vscale x 8 x i32> @llvm.riscv.vssra.mask.nxv8i32.i64.i64(<vscale x 8 x i32> poison, <vscale x 8 x i32> %op1, i64 %shift, <vscale x 8 x i1> %mask, i64 0, i64 %vl, i64 3)
+ ret <vscale x 8 x i32> %0
}
-declare <vscale x 8 x i32> @llvm.riscv.vssra.mask.nxv8i32(
- <vscale x 8 x i32>,
- <vscale x 8 x i32>,
- i64,
- <vscale x 8 x i1>,
- i64,
- i64);
+declare <vscale x 8 x i32> @llvm.riscv.vssra.mask.nxv8i32.i64.i64(<vscale x 8 x i32>, <vscale x 8 x i32>, i64, <vscale x 8 x i1>, i64 immarg, i64, i64 immarg)
-define <vscale x 8 x i32> @intrinsic_vssra_mask_vx_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, i64 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vssra_mask_vx_nxv8i32_nxv8i32:
+define <vscale x 16 x i32> @test_vssra_vv_i32m8_m(<vscale x 16 x i1> %mask, <vscale x 16 x i32> %op1, <vscale x 16 x i32> %shift, i64 %vl) {
+; CHECK-LABEL: test_vssra_vv_i32m8_m:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu
-; CHECK-NEXT: vssra.vx v8, v12, a0, v0.t
+; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma
+; CHECK-NEXT: csrwi vxrm, 0
+; CHECK-NEXT: vssra.vv v8, v8, v16, v0.t
; CHECK-NEXT: ret
entry:
- %a = call <vscale x 8 x i32> @llvm.riscv.vssra.mask.nxv8i32(
- <vscale x 8 x i32> %0,
- <vscale x 8 x i32> %1,
- i64 %2,
- <vscale x 8 x i1> %3,
- i64 %4, i64 1)
-
- ret <vscale x 8 x i32> %a
+ %0 = call <vscale x 16 x i32> @llvm.riscv.vssra.mask.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> poison, <vscale x 16 x i32> %op1, <vscale x 16 x i32> %shift, <vscale x 16 x i1> %mask, i64 0, i64 %vl, i64 3)
+ ret <vscale x 16 x i32> %0
}
-declare <vscale x 16 x i32> @llvm.riscv.vssra.nxv16i32(
- <vscale x 16 x i32>,
- <vscale x 16 x i32>,
- i64,
- i64);
+declare <vscale x 16 x i32> @llvm.riscv.vssra.mask.nxv16i32.nxv16i32.i64(<vscale x 16 x i32>, <vscale x 16 x i32>, <vscale x 16 x i32>, <vscale x 16 x i1>, i64 immarg, i64, i64 immarg)
-define <vscale x 16 x i32> @intrinsic_vssra_vx_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, i64 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vssra_vx_nxv16i32_nxv16i32:
+define <vscale x 16 x i32> @test_vssra_vx_i32m8_m(<vscale x 16 x i1> %mask, <vscale x 16 x i32> %op1, i64 %shift, i64 %vl) {
+; CHECK-LABEL: test_vssra_vx_i32m8_m:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma
-; CHECK-NEXT: vssra.vx v8, v8, a0
+; CHECK-NEXT: csrwi vxrm, 0
+; CHECK-NEXT: vssra.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
entry:
- %a = call <vscale x 16 x i32> @llvm.riscv.vssra.nxv16i32(
- <vscale x 16 x i32> undef,
- <vscale x 16 x i32> %0,
- i64 %1,
- i64 %2)
-
- ret <vscale x 16 x i32> %a
+ %0 = call <vscale x 16 x i32> @llvm.riscv.vssra.mask.nxv16i32.i64.i64(<vscale x 16 x i32> poison, <vscale x 16 x i32> %op1, i64 %shift, <vscale x 16 x i1> %mask, i64 0, i64 %vl, i64 3)
+ ret <vscale x 16 x i32> %0
}
-declare <vscale x 16 x i32> @llvm.riscv.vssra.mask.nxv16i32(
- <vscale x 16 x i32>,
- <vscale x 16 x i32>,
- i64,
- <vscale x 16 x i1>,
- i64,
- i64);
+declare <vscale x 16 x i32> @llvm.riscv.vssra.mask.nxv16i32.i64.i64(<vscale x 16 x i32>, <vscale x 16 x i32>, i64, <vscale x 16 x i1>, i64 immarg, i64, i64 immarg)
-define <vscale x 16 x i32> @intrinsic_vssra_mask_vx_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, i64 %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vssra_mask_vx_nxv16i32_nxv16i32:
+define <vscale x 1 x i64> @test_vssra_vv_i64m1_m(<vscale x 1 x i1> %mask, <vscale x 1 x i64> %op1, <vscale x 1 x i64> %shift, i64 %vl) {
+; CHECK-LABEL: test_vssra_vv_i64m1_m:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu
-; CHECK-NEXT: vssra.vx v8, v16, a0, v0.t
+; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
+; CHECK-NEXT: csrwi vxrm, 0
+; CHECK-NEXT: vssra.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
entry:
- %a = call <vscale x 16 x i32> @llvm.riscv.vssra.mask.nxv16i32(
- <vscale x 16 x i32> %0,
- <vscale x 16 x i32> %1,
- i64 %2,
- <vscale x 16 x i1> %3,
- i64 %4, i64 1)
-
- ret <vscale x 16 x i32> %a
+ %0 = call <vscale x 1 x i64> @llvm.riscv.vssra.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> poison, <vscale x 1 x i64> %op1, <vscale x 1 x i64> %shift, <vscale x 1 x i1> %mask, i64 0, i64 %vl, i64 3)
+ ret <vscale x 1 x i64> %0
}
-declare <vscale x 1 x i64> @llvm.riscv.vssra.nxv1i64(
- <vscale x 1 x i64>,
- <vscale x 1 x i64>,
- i64,
- i64);
+declare <vscale x 1 x i64> @llvm.riscv.vssra.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i1>, i64 immarg, i64, i64 immarg)
-define <vscale x 1 x i64> @intrinsic_vssra_vx_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, i64 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vssra_vx_nxv1i64_nxv1i64:
+define <vscale x 1 x i64> @test_vssra_vx_i64m1_m(<vscale x 1 x i1> %mask, <vscale x 1 x i64> %op1, i64 %shift, i64 %vl) {
+; CHECK-LABEL: test_vssra_vx_i64m1_m:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
-; CHECK-NEXT: vssra.vx v8, v8, a0
+; CHECK-NEXT: csrwi vxrm, 0
+; CHECK-NEXT: vssra.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
entry:
- %a = call <vscale x 1 x i64> @llvm.riscv.vssra.nxv1i64(
- <vscale x 1 x i64> undef,
- <vscale x 1 x i64> %0,
- i64 %1,
- i64 %2)
-
- ret <vscale x 1 x i64> %a
+ %0 = call <vscale x 1 x i64> @llvm.riscv.vssra.mask.nxv1i64.i64.i64(<vscale x 1 x i64> poison, <vscale x 1 x i64> %op1, i64 %shift, <vscale x 1 x i1> %mask, i64 0, i64 %vl, i64 3)
+ ret <vscale x 1 x i64> %0
}
-declare <vscale x 1 x i64> @llvm.riscv.vssra.mask.nxv1i64(
- <vscale x 1 x i64>,
- <vscale x 1 x i64>,
- i64,
- <vscale x 1 x i1>,
- i64,
- i64);
+declare <vscale x 1 x i64> @llvm.riscv.vssra.mask.nxv1i64.i64.i64(<vscale x 1 x i64>, <vscale x 1 x i64>, i64, <vscale x 1 x i1>, i64 immarg, i64, i64 immarg)
-define <vscale x 1 x i64> @intrinsic_vssra_mask_vx_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i64 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vssra_mask_vx_nxv1i64_nxv1i64:
+define <vscale x 2 x i64> @test_vssra_vv_i64m2_m(<vscale x 2 x i1> %mask, <vscale x 2 x i64> %op1, <vscale x 2 x i64> %shift, i64 %vl) {
+; CHECK-LABEL: test_vssra_vv_i64m2_m:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
-; CHECK-NEXT: vssra.vx v8, v9, a0, v0.t
+; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
+; CHECK-NEXT: csrwi vxrm, 0
+; CHECK-NEXT: vssra.vv v8, v8, v10, v0.t
; CHECK-NEXT: ret
entry:
- %a = call <vscale x 1 x i64> @llvm.riscv.vssra.mask.nxv1i64(
- <vscale x 1 x i64> %0,
- <vscale x 1 x i64> %1,
- i64 %2,
- <vscale x 1 x i1> %3,
- i64 %4, i64 1)
-
- ret <vscale x 1 x i64> %a
+ %0 = call <vscale x 2 x i64> @llvm.riscv.vssra.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> poison, <vscale x 2 x i64> %op1, <vscale x 2 x i64> %shift, <vscale x 2 x i1> %mask, i64 0, i64 %vl, i64 3)
+ ret <vscale x 2 x i64> %0
}
-declare <vscale x 2 x i64> @llvm.riscv.vssra.nxv2i64(
- <vscale x 2 x i64>,
- <vscale x 2 x i64>,
- i64,
- i64);
+declare <vscale x 2 x i64> @llvm.riscv.vssra.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i1>, i64 immarg, i64, i64 immarg)
-define <vscale x 2 x i64> @intrinsic_vssra_vx_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, i64 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vssra_vx_nxv2i64_nxv2i64:
+define <vscale x 2 x i64> @test_vssra_vx_i64m2_m(<vscale x 2 x i1> %mask, <vscale x 2 x i64> %op1, i64 %shift, i64 %vl) {
+; CHECK-LABEL: test_vssra_vx_i64m2_m:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma
-; CHECK-NEXT: vssra.vx v8, v8, a0
+; CHECK-NEXT: csrwi vxrm, 0
+; CHECK-NEXT: vssra.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
entry:
- %a = call <vscale x 2 x i64> @llvm.riscv.vssra.nxv2i64(
- <vscale x 2 x i64> undef,
- <vscale x 2 x i64> %0,
- i64 %1,
- i64 %2)
-
- ret <vscale x 2 x i64> %a
+ %0 = call <vscale x 2 x i64> @llvm.riscv.vssra.mask.nxv2i64.i64.i64(<vscale x 2 x i64> poison, <vscale x 2 x i64> %op1, i64 %shift, <vscale x 2 x i1> %mask, i64 0, i64 %vl, i64 3)
+ ret <vscale x 2 x i64> %0
}
-declare <vscale x 2 x i64> @llvm.riscv.vssra.mask.nxv2i64(
- <vscale x 2 x i64>,
- <vscale x 2 x i64>,
- i64,
- <vscale x 2 x i1>,
- i64,
- i64);
+declare <vscale x 2 x i64> @llvm.riscv.vssra.mask.nxv2i64.i64.i64(<vscale x 2 x i64>, <vscale x 2 x i64>, i64, <vscale x 2 x i1>, i64 immarg, i64, i64 immarg)
-define <vscale x 2 x i64> @intrinsic_vssra_mask_vx_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, i64 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vssra_mask_vx_nxv2i64_nxv2i64:
+define <vscale x 4 x i64> @test_vssra_vv_i64m4_m(<vscale x 4 x i1> %mask, <vscale x 4 x i64> %op1, <vscale x 4 x i64> %shift, i64 %vl) {
+; CHECK-LABEL: test_vssra_vv_i64m4_m:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu
-; CHECK-NEXT: vssra.vx v8, v10, a0, v0.t
+; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
+; CHECK-NEXT: csrwi vxrm, 0
+; CHECK-NEXT: vssra.vv v8, v8, v12, v0.t
; CHECK-NEXT: ret
entry:
- %a = call <vscale x 2 x i64> @llvm.riscv.vssra.mask.nxv2i64(
- <vscale x 2 x i64> %0,
- <vscale x 2 x i64> %1,
- i64 %2,
- <vscale x 2 x i1> %3,
- i64 %4, i64 1)
-
- ret <vscale x 2 x i64> %a
+ %0 = call <vscale x 4 x i64> @llvm.riscv.vssra.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> poison, <vscale x 4 x i64> %op1, <vscale x 4 x i64> %shift, <vscale x 4 x i1> %mask, i64 0, i64 %vl, i64 3)
+ ret <vscale x 4 x i64> %0
}
-declare <vscale x 4 x i64> @llvm.riscv.vssra.nxv4i64(
- <vscale x 4 x i64>,
- <vscale x 4 x i64>,
- i64,
- i64);
+declare <vscale x 4 x i64> @llvm.riscv.vssra.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i64>, <vscale x 4 x i64>, <vscale x 4 x i64>, <vscale x 4 x i1>, i64 immarg, i64, i64 immarg)
-define <vscale x 4 x i64> @intrinsic_vssra_vx_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, i64 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vssra_vx_nxv4i64_nxv4i64:
+define <vscale x 4 x i64> @test_vssra_vx_i64m4_m(<vscale x 4 x i1> %mask, <vscale x 4 x i64> %op1, i64 %shift, i64 %vl) {
+; CHECK-LABEL: test_vssra_vx_i64m4_m:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma
-; CHECK-NEXT: vssra.vx v8, v8, a0
+; CHECK-NEXT: csrwi vxrm, 0
+; CHECK-NEXT: vssra.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
entry:
- %a = call <vscale x 4 x i64> @llvm.riscv.vssra.nxv4i64(
- <vscale x 4 x i64> undef,
- <vscale x 4 x i64> %0,
- i64 %1,
- i64 %2)
-
- ret <vscale x 4 x i64> %a
+ %0 = call <vscale x 4 x i64> @llvm.riscv.vssra.mask.nxv4i64.i64.i64(<vscale x 4 x i64> poison, <vscale x 4 x i64> %op1, i64 %shift, <vscale x 4 x i1> %mask, i64 0, i64 %vl, i64 3)
+ ret <vscale x 4 x i64> %0
}
-declare <vscale x 4 x i64> @llvm.riscv.vssra.mask.nxv4i64(
- <vscale x 4 x i64>,
- <vscale x 4 x i64>,
- i64,
- <vscale x 4 x i1>,
- i64,
- i64);
+declare <vscale x 4 x i64> @llvm.riscv.vssra.mask.nxv4i64.i64.i64(<vscale x 4 x i64>, <vscale x 4 x i64>, i64, <vscale x 4 x i1>, i64 immarg, i64, i64 immarg)
-define <vscale x 4 x i64> @intrinsic_vssra_mask_vx_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, i64 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vssra_mask_vx_nxv4i64_nxv4i64:
+define <vscale x 8 x i64> @test_vssra_vv_i64m8_m(<vscale x 8 x i1> %mask, <vscale x 8 x i64> %op1, <vscale x 8 x i64> %shift, i64 %vl) {
+; CHECK-LABEL: test_vssra_vv_i64m8_m:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu
-; CHECK-NEXT: vssra.vx v8, v12, a0, v0.t
+; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; CHECK-NEXT: csrwi vxrm, 0
+; CHECK-NEXT: vssra.vv v8, v8, v16, v0.t
; CHECK-NEXT: ret
entry:
- %a = call <vscale x 4 x i64> @llvm.riscv.vssra.mask.nxv4i64(
- <vscale x 4 x i64> %0,
- <vscale x 4 x i64> %1,
- i64 %2,
- <vscale x 4 x i1> %3,
- i64 %4, i64 1)
-
- ret <vscale x 4 x i64> %a
+ %0 = call <vscale x 8 x i64> @llvm.riscv.vssra.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> poison, <vscale x 8 x i64> %op1, <vscale x 8 x i64> %shift, <vscale x 8 x i1> %mask, i64 0, i64 %vl, i64 3)
+ ret <vscale x 8 x i64> %0
}
-declare <vscale x 8 x i64> @llvm.riscv.vssra.nxv8i64(
- <vscale x 8 x i64>,
- <vscale x 8 x i64>,
- i64,
- i64);
+declare <vscale x 8 x i64> @llvm.riscv.vssra.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64>, <vscale x 8 x i64>, <vscale x 8 x i64>, <vscale x 8 x i1>, i64 immarg, i64, i64 immarg)
-define <vscale x 8 x i64> @intrinsic_vssra_vx_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, i64 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vssra_vx_nxv8i64_nxv8i64:
+define <vscale x 8 x i64> @test_vssra_vx_i64m8_m(<vscale x 8 x i1> %mask, <vscale x 8 x i64> %op1, i64 %shift, i64 %vl) {
+; CHECK-LABEL: test_vssra_vx_i64m8_m:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, ma
-; CHECK-NEXT: vssra.vx v8, v8, a0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i64> @llvm.riscv.vssra.nxv8i64(
- <vscale x 8 x i64> undef,
- <vscale x 8 x i64> %0,
- i64 %1,
- i64 %2)
-
- ret <vscale x 8 x i64> %a
-}
-
-declare <vscale x 8 x i64> @llvm.riscv.vssra.mask.nxv8i64(
- <vscale x 8 x i64>,
- <vscale x 8 x i64>,
- i64,
- <vscale x 8 x i1>,
- i64,
- i64);
-
-define <vscale x 8 x i64> @intrinsic_vssra_mask_vx_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, i64 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vssra_mask_vx_nxv8i64_nxv8i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu
-; CHECK-NEXT: vssra.vx v8, v16, a0, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i64> @llvm.riscv.vssra.mask.nxv8i64(
- <vscale x 8 x i64> %0,
- <vscale x 8 x i64> %1,
- i64 %2,
- <vscale x 8 x i1> %3,
- i64 %4, i64 1)
-
- ret <vscale x 8 x i64> %a
-}
-
-define <vscale x 1 x i8> @intrinsic_vssra_vi_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vssra_vi_nxv1i8_nxv1i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
-; CHECK-NEXT: vssra.vi v8, v8, 9
+; CHECK-NEXT: csrwi vxrm, 0
+; CHECK-NEXT: vssra.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
entry:
- %a = call <vscale x 1 x i8> @llvm.riscv.vssra.nxv1i8(
- <vscale x 1 x i8> undef,
- <vscale x 1 x i8> %0,
- i64 9,
- i64 %1)
-
- ret <vscale x 1 x i8> %a
-}
-
-define <vscale x 1 x i8> @intrinsic_vssra_mask_vi_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vssra_mask_vi_nxv1i8_nxv1i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu
-; CHECK-NEXT: vssra.vi v8, v9, 9, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i8> @llvm.riscv.vssra.mask.nxv1i8(
- <vscale x 1 x i8> %0,
- <vscale x 1 x i8> %1,
- i64 9,
- <vscale x 1 x i1> %2,
- i64 %3, i64 1)
-
- ret <vscale x 1 x i8> %a
+ %0 = call <vscale x 8 x i64> @llvm.riscv.vssra.mask.nxv8i64.i64.i64(<vscale x 8 x i64> poison, <vscale x 8 x i64> %op1, i64 %shift, <vscale x 8 x i1> %mask, i64 0, i64 %vl, i64 3)
+ ret <vscale x 8 x i64> %0
}
-define <vscale x 2 x i8> @intrinsic_vssra_vi_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vssra_vi_nxv2i8_nxv2i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma
-; CHECK-NEXT: vssra.vi v8, v8, 9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i8> @llvm.riscv.vssra.nxv2i8(
- <vscale x 2 x i8> undef,
- <vscale x 2 x i8> %0,
- i64 9,
- i64 %1)
-
- ret <vscale x 2 x i8> %a
-}
-
-define <vscale x 2 x i8> @intrinsic_vssra_mask_vi_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vssra_mask_vi_nxv2i8_nxv2i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu
-; CHECK-NEXT: vssra.vi v8, v9, 9, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i8> @llvm.riscv.vssra.mask.nxv2i8(
- <vscale x 2 x i8> %0,
- <vscale x 2 x i8> %1,
- i64 9,
- <vscale x 2 x i1> %2,
- i64 %3, i64 1)
-
- ret <vscale x 2 x i8> %a
-}
-
-define <vscale x 4 x i8> @intrinsic_vssra_vi_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vssra_vi_nxv4i8_nxv4i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
-; CHECK-NEXT: vssra.vi v8, v8, 9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i8> @llvm.riscv.vssra.nxv4i8(
- <vscale x 4 x i8> undef,
- <vscale x 4 x i8> %0,
- i64 9,
- i64 %1)
-
- ret <vscale x 4 x i8> %a
-}
-
-define <vscale x 4 x i8> @intrinsic_vssra_mask_vi_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vssra_mask_vi_nxv4i8_nxv4i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu
-; CHECK-NEXT: vssra.vi v8, v9, 9, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i8> @llvm.riscv.vssra.mask.nxv4i8(
- <vscale x 4 x i8> %0,
- <vscale x 4 x i8> %1,
- i64 9,
- <vscale x 4 x i1> %2,
- i64 %3, i64 1)
-
- ret <vscale x 4 x i8> %a
-}
-
-define <vscale x 8 x i8> @intrinsic_vssra_vi_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vssra_vi_nxv8i8_nxv8i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma
-; CHECK-NEXT: vssra.vi v8, v8, 9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i8> @llvm.riscv.vssra.nxv8i8(
- <vscale x 8 x i8> undef,
- <vscale x 8 x i8> %0,
- i64 9,
- i64 %1)
-
- ret <vscale x 8 x i8> %a
-}
-
-define <vscale x 8 x i8> @intrinsic_vssra_mask_vi_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vssra_mask_vi_nxv8i8_nxv8i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu
-; CHECK-NEXT: vssra.vi v8, v9, 9, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i8> @llvm.riscv.vssra.mask.nxv8i8(
- <vscale x 8 x i8> %0,
- <vscale x 8 x i8> %1,
- i64 9,
- <vscale x 8 x i1> %2,
- i64 %3, i64 1)
-
- ret <vscale x 8 x i8> %a
-}
-
-define <vscale x 16 x i8> @intrinsic_vssra_vi_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vssra_vi_nxv16i8_nxv16i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma
-; CHECK-NEXT: vssra.vi v8, v8, 9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 16 x i8> @llvm.riscv.vssra.nxv16i8(
- <vscale x 16 x i8> undef,
- <vscale x 16 x i8> %0,
- i64 9,
- i64 %1)
-
- ret <vscale x 16 x i8> %a
-}
-
-define <vscale x 16 x i8> @intrinsic_vssra_mask_vi_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, <vscale x 16 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vssra_mask_vi_nxv16i8_nxv16i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu
-; CHECK-NEXT: vssra.vi v8, v10, 9, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 16 x i8> @llvm.riscv.vssra.mask.nxv16i8(
- <vscale x 16 x i8> %0,
- <vscale x 16 x i8> %1,
- i64 9,
- <vscale x 16 x i1> %2,
- i64 %3, i64 1)
-
- ret <vscale x 16 x i8> %a
-}
-
-define <vscale x 32 x i8> @intrinsic_vssra_vi_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vssra_vi_nxv32i8_nxv32i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma
-; CHECK-NEXT: vssra.vi v8, v8, 9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 32 x i8> @llvm.riscv.vssra.nxv32i8(
- <vscale x 32 x i8> undef,
- <vscale x 32 x i8> %0,
- i64 9,
- i64 %1)
-
- ret <vscale x 32 x i8> %a
-}
-
-define <vscale x 32 x i8> @intrinsic_vssra_mask_vi_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, <vscale x 32 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vssra_mask_vi_nxv32i8_nxv32i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu
-; CHECK-NEXT: vssra.vi v8, v12, 9, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 32 x i8> @llvm.riscv.vssra.mask.nxv32i8(
- <vscale x 32 x i8> %0,
- <vscale x 32 x i8> %1,
- i64 9,
- <vscale x 32 x i1> %2,
- i64 %3, i64 1)
-
- ret <vscale x 32 x i8> %a
-}
-
-define <vscale x 64 x i8> @intrinsic_vssra_vi_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vssra_vi_nxv64i8_nxv64i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma
-; CHECK-NEXT: vssra.vi v8, v8, 9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 64 x i8> @llvm.riscv.vssra.nxv64i8(
- <vscale x 64 x i8> undef,
- <vscale x 64 x i8> %0,
- i64 9,
- i64 %1)
-
- ret <vscale x 64 x i8> %a
-}
-
-define <vscale x 64 x i8> @intrinsic_vssra_mask_vi_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vssra_mask_vi_nxv64i8_nxv64i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, mu
-; CHECK-NEXT: vssra.vi v8, v16, 9, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 64 x i8> @llvm.riscv.vssra.mask.nxv64i8(
- <vscale x 64 x i8> %0,
- <vscale x 64 x i8> %1,
- i64 9,
- <vscale x 64 x i1> %2,
- i64 %3, i64 1)
-
- ret <vscale x 64 x i8> %a
-}
-
-define <vscale x 1 x i16> @intrinsic_vssra_vi_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vssra_vi_nxv1i16_nxv1i16_i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
-; CHECK-NEXT: vssra.vi v8, v8, 9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i16> @llvm.riscv.vssra.nxv1i16(
- <vscale x 1 x i16> undef,
- <vscale x 1 x i16> %0,
- i64 9,
- i64 %1)
-
- ret <vscale x 1 x i16> %a
-}
-
-define <vscale x 1 x i16> @intrinsic_vssra_mask_vi_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vssra_mask_vi_nxv1i16_nxv1i16_i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
-; CHECK-NEXT: vssra.vi v8, v9, 9, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i16> @llvm.riscv.vssra.mask.nxv1i16(
- <vscale x 1 x i16> %0,
- <vscale x 1 x i16> %1,
- i64 9,
- <vscale x 1 x i1> %2,
- i64 %3, i64 1)
-
- ret <vscale x 1 x i16> %a
-}
-
-define <vscale x 2 x i16> @intrinsic_vssra_vi_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vssra_vi_nxv2i16_nxv2i16_i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
-; CHECK-NEXT: vssra.vi v8, v8, 9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i16> @llvm.riscv.vssra.nxv2i16(
- <vscale x 2 x i16> undef,
- <vscale x 2 x i16> %0,
- i64 9,
- i64 %1)
-
- ret <vscale x 2 x i16> %a
-}
-
-define <vscale x 2 x i16> @intrinsic_vssra_mask_vi_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vssra_mask_vi_nxv2i16_nxv2i16_i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu
-; CHECK-NEXT: vssra.vi v8, v9, 9, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i16> @llvm.riscv.vssra.mask.nxv2i16(
- <vscale x 2 x i16> %0,
- <vscale x 2 x i16> %1,
- i64 9,
- <vscale x 2 x i1> %2,
- i64 %3, i64 1)
-
- ret <vscale x 2 x i16> %a
-}
-
-define <vscale x 4 x i16> @intrinsic_vssra_vi_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vssra_vi_nxv4i16_nxv4i16_i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
-; CHECK-NEXT: vssra.vi v8, v8, 9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i16> @llvm.riscv.vssra.nxv4i16(
- <vscale x 4 x i16> undef,
- <vscale x 4 x i16> %0,
- i64 9,
- i64 %1)
-
- ret <vscale x 4 x i16> %a
-}
-
-define <vscale x 4 x i16> @intrinsic_vssra_mask_vi_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vssra_mask_vi_nxv4i16_nxv4i16_i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu
-; CHECK-NEXT: vssra.vi v8, v9, 9, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i16> @llvm.riscv.vssra.mask.nxv4i16(
- <vscale x 4 x i16> %0,
- <vscale x 4 x i16> %1,
- i64 9,
- <vscale x 4 x i1> %2,
- i64 %3, i64 1)
-
- ret <vscale x 4 x i16> %a
-}
-
-define <vscale x 8 x i16> @intrinsic_vssra_vi_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vssra_vi_nxv8i16_nxv8i16_i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
-; CHECK-NEXT: vssra.vi v8, v8, 9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i16> @llvm.riscv.vssra.nxv8i16(
- <vscale x 8 x i16> undef,
- <vscale x 8 x i16> %0,
- i64 9,
- i64 %1)
-
- ret <vscale x 8 x i16> %a
-}
-
-define <vscale x 8 x i16> @intrinsic_vssra_mask_vi_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vssra_mask_vi_nxv8i16_nxv8i16_i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu
-; CHECK-NEXT: vssra.vi v8, v10, 9, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i16> @llvm.riscv.vssra.mask.nxv8i16(
- <vscale x 8 x i16> %0,
- <vscale x 8 x i16> %1,
- i64 9,
- <vscale x 8 x i1> %2,
- i64 %3, i64 1)
-
- ret <vscale x 8 x i16> %a
-}
-
-define <vscale x 16 x i16> @intrinsic_vssra_vi_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vssra_vi_nxv16i16_nxv16i16_i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma
-; CHECK-NEXT: vssra.vi v8, v8, 9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 16 x i16> @llvm.riscv.vssra.nxv16i16(
- <vscale x 16 x i16> undef,
- <vscale x 16 x i16> %0,
- i64 9,
- i64 %1)
-
- ret <vscale x 16 x i16> %a
-}
-
-define <vscale x 16 x i16> @intrinsic_vssra_mask_vi_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, <vscale x 16 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vssra_mask_vi_nxv16i16_nxv16i16_i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu
-; CHECK-NEXT: vssra.vi v8, v12, 9, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 16 x i16> @llvm.riscv.vssra.mask.nxv16i16(
- <vscale x 16 x i16> %0,
- <vscale x 16 x i16> %1,
- i64 9,
- <vscale x 16 x i1> %2,
- i64 %3, i64 1)
-
- ret <vscale x 16 x i16> %a
-}
-
-define <vscale x 32 x i16> @intrinsic_vssra_vi_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vssra_vi_nxv32i16_nxv32i16_i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma
-; CHECK-NEXT: vssra.vi v8, v8, 9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 32 x i16> @llvm.riscv.vssra.nxv32i16(
- <vscale x 32 x i16> undef,
- <vscale x 32 x i16> %0,
- i64 9,
- i64 %1)
-
- ret <vscale x 32 x i16> %a
-}
-
-define <vscale x 32 x i16> @intrinsic_vssra_mask_vi_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, <vscale x 32 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vssra_mask_vi_nxv32i16_nxv32i16_i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu
-; CHECK-NEXT: vssra.vi v8, v16, 9, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 32 x i16> @llvm.riscv.vssra.mask.nxv32i16(
- <vscale x 32 x i16> %0,
- <vscale x 32 x i16> %1,
- i64 9,
- <vscale x 32 x i1> %2,
- i64 %3, i64 1)
-
- ret <vscale x 32 x i16> %a
-}
-
-define <vscale x 1 x i32> @intrinsic_vssra_vi_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vssra_vi_nxv1i32_nxv1i32_i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
-; CHECK-NEXT: vssra.vi v8, v8, 9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i32> @llvm.riscv.vssra.nxv1i32(
- <vscale x 1 x i32> undef,
- <vscale x 1 x i32> %0,
- i64 9,
- i64 %1)
-
- ret <vscale x 1 x i32> %a
-}
-
-define <vscale x 1 x i32> @intrinsic_vssra_mask_vi_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vssra_mask_vi_nxv1i32_nxv1i32_i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu
-; CHECK-NEXT: vssra.vi v8, v9, 9, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i32> @llvm.riscv.vssra.mask.nxv1i32(
- <vscale x 1 x i32> %0,
- <vscale x 1 x i32> %1,
- i64 9,
- <vscale x 1 x i1> %2,
- i64 %3, i64 1)
-
- ret <vscale x 1 x i32> %a
-}
-
-define <vscale x 2 x i32> @intrinsic_vssra_vi_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vssra_vi_nxv2i32_nxv2i32_i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
-; CHECK-NEXT: vssra.vi v8, v8, 9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i32> @llvm.riscv.vssra.nxv2i32(
- <vscale x 2 x i32> undef,
- <vscale x 2 x i32> %0,
- i64 9,
- i64 %1)
-
- ret <vscale x 2 x i32> %a
-}
-
-define <vscale x 2 x i32> @intrinsic_vssra_mask_vi_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vssra_mask_vi_nxv2i32_nxv2i32_i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
-; CHECK-NEXT: vssra.vi v8, v9, 9, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i32> @llvm.riscv.vssra.mask.nxv2i32(
- <vscale x 2 x i32> %0,
- <vscale x 2 x i32> %1,
- i64 9,
- <vscale x 2 x i1> %2,
- i64 %3, i64 1)
-
- ret <vscale x 2 x i32> %a
-}
-
-define <vscale x 4 x i32> @intrinsic_vssra_vi_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vssra_vi_nxv4i32_nxv4i32_i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
-; CHECK-NEXT: vssra.vi v8, v8, 9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i32> @llvm.riscv.vssra.nxv4i32(
- <vscale x 4 x i32> undef,
- <vscale x 4 x i32> %0,
- i64 9,
- i64 %1)
-
- ret <vscale x 4 x i32> %a
-}
-
-define <vscale x 4 x i32> @intrinsic_vssra_mask_vi_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vssra_mask_vi_nxv4i32_nxv4i32_i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu
-; CHECK-NEXT: vssra.vi v8, v10, 9, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i32> @llvm.riscv.vssra.mask.nxv4i32(
- <vscale x 4 x i32> %0,
- <vscale x 4 x i32> %1,
- i64 9,
- <vscale x 4 x i1> %2,
- i64 %3, i64 1)
-
- ret <vscale x 4 x i32> %a
-}
-
-define <vscale x 8 x i32> @intrinsic_vssra_vi_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vssra_vi_nxv8i32_nxv8i32_i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
-; CHECK-NEXT: vssra.vi v8, v8, 9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i32> @llvm.riscv.vssra.nxv8i32(
- <vscale x 8 x i32> undef,
- <vscale x 8 x i32> %0,
- i64 9,
- i64 %1)
-
- ret <vscale x 8 x i32> %a
-}
-
-define <vscale x 8 x i32> @intrinsic_vssra_mask_vi_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vssra_mask_vi_nxv8i32_nxv8i32_i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu
-; CHECK-NEXT: vssra.vi v8, v12, 9, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i32> @llvm.riscv.vssra.mask.nxv8i32(
- <vscale x 8 x i32> %0,
- <vscale x 8 x i32> %1,
- i64 9,
- <vscale x 8 x i1> %2,
- i64 %3, i64 1)
-
- ret <vscale x 8 x i32> %a
-}
-
-define <vscale x 16 x i32> @intrinsic_vssra_vi_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vssra_vi_nxv16i32_nxv16i32_i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma
-; CHECK-NEXT: vssra.vi v8, v8, 9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 16 x i32> @llvm.riscv.vssra.nxv16i32(
- <vscale x 16 x i32> undef,
- <vscale x 16 x i32> %0,
- i64 9,
- i64 %1)
-
- ret <vscale x 16 x i32> %a
-}
-
-define <vscale x 16 x i32> @intrinsic_vssra_mask_vi_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, <vscale x 16 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vssra_mask_vi_nxv16i32_nxv16i32_i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu
-; CHECK-NEXT: vssra.vi v8, v16, 9, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 16 x i32> @llvm.riscv.vssra.mask.nxv16i32(
- <vscale x 16 x i32> %0,
- <vscale x 16 x i32> %1,
- i64 9,
- <vscale x 16 x i1> %2,
- i64 %3, i64 1)
-
- ret <vscale x 16 x i32> %a
-}
-
-define <vscale x 1 x i64> @intrinsic_vssra_vi_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vssra_vi_nxv1i64_nxv1i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
-; CHECK-NEXT: vssra.vi v8, v8, 9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i64> @llvm.riscv.vssra.nxv1i64(
- <vscale x 1 x i64> undef,
- <vscale x 1 x i64> %0,
- i64 9,
- i64 %1)
-
- ret <vscale x 1 x i64> %a
-}
-
-define <vscale x 1 x i64> @intrinsic_vssra_mask_vi_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vssra_mask_vi_nxv1i64_nxv1i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu
-; CHECK-NEXT: vssra.vi v8, v9, 9, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i64> @llvm.riscv.vssra.mask.nxv1i64(
- <vscale x 1 x i64> %0,
- <vscale x 1 x i64> %1,
- i64 9,
- <vscale x 1 x i1> %2,
- i64 %3, i64 1)
-
- ret <vscale x 1 x i64> %a
-}
-
-define <vscale x 2 x i64> @intrinsic_vssra_vi_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vssra_vi_nxv2i64_nxv2i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
-; CHECK-NEXT: vssra.vi v8, v8, 9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i64> @llvm.riscv.vssra.nxv2i64(
- <vscale x 2 x i64> undef,
- <vscale x 2 x i64> %0,
- i64 9,
- i64 %1)
-
- ret <vscale x 2 x i64> %a
-}
-
-define <vscale x 2 x i64> @intrinsic_vssra_mask_vi_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vssra_mask_vi_nxv2i64_nxv2i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu
-; CHECK-NEXT: vssra.vi v8, v10, 9, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i64> @llvm.riscv.vssra.mask.nxv2i64(
- <vscale x 2 x i64> %0,
- <vscale x 2 x i64> %1,
- i64 9,
- <vscale x 2 x i1> %2,
- i64 %3, i64 1)
-
- ret <vscale x 2 x i64> %a
-}
-
-define <vscale x 4 x i64> @intrinsic_vssra_vi_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vssra_vi_nxv4i64_nxv4i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
-; CHECK-NEXT: vssra.vi v8, v8, 9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i64> @llvm.riscv.vssra.nxv4i64(
- <vscale x 4 x i64> undef,
- <vscale x 4 x i64> %0,
- i64 9,
- i64 %1)
-
- ret <vscale x 4 x i64> %a
-}
-
-define <vscale x 4 x i64> @intrinsic_vssra_mask_vi_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vssra_mask_vi_nxv4i64_nxv4i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu
-; CHECK-NEXT: vssra.vi v8, v12, 9, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i64> @llvm.riscv.vssra.mask.nxv4i64(
- <vscale x 4 x i64> %0,
- <vscale x 4 x i64> %1,
- i64 9,
- <vscale x 4 x i1> %2,
- i64 %3, i64 1)
-
- ret <vscale x 4 x i64> %a
-}
-
-define <vscale x 8 x i64> @intrinsic_vssra_vi_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vssra_vi_nxv8i64_nxv8i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
-; CHECK-NEXT: vssra.vi v8, v8, 9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i64> @llvm.riscv.vssra.nxv8i64(
- <vscale x 8 x i64> undef,
- <vscale x 8 x i64> %0,
- i64 9,
- i64 %1)
-
- ret <vscale x 8 x i64> %a
-}
-
-define <vscale x 8 x i64> @intrinsic_vssra_mask_vi_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vssra_mask_vi_nxv8i64_nxv8i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu
-; CHECK-NEXT: vssra.vi v8, v16, 9, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i64> @llvm.riscv.vssra.mask.nxv8i64(
- <vscale x 8 x i64> %0,
- <vscale x 8 x i64> %1,
- i64 9,
- <vscale x 8 x i1> %2,
- i64 %3, i64 1)
-
- ret <vscale x 8 x i64> %a
-}
+declare <vscale x 8 x i64> @llvm.riscv.vssra.mask.nxv8i64.i64.i64(<vscale x 8 x i64>, <vscale x 8 x i64>, i64, <vscale x 8 x i1>, i64 immarg, i64, i64 immarg)
-; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 2
; RUN: llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs \
; RUN: < %s | FileCheck %s
-declare <vscale x 1 x i8> @llvm.riscv.vssrl.nxv1i8.nxv1i8(
- <vscale x 1 x i8>,
- <vscale x 1 x i8>,
- <vscale x 1 x i8>,
- i32);
-define <vscale x 1 x i8> @intrinsic_vssrl_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vssrl_vv_nxv1i8_nxv1i8_nxv1i8:
+define <vscale x 1 x i8> @test_vssrl_vv_u8mf8(<vscale x 1 x i8> %op1, <vscale x 1 x i8> %shift, i32 %vl) {
+; CHECK-LABEL: test_vssrl_vv_u8mf8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
+; CHECK-NEXT: csrwi vxrm, 0
; CHECK-NEXT: vssrl.vv v8, v8, v9
; CHECK-NEXT: ret
entry:
- %a = call <vscale x 1 x i8> @llvm.riscv.vssrl.nxv1i8.nxv1i8(
- <vscale x 1 x i8> undef,
- <vscale x 1 x i8> %0,
- <vscale x 1 x i8> %1,
- i32 %2)
-
- ret <vscale x 1 x i8> %a
+ %0 = call <vscale x 1 x i8> @llvm.riscv.vssrl.nxv1i8.nxv1i8.i32(<vscale x 1 x i8> poison, <vscale x 1 x i8> %op1, <vscale x 1 x i8> %shift, i32 0, i32 %vl)
+ ret <vscale x 1 x i8> %0
}
-declare <vscale x 1 x i8> @llvm.riscv.vssrl.mask.nxv1i8.nxv1i8(
- <vscale x 1 x i8>,
- <vscale x 1 x i8>,
- <vscale x 1 x i8>,
- <vscale x 1 x i1>,
- i32,
- i32);
+declare <vscale x 1 x i8> @llvm.riscv.vssrl.nxv1i8.nxv1i8.i32(<vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, i32 immarg, i32)
-define <vscale x 1 x i8> @intrinsic_vssrl_mask_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vssrl_mask_vv_nxv1i8_nxv1i8_nxv1i8:
+define <vscale x 1 x i8> @test_vssrl_vx_u8mf8(<vscale x 1 x i8> %op1, i32 %shift, i32 %vl) {
+; CHECK-LABEL: test_vssrl_vx_u8mf8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu
-; CHECK-NEXT: vssrl.vv v8, v9, v10, v0.t
+; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
+; CHECK-NEXT: csrwi vxrm, 0
+; CHECK-NEXT: vssrl.vx v8, v8, a0
; CHECK-NEXT: ret
entry:
- %a = call <vscale x 1 x i8> @llvm.riscv.vssrl.mask.nxv1i8.nxv1i8(
- <vscale x 1 x i8> %0,
- <vscale x 1 x i8> %1,
- <vscale x 1 x i8> %2,
- <vscale x 1 x i1> %3,
- i32 %4, i32 1)
-
- ret <vscale x 1 x i8> %a
+ %0 = call <vscale x 1 x i8> @llvm.riscv.vssrl.nxv1i8.i32.i32(<vscale x 1 x i8> poison, <vscale x 1 x i8> %op1, i32 %shift, i32 0, i32 %vl)
+ ret <vscale x 1 x i8> %0
}
-declare <vscale x 2 x i8> @llvm.riscv.vssrl.nxv2i8.nxv2i8(
- <vscale x 2 x i8>,
- <vscale x 2 x i8>,
- <vscale x 2 x i8>,
- i32);
+declare <vscale x 1 x i8> @llvm.riscv.vssrl.nxv1i8.i32.i32(<vscale x 1 x i8>, <vscale x 1 x i8>, i32, i32 immarg, i32)
-define <vscale x 2 x i8> @intrinsic_vssrl_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vssrl_vv_nxv2i8_nxv2i8_nxv2i8:
+define <vscale x 2 x i8> @test_vssrl_vv_u8mf4(<vscale x 2 x i8> %op1, <vscale x 2 x i8> %shift, i32 %vl) {
+; CHECK-LABEL: test_vssrl_vv_u8mf4:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma
+; CHECK-NEXT: csrwi vxrm, 0
; CHECK-NEXT: vssrl.vv v8, v8, v9
; CHECK-NEXT: ret
entry:
- %a = call <vscale x 2 x i8> @llvm.riscv.vssrl.nxv2i8.nxv2i8(
- <vscale x 2 x i8> undef,
- <vscale x 2 x i8> %0,
- <vscale x 2 x i8> %1,
- i32 %2)
-
- ret <vscale x 2 x i8> %a
+ %0 = call <vscale x 2 x i8> @llvm.riscv.vssrl.nxv2i8.nxv2i8.i32(<vscale x 2 x i8> poison, <vscale x 2 x i8> %op1, <vscale x 2 x i8> %shift, i32 0, i32 %vl)
+ ret <vscale x 2 x i8> %0
}
-declare <vscale x 2 x i8> @llvm.riscv.vssrl.mask.nxv2i8.nxv2i8(
- <vscale x 2 x i8>,
- <vscale x 2 x i8>,
- <vscale x 2 x i8>,
- <vscale x 2 x i1>,
- i32,
- i32);
+declare <vscale x 2 x i8> @llvm.riscv.vssrl.nxv2i8.nxv2i8.i32(<vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, i32 immarg, i32)
-define <vscale x 2 x i8> @intrinsic_vssrl_mask_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vssrl_mask_vv_nxv2i8_nxv2i8_nxv2i8:
+define <vscale x 2 x i8> @test_vssrl_vx_u8mf4(<vscale x 2 x i8> %op1, i32 %shift, i32 %vl) {
+; CHECK-LABEL: test_vssrl_vx_u8mf4:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu
-; CHECK-NEXT: vssrl.vv v8, v9, v10, v0.t
+; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
+; CHECK-NEXT: csrwi vxrm, 0
+; CHECK-NEXT: vssrl.vx v8, v8, a0
; CHECK-NEXT: ret
entry:
- %a = call <vscale x 2 x i8> @llvm.riscv.vssrl.mask.nxv2i8.nxv2i8(
- <vscale x 2 x i8> %0,
- <vscale x 2 x i8> %1,
- <vscale x 2 x i8> %2,
- <vscale x 2 x i1> %3,
- i32 %4, i32 1)
-
- ret <vscale x 2 x i8> %a
+ %0 = call <vscale x 2 x i8> @llvm.riscv.vssrl.nxv2i8.i32.i32(<vscale x 2 x i8> poison, <vscale x 2 x i8> %op1, i32 %shift, i32 0, i32 %vl)
+ ret <vscale x 2 x i8> %0
}
-declare <vscale x 4 x i8> @llvm.riscv.vssrl.nxv4i8.nxv4i8(
- <vscale x 4 x i8>,
- <vscale x 4 x i8>,
- <vscale x 4 x i8>,
- i32);
+declare <vscale x 2 x i8> @llvm.riscv.vssrl.nxv2i8.i32.i32(<vscale x 2 x i8>, <vscale x 2 x i8>, i32, i32 immarg, i32)
-define <vscale x 4 x i8> @intrinsic_vssrl_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vssrl_vv_nxv4i8_nxv4i8_nxv4i8:
+define <vscale x 4 x i8> @test_vssrl_vv_u8mf2(<vscale x 4 x i8> %op1, <vscale x 4 x i8> %shift, i32 %vl) {
+; CHECK-LABEL: test_vssrl_vv_u8mf2:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
+; CHECK-NEXT: csrwi vxrm, 0
; CHECK-NEXT: vssrl.vv v8, v8, v9
; CHECK-NEXT: ret
entry:
- %a = call <vscale x 4 x i8> @llvm.riscv.vssrl.nxv4i8.nxv4i8(
- <vscale x 4 x i8> undef,
- <vscale x 4 x i8> %0,
- <vscale x 4 x i8> %1,
- i32 %2)
-
- ret <vscale x 4 x i8> %a
+ %0 = call <vscale x 4 x i8> @llvm.riscv.vssrl.nxv4i8.nxv4i8.i32(<vscale x 4 x i8> poison, <vscale x 4 x i8> %op1, <vscale x 4 x i8> %shift, i32 0, i32 %vl)
+ ret <vscale x 4 x i8> %0
}
-declare <vscale x 4 x i8> @llvm.riscv.vssrl.mask.nxv4i8.nxv4i8(
- <vscale x 4 x i8>,
- <vscale x 4 x i8>,
- <vscale x 4 x i8>,
- <vscale x 4 x i1>,
- i32,
- i32);
+declare <vscale x 4 x i8> @llvm.riscv.vssrl.nxv4i8.nxv4i8.i32(<vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, i32 immarg, i32)
-define <vscale x 4 x i8> @intrinsic_vssrl_mask_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vssrl_mask_vv_nxv4i8_nxv4i8_nxv4i8:
+define <vscale x 4 x i8> @test_vssrl_vx_u8mf2(<vscale x 4 x i8> %op1, i32 %shift, i32 %vl) {
+; CHECK-LABEL: test_vssrl_vx_u8mf2:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu
-; CHECK-NEXT: vssrl.vv v8, v9, v10, v0.t
+; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
+; CHECK-NEXT: csrwi vxrm, 0
+; CHECK-NEXT: vssrl.vx v8, v8, a0
; CHECK-NEXT: ret
entry:
- %a = call <vscale x 4 x i8> @llvm.riscv.vssrl.mask.nxv4i8.nxv4i8(
- <vscale x 4 x i8> %0,
- <vscale x 4 x i8> %1,
- <vscale x 4 x i8> %2,
- <vscale x 4 x i1> %3,
- i32 %4, i32 1)
-
- ret <vscale x 4 x i8> %a
+ %0 = call <vscale x 4 x i8> @llvm.riscv.vssrl.nxv4i8.i32.i32(<vscale x 4 x i8> poison, <vscale x 4 x i8> %op1, i32 %shift, i32 0, i32 %vl)
+ ret <vscale x 4 x i8> %0
}
-declare <vscale x 8 x i8> @llvm.riscv.vssrl.nxv8i8.nxv8i8(
- <vscale x 8 x i8>,
- <vscale x 8 x i8>,
- <vscale x 8 x i8>,
- i32);
+declare <vscale x 4 x i8> @llvm.riscv.vssrl.nxv4i8.i32.i32(<vscale x 4 x i8>, <vscale x 4 x i8>, i32, i32 immarg, i32)
-define <vscale x 8 x i8> @intrinsic_vssrl_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vssrl_vv_nxv8i8_nxv8i8_nxv8i8:
+define <vscale x 8 x i8> @test_vssrl_vv_u8m1(<vscale x 8 x i8> %op1, <vscale x 8 x i8> %shift, i32 %vl) {
+; CHECK-LABEL: test_vssrl_vv_u8m1:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma
+; CHECK-NEXT: csrwi vxrm, 0
; CHECK-NEXT: vssrl.vv v8, v8, v9
; CHECK-NEXT: ret
entry:
- %a = call <vscale x 8 x i8> @llvm.riscv.vssrl.nxv8i8.nxv8i8(
- <vscale x 8 x i8> undef,
- <vscale x 8 x i8> %0,
- <vscale x 8 x i8> %1,
- i32 %2)
-
- ret <vscale x 8 x i8> %a
+ %0 = call <vscale x 8 x i8> @llvm.riscv.vssrl.nxv8i8.nxv8i8.i32(<vscale x 8 x i8> poison, <vscale x 8 x i8> %op1, <vscale x 8 x i8> %shift, i32 0, i32 %vl)
+ ret <vscale x 8 x i8> %0
}
-declare <vscale x 8 x i8> @llvm.riscv.vssrl.mask.nxv8i8.nxv8i8(
- <vscale x 8 x i8>,
- <vscale x 8 x i8>,
- <vscale x 8 x i8>,
- <vscale x 8 x i1>,
- i32,
- i32);
+declare <vscale x 8 x i8> @llvm.riscv.vssrl.nxv8i8.nxv8i8.i32(<vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, i32 immarg, i32)
-define <vscale x 8 x i8> @intrinsic_vssrl_mask_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vssrl_mask_vv_nxv8i8_nxv8i8_nxv8i8:
+define <vscale x 8 x i8> @test_vssrl_vx_u8m1(<vscale x 8 x i8> %op1, i32 %shift, i32 %vl) {
+; CHECK-LABEL: test_vssrl_vx_u8m1:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu
-; CHECK-NEXT: vssrl.vv v8, v9, v10, v0.t
+; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
+; CHECK-NEXT: csrwi vxrm, 0
+; CHECK-NEXT: vssrl.vx v8, v8, a0
; CHECK-NEXT: ret
entry:
- %a = call <vscale x 8 x i8> @llvm.riscv.vssrl.mask.nxv8i8.nxv8i8(
- <vscale x 8 x i8> %0,
- <vscale x 8 x i8> %1,
- <vscale x 8 x i8> %2,
- <vscale x 8 x i1> %3,
- i32 %4, i32 1)
-
- ret <vscale x 8 x i8> %a
+ %0 = call <vscale x 8 x i8> @llvm.riscv.vssrl.nxv8i8.i32.i32(<vscale x 8 x i8> poison, <vscale x 8 x i8> %op1, i32 %shift, i32 0, i32 %vl)
+ ret <vscale x 8 x i8> %0
}
-declare <vscale x 16 x i8> @llvm.riscv.vssrl.nxv16i8.nxv16i8(
- <vscale x 16 x i8>,
- <vscale x 16 x i8>,
- <vscale x 16 x i8>,
- i32);
+declare <vscale x 8 x i8> @llvm.riscv.vssrl.nxv8i8.i32.i32(<vscale x 8 x i8>, <vscale x 8 x i8>, i32, i32 immarg, i32)
-define <vscale x 16 x i8> @intrinsic_vssrl_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vssrl_vv_nxv16i8_nxv16i8_nxv16i8:
+define <vscale x 16 x i8> @test_vssrl_vv_u8m2(<vscale x 16 x i8> %op1, <vscale x 16 x i8> %shift, i32 %vl) {
+; CHECK-LABEL: test_vssrl_vv_u8m2:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma
+; CHECK-NEXT: csrwi vxrm, 0
; CHECK-NEXT: vssrl.vv v8, v8, v10
; CHECK-NEXT: ret
entry:
- %a = call <vscale x 16 x i8> @llvm.riscv.vssrl.nxv16i8.nxv16i8(
- <vscale x 16 x i8> undef,
- <vscale x 16 x i8> %0,
- <vscale x 16 x i8> %1,
- i32 %2)
-
- ret <vscale x 16 x i8> %a
+ %0 = call <vscale x 16 x i8> @llvm.riscv.vssrl.nxv16i8.nxv16i8.i32(<vscale x 16 x i8> poison, <vscale x 16 x i8> %op1, <vscale x 16 x i8> %shift, i32 0, i32 %vl)
+ ret <vscale x 16 x i8> %0
}
-declare <vscale x 16 x i8> @llvm.riscv.vssrl.mask.nxv16i8.nxv16i8(
- <vscale x 16 x i8>,
- <vscale x 16 x i8>,
- <vscale x 16 x i8>,
- <vscale x 16 x i1>,
- i32,
- i32);
+declare <vscale x 16 x i8> @llvm.riscv.vssrl.nxv16i8.nxv16i8.i32(<vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, i32 immarg, i32)
-define <vscale x 16 x i8> @intrinsic_vssrl_mask_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vssrl_mask_vv_nxv16i8_nxv16i8_nxv16i8:
+define <vscale x 16 x i8> @test_vssrl_vx_u8m2(<vscale x 16 x i8> %op1, i32 %shift, i32 %vl) {
+; CHECK-LABEL: test_vssrl_vx_u8m2:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu
-; CHECK-NEXT: vssrl.vv v8, v10, v12, v0.t
+; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma
+; CHECK-NEXT: csrwi vxrm, 0
+; CHECK-NEXT: vssrl.vx v8, v8, a0
; CHECK-NEXT: ret
entry:
- %a = call <vscale x 16 x i8> @llvm.riscv.vssrl.mask.nxv16i8.nxv16i8(
- <vscale x 16 x i8> %0,
- <vscale x 16 x i8> %1,
- <vscale x 16 x i8> %2,
- <vscale x 16 x i1> %3,
- i32 %4, i32 1)
-
- ret <vscale x 16 x i8> %a
+ %0 = call <vscale x 16 x i8> @llvm.riscv.vssrl.nxv16i8.i32.i32(<vscale x 16 x i8> poison, <vscale x 16 x i8> %op1, i32 %shift, i32 0, i32 %vl)
+ ret <vscale x 16 x i8> %0
}
-declare <vscale x 32 x i8> @llvm.riscv.vssrl.nxv32i8.nxv32i8(
- <vscale x 32 x i8>,
- <vscale x 32 x i8>,
- <vscale x 32 x i8>,
- i32);
+declare <vscale x 16 x i8> @llvm.riscv.vssrl.nxv16i8.i32.i32(<vscale x 16 x i8>, <vscale x 16 x i8>, i32, i32 immarg, i32)
-define <vscale x 32 x i8> @intrinsic_vssrl_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vssrl_vv_nxv32i8_nxv32i8_nxv32i8:
+define <vscale x 32 x i8> @test_vssrl_vv_u8m4(<vscale x 32 x i8> %op1, <vscale x 32 x i8> %shift, i32 %vl) {
+; CHECK-LABEL: test_vssrl_vv_u8m4:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma
+; CHECK-NEXT: csrwi vxrm, 0
; CHECK-NEXT: vssrl.vv v8, v8, v12
; CHECK-NEXT: ret
entry:
- %a = call <vscale x 32 x i8> @llvm.riscv.vssrl.nxv32i8.nxv32i8(
- <vscale x 32 x i8> undef,
- <vscale x 32 x i8> %0,
- <vscale x 32 x i8> %1,
- i32 %2)
-
- ret <vscale x 32 x i8> %a
+ %0 = call <vscale x 32 x i8> @llvm.riscv.vssrl.nxv32i8.nxv32i8.i32(<vscale x 32 x i8> poison, <vscale x 32 x i8> %op1, <vscale x 32 x i8> %shift, i32 0, i32 %vl)
+ ret <vscale x 32 x i8> %0
}
-declare <vscale x 32 x i8> @llvm.riscv.vssrl.mask.nxv32i8.nxv32i8(
- <vscale x 32 x i8>,
- <vscale x 32 x i8>,
- <vscale x 32 x i8>,
- <vscale x 32 x i1>,
- i32,
- i32);
+declare <vscale x 32 x i8> @llvm.riscv.vssrl.nxv32i8.nxv32i8.i32(<vscale x 32 x i8>, <vscale x 32 x i8>, <vscale x 32 x i8>, i32 immarg, i32)
-define <vscale x 32 x i8> @intrinsic_vssrl_mask_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vssrl_mask_vv_nxv32i8_nxv32i8_nxv32i8:
+define <vscale x 32 x i8> @test_vssrl_vx_u8m4(<vscale x 32 x i8> %op1, i32 %shift, i32 %vl) {
+; CHECK-LABEL: test_vssrl_vx_u8m4:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu
-; CHECK-NEXT: vssrl.vv v8, v12, v16, v0.t
+; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma
+; CHECK-NEXT: csrwi vxrm, 0
+; CHECK-NEXT: vssrl.vx v8, v8, a0
; CHECK-NEXT: ret
entry:
- %a = call <vscale x 32 x i8> @llvm.riscv.vssrl.mask.nxv32i8.nxv32i8(
- <vscale x 32 x i8> %0,
- <vscale x 32 x i8> %1,
- <vscale x 32 x i8> %2,
- <vscale x 32 x i1> %3,
- i32 %4, i32 1)
-
- ret <vscale x 32 x i8> %a
+ %0 = call <vscale x 32 x i8> @llvm.riscv.vssrl.nxv32i8.i32.i32(<vscale x 32 x i8> poison, <vscale x 32 x i8> %op1, i32 %shift, i32 0, i32 %vl)
+ ret <vscale x 32 x i8> %0
}
-declare <vscale x 64 x i8> @llvm.riscv.vssrl.nxv64i8.nxv64i8(
- <vscale x 64 x i8>,
- <vscale x 64 x i8>,
- <vscale x 64 x i8>,
- i32);
+declare <vscale x 32 x i8> @llvm.riscv.vssrl.nxv32i8.i32.i32(<vscale x 32 x i8>, <vscale x 32 x i8>, i32, i32 immarg, i32)
-define <vscale x 64 x i8> @intrinsic_vssrl_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vssrl_vv_nxv64i8_nxv64i8_nxv64i8:
+define <vscale x 64 x i8> @test_vssrl_vv_u8m8(<vscale x 64 x i8> %op1, <vscale x 64 x i8> %shift, i32 %vl) {
+; CHECK-LABEL: test_vssrl_vv_u8m8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma
+; CHECK-NEXT: csrwi vxrm, 0
; CHECK-NEXT: vssrl.vv v8, v8, v16
; CHECK-NEXT: ret
entry:
- %a = call <vscale x 64 x i8> @llvm.riscv.vssrl.nxv64i8.nxv64i8(
- <vscale x 64 x i8> undef,
- <vscale x 64 x i8> %0,
- <vscale x 64 x i8> %1,
- i32 %2)
-
- ret <vscale x 64 x i8> %a
+ %0 = call <vscale x 64 x i8> @llvm.riscv.vssrl.nxv64i8.nxv64i8.i32(<vscale x 64 x i8> poison, <vscale x 64 x i8> %op1, <vscale x 64 x i8> %shift, i32 0, i32 %vl)
+ ret <vscale x 64 x i8> %0
}
-declare <vscale x 64 x i8> @llvm.riscv.vssrl.mask.nxv64i8.nxv64i8(
- <vscale x 64 x i8>,
- <vscale x 64 x i8>,
- <vscale x 64 x i8>,
- <vscale x 64 x i1>,
- i32,
- i32);
+declare <vscale x 64 x i8> @llvm.riscv.vssrl.nxv64i8.nxv64i8.i32(<vscale x 64 x i8>, <vscale x 64 x i8>, <vscale x 64 x i8>, i32 immarg, i32)
-define <vscale x 64 x i8> @intrinsic_vssrl_mask_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i8> %2, <vscale x 64 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vssrl_mask_vv_nxv64i8_nxv64i8_nxv64i8:
+define <vscale x 64 x i8> @test_vssrl_vx_u8m8(<vscale x 64 x i8> %op1, i32 %shift, i32 %vl) {
+; CHECK-LABEL: test_vssrl_vx_u8m8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vl8r.v v24, (a0)
-; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu
-; CHECK-NEXT: vssrl.vv v8, v16, v24, v0.t
+; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma
+; CHECK-NEXT: csrwi vxrm, 0
+; CHECK-NEXT: vssrl.vx v8, v8, a0
; CHECK-NEXT: ret
entry:
- %a = call <vscale x 64 x i8> @llvm.riscv.vssrl.mask.nxv64i8.nxv64i8(
- <vscale x 64 x i8> %0,
- <vscale x 64 x i8> %1,
- <vscale x 64 x i8> %2,
- <vscale x 64 x i1> %3,
- i32 %4, i32 1)
-
- ret <vscale x 64 x i8> %a
+ %0 = call <vscale x 64 x i8> @llvm.riscv.vssrl.nxv64i8.i32.i32(<vscale x 64 x i8> poison, <vscale x 64 x i8> %op1, i32 %shift, i32 0, i32 %vl)
+ ret <vscale x 64 x i8> %0
}
-declare <vscale x 1 x i16> @llvm.riscv.vssrl.nxv1i16.nxv1i16(
- <vscale x 1 x i16>,
- <vscale x 1 x i16>,
- <vscale x 1 x i16>,
- i32);
+declare <vscale x 64 x i8> @llvm.riscv.vssrl.nxv64i8.i32.i32(<vscale x 64 x i8>, <vscale x 64 x i8>, i32, i32 immarg, i32)
-define <vscale x 1 x i16> @intrinsic_vssrl_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vssrl_vv_nxv1i16_nxv1i16_nxv1i16:
+define <vscale x 1 x i16> @test_vssrl_vv_u16mf4(<vscale x 1 x i16> %op1, <vscale x 1 x i16> %shift, i32 %vl) {
+; CHECK-LABEL: test_vssrl_vv_u16mf4:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
+; CHECK-NEXT: csrwi vxrm, 0
; CHECK-NEXT: vssrl.vv v8, v8, v9
; CHECK-NEXT: ret
entry:
- %a = call <vscale x 1 x i16> @llvm.riscv.vssrl.nxv1i16.nxv1i16(
- <vscale x 1 x i16> undef,
- <vscale x 1 x i16> %0,
- <vscale x 1 x i16> %1,
- i32 %2)
-
- ret <vscale x 1 x i16> %a
+ %0 = call <vscale x 1 x i16> @llvm.riscv.vssrl.nxv1i16.nxv1i16.i32(<vscale x 1 x i16> poison, <vscale x 1 x i16> %op1, <vscale x 1 x i16> %shift, i32 0, i32 %vl)
+ ret <vscale x 1 x i16> %0
}
-declare <vscale x 1 x i16> @llvm.riscv.vssrl.mask.nxv1i16.nxv1i16(
- <vscale x 1 x i16>,
- <vscale x 1 x i16>,
- <vscale x 1 x i16>,
- <vscale x 1 x i1>,
- i32,
- i32);
+declare <vscale x 1 x i16> @llvm.riscv.vssrl.nxv1i16.nxv1i16.i32(<vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, i32 immarg, i32)
-define <vscale x 1 x i16> @intrinsic_vssrl_mask_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vssrl_mask_vv_nxv1i16_nxv1i16_nxv1i16:
+define <vscale x 1 x i16> @test_vssrl_vx_u16mf4(<vscale x 1 x i16> %op1, i32 %shift, i32 %vl) {
+; CHECK-LABEL: test_vssrl_vx_u16mf4:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
-; CHECK-NEXT: vssrl.vv v8, v9, v10, v0.t
+; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
+; CHECK-NEXT: csrwi vxrm, 0
+; CHECK-NEXT: vssrl.vx v8, v8, a0
; CHECK-NEXT: ret
entry:
- %a = call <vscale x 1 x i16> @llvm.riscv.vssrl.mask.nxv1i16.nxv1i16(
- <vscale x 1 x i16> %0,
- <vscale x 1 x i16> %1,
- <vscale x 1 x i16> %2,
- <vscale x 1 x i1> %3,
- i32 %4, i32 1)
-
- ret <vscale x 1 x i16> %a
+ %0 = call <vscale x 1 x i16> @llvm.riscv.vssrl.nxv1i16.i32.i32(<vscale x 1 x i16> poison, <vscale x 1 x i16> %op1, i32 %shift, i32 0, i32 %vl)
+ ret <vscale x 1 x i16> %0
}
-declare <vscale x 2 x i16> @llvm.riscv.vssrl.nxv2i16.nxv2i16(
- <vscale x 2 x i16>,
- <vscale x 2 x i16>,
- <vscale x 2 x i16>,
- i32);
+declare <vscale x 1 x i16> @llvm.riscv.vssrl.nxv1i16.i32.i32(<vscale x 1 x i16>, <vscale x 1 x i16>, i32, i32 immarg, i32)
-define <vscale x 2 x i16> @intrinsic_vssrl_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vssrl_vv_nxv2i16_nxv2i16_nxv2i16:
+define <vscale x 2 x i16> @test_vssrl_vv_u16mf2(<vscale x 2 x i16> %op1, <vscale x 2 x i16> %shift, i32 %vl) {
+; CHECK-LABEL: test_vssrl_vv_u16mf2:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
+; CHECK-NEXT: csrwi vxrm, 0
; CHECK-NEXT: vssrl.vv v8, v8, v9
; CHECK-NEXT: ret
entry:
- %a = call <vscale x 2 x i16> @llvm.riscv.vssrl.nxv2i16.nxv2i16(
- <vscale x 2 x i16> undef,
- <vscale x 2 x i16> %0,
- <vscale x 2 x i16> %1,
- i32 %2)
-
- ret <vscale x 2 x i16> %a
+ %0 = call <vscale x 2 x i16> @llvm.riscv.vssrl.nxv2i16.nxv2i16.i32(<vscale x 2 x i16> poison, <vscale x 2 x i16> %op1, <vscale x 2 x i16> %shift, i32 0, i32 %vl)
+ ret <vscale x 2 x i16> %0
}
-declare <vscale x 2 x i16> @llvm.riscv.vssrl.mask.nxv2i16.nxv2i16(
- <vscale x 2 x i16>,
- <vscale x 2 x i16>,
- <vscale x 2 x i16>,
- <vscale x 2 x i1>,
- i32,
- i32);
+declare <vscale x 2 x i16> @llvm.riscv.vssrl.nxv2i16.nxv2i16.i32(<vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, i32 immarg, i32)
-define <vscale x 2 x i16> @intrinsic_vssrl_mask_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vssrl_mask_vv_nxv2i16_nxv2i16_nxv2i16:
+define <vscale x 2 x i16> @test_vssrl_vx_u16mf2(<vscale x 2 x i16> %op1, i32 %shift, i32 %vl) {
+; CHECK-LABEL: test_vssrl_vx_u16mf2:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu
-; CHECK-NEXT: vssrl.vv v8, v9, v10, v0.t
+; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
+; CHECK-NEXT: csrwi vxrm, 0
+; CHECK-NEXT: vssrl.vx v8, v8, a0
; CHECK-NEXT: ret
entry:
- %a = call <vscale x 2 x i16> @llvm.riscv.vssrl.mask.nxv2i16.nxv2i16(
- <vscale x 2 x i16> %0,
- <vscale x 2 x i16> %1,
- <vscale x 2 x i16> %2,
- <vscale x 2 x i1> %3,
- i32 %4, i32 1)
-
- ret <vscale x 2 x i16> %a
+ %0 = call <vscale x 2 x i16> @llvm.riscv.vssrl.nxv2i16.i32.i32(<vscale x 2 x i16> poison, <vscale x 2 x i16> %op1, i32 %shift, i32 0, i32 %vl)
+ ret <vscale x 2 x i16> %0
}
-declare <vscale x 4 x i16> @llvm.riscv.vssrl.nxv4i16.nxv4i16(
- <vscale x 4 x i16>,
- <vscale x 4 x i16>,
- <vscale x 4 x i16>,
- i32);
+declare <vscale x 2 x i16> @llvm.riscv.vssrl.nxv2i16.i32.i32(<vscale x 2 x i16>, <vscale x 2 x i16>, i32, i32 immarg, i32)
-define <vscale x 4 x i16> @intrinsic_vssrl_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vssrl_vv_nxv4i16_nxv4i16_nxv4i16:
+define <vscale x 4 x i16> @test_vssrl_vv_u16m1(<vscale x 4 x i16> %op1, <vscale x 4 x i16> %shift, i32 %vl) {
+; CHECK-LABEL: test_vssrl_vv_u16m1:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
+; CHECK-NEXT: csrwi vxrm, 0
; CHECK-NEXT: vssrl.vv v8, v8, v9
; CHECK-NEXT: ret
entry:
- %a = call <vscale x 4 x i16> @llvm.riscv.vssrl.nxv4i16.nxv4i16(
- <vscale x 4 x i16> undef,
- <vscale x 4 x i16> %0,
- <vscale x 4 x i16> %1,
- i32 %2)
-
- ret <vscale x 4 x i16> %a
+ %0 = call <vscale x 4 x i16> @llvm.riscv.vssrl.nxv4i16.nxv4i16.i32(<vscale x 4 x i16> poison, <vscale x 4 x i16> %op1, <vscale x 4 x i16> %shift, i32 0, i32 %vl)
+ ret <vscale x 4 x i16> %0
}
-declare <vscale x 4 x i16> @llvm.riscv.vssrl.mask.nxv4i16.nxv4i16(
- <vscale x 4 x i16>,
- <vscale x 4 x i16>,
- <vscale x 4 x i16>,
- <vscale x 4 x i1>,
- i32,
- i32);
+declare <vscale x 4 x i16> @llvm.riscv.vssrl.nxv4i16.nxv4i16.i32(<vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, i32 immarg, i32)
-define <vscale x 4 x i16> @intrinsic_vssrl_mask_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vssrl_mask_vv_nxv4i16_nxv4i16_nxv4i16:
+define <vscale x 4 x i16> @test_vssrl_vx_u16m1(<vscale x 4 x i16> %op1, i32 %shift, i32 %vl) {
+; CHECK-LABEL: test_vssrl_vx_u16m1:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu
-; CHECK-NEXT: vssrl.vv v8, v9, v10, v0.t
+; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
+; CHECK-NEXT: csrwi vxrm, 0
+; CHECK-NEXT: vssrl.vx v8, v8, a0
; CHECK-NEXT: ret
entry:
- %a = call <vscale x 4 x i16> @llvm.riscv.vssrl.mask.nxv4i16.nxv4i16(
- <vscale x 4 x i16> %0,
- <vscale x 4 x i16> %1,
- <vscale x 4 x i16> %2,
- <vscale x 4 x i1> %3,
- i32 %4, i32 1)
-
- ret <vscale x 4 x i16> %a
+ %0 = call <vscale x 4 x i16> @llvm.riscv.vssrl.nxv4i16.i32.i32(<vscale x 4 x i16> poison, <vscale x 4 x i16> %op1, i32 %shift, i32 0, i32 %vl)
+ ret <vscale x 4 x i16> %0
}
-declare <vscale x 8 x i16> @llvm.riscv.vssrl.nxv8i16.nxv8i16(
- <vscale x 8 x i16>,
- <vscale x 8 x i16>,
- <vscale x 8 x i16>,
- i32);
+declare <vscale x 4 x i16> @llvm.riscv.vssrl.nxv4i16.i32.i32(<vscale x 4 x i16>, <vscale x 4 x i16>, i32, i32 immarg, i32)
-define <vscale x 8 x i16> @intrinsic_vssrl_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vssrl_vv_nxv8i16_nxv8i16_nxv8i16:
+define <vscale x 8 x i16> @test_vssrl_vv_u16m2(<vscale x 8 x i16> %op1, <vscale x 8 x i16> %shift, i32 %vl) {
+; CHECK-LABEL: test_vssrl_vv_u16m2:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
+; CHECK-NEXT: csrwi vxrm, 0
; CHECK-NEXT: vssrl.vv v8, v8, v10
; CHECK-NEXT: ret
entry:
- %a = call <vscale x 8 x i16> @llvm.riscv.vssrl.nxv8i16.nxv8i16(
- <vscale x 8 x i16> undef,
- <vscale x 8 x i16> %0,
- <vscale x 8 x i16> %1,
- i32 %2)
-
- ret <vscale x 8 x i16> %a
+ %0 = call <vscale x 8 x i16> @llvm.riscv.vssrl.nxv8i16.nxv8i16.i32(<vscale x 8 x i16> poison, <vscale x 8 x i16> %op1, <vscale x 8 x i16> %shift, i32 0, i32 %vl)
+ ret <vscale x 8 x i16> %0
}
-declare <vscale x 8 x i16> @llvm.riscv.vssrl.mask.nxv8i16.nxv8i16(
- <vscale x 8 x i16>,
- <vscale x 8 x i16>,
- <vscale x 8 x i16>,
- <vscale x 8 x i1>,
- i32,
- i32);
+declare <vscale x 8 x i16> @llvm.riscv.vssrl.nxv8i16.nxv8i16.i32(<vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, i32 immarg, i32)
-define <vscale x 8 x i16> @intrinsic_vssrl_mask_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vssrl_mask_vv_nxv8i16_nxv8i16_nxv8i16:
+define <vscale x 8 x i16> @test_vssrl_vx_u16m2(<vscale x 8 x i16> %op1, i32 %shift, i32 %vl) {
+; CHECK-LABEL: test_vssrl_vx_u16m2:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu
-; CHECK-NEXT: vssrl.vv v8, v10, v12, v0.t
+; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
+; CHECK-NEXT: csrwi vxrm, 0
+; CHECK-NEXT: vssrl.vx v8, v8, a0
; CHECK-NEXT: ret
entry:
- %a = call <vscale x 8 x i16> @llvm.riscv.vssrl.mask.nxv8i16.nxv8i16(
- <vscale x 8 x i16> %0,
- <vscale x 8 x i16> %1,
- <vscale x 8 x i16> %2,
- <vscale x 8 x i1> %3,
- i32 %4, i32 1)
-
- ret <vscale x 8 x i16> %a
+ %0 = call <vscale x 8 x i16> @llvm.riscv.vssrl.nxv8i16.i32.i32(<vscale x 8 x i16> poison, <vscale x 8 x i16> %op1, i32 %shift, i32 0, i32 %vl)
+ ret <vscale x 8 x i16> %0
}
-declare <vscale x 16 x i16> @llvm.riscv.vssrl.nxv16i16.nxv16i16(
- <vscale x 16 x i16>,
- <vscale x 16 x i16>,
- <vscale x 16 x i16>,
- i32);
+declare <vscale x 8 x i16> @llvm.riscv.vssrl.nxv8i16.i32.i32(<vscale x 8 x i16>, <vscale x 8 x i16>, i32, i32 immarg, i32)
-define <vscale x 16 x i16> @intrinsic_vssrl_vv_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vssrl_vv_nxv16i16_nxv16i16_nxv16i16:
+define <vscale x 16 x i16> @test_vssrl_vv_u16m4(<vscale x 16 x i16> %op1, <vscale x 16 x i16> %shift, i32 %vl) {
+; CHECK-LABEL: test_vssrl_vv_u16m4:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma
+; CHECK-NEXT: csrwi vxrm, 0
; CHECK-NEXT: vssrl.vv v8, v8, v12
; CHECK-NEXT: ret
entry:
- %a = call <vscale x 16 x i16> @llvm.riscv.vssrl.nxv16i16.nxv16i16(
- <vscale x 16 x i16> undef,
- <vscale x 16 x i16> %0,
- <vscale x 16 x i16> %1,
- i32 %2)
-
- ret <vscale x 16 x i16> %a
+ %0 = call <vscale x 16 x i16> @llvm.riscv.vssrl.nxv16i16.nxv16i16.i32(<vscale x 16 x i16> poison, <vscale x 16 x i16> %op1, <vscale x 16 x i16> %shift, i32 0, i32 %vl)
+ ret <vscale x 16 x i16> %0
}
-declare <vscale x 16 x i16> @llvm.riscv.vssrl.mask.nxv16i16.nxv16i16(
- <vscale x 16 x i16>,
- <vscale x 16 x i16>,
- <vscale x 16 x i16>,
- <vscale x 16 x i1>,
- i32,
- i32);
+declare <vscale x 16 x i16> @llvm.riscv.vssrl.nxv16i16.nxv16i16.i32(<vscale x 16 x i16>, <vscale x 16 x i16>, <vscale x 16 x i16>, i32 immarg, i32)
-define <vscale x 16 x i16> @intrinsic_vssrl_mask_vv_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vssrl_mask_vv_nxv16i16_nxv16i16_nxv16i16:
+define <vscale x 16 x i16> @test_vssrl_vx_u16m4(<vscale x 16 x i16> %op1, i32 %shift, i32 %vl) {
+; CHECK-LABEL: test_vssrl_vx_u16m4:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu
-; CHECK-NEXT: vssrl.vv v8, v12, v16, v0.t
+; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma
+; CHECK-NEXT: csrwi vxrm, 0
+; CHECK-NEXT: vssrl.vx v8, v8, a0
; CHECK-NEXT: ret
entry:
- %a = call <vscale x 16 x i16> @llvm.riscv.vssrl.mask.nxv16i16.nxv16i16(
- <vscale x 16 x i16> %0,
- <vscale x 16 x i16> %1,
- <vscale x 16 x i16> %2,
- <vscale x 16 x i1> %3,
- i32 %4, i32 1)
-
- ret <vscale x 16 x i16> %a
+ %0 = call <vscale x 16 x i16> @llvm.riscv.vssrl.nxv16i16.i32.i32(<vscale x 16 x i16> poison, <vscale x 16 x i16> %op1, i32 %shift, i32 0, i32 %vl)
+ ret <vscale x 16 x i16> %0
}
-declare <vscale x 32 x i16> @llvm.riscv.vssrl.nxv32i16.nxv32i16(
- <vscale x 32 x i16>,
- <vscale x 32 x i16>,
- <vscale x 32 x i16>,
- i32);
+declare <vscale x 16 x i16> @llvm.riscv.vssrl.nxv16i16.i32.i32(<vscale x 16 x i16>, <vscale x 16 x i16>, i32, i32 immarg, i32)
-define <vscale x 32 x i16> @intrinsic_vssrl_vv_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vssrl_vv_nxv32i16_nxv32i16_nxv32i16:
+define <vscale x 32 x i16> @test_vssrl_vv_u16m8(<vscale x 32 x i16> %op1, <vscale x 32 x i16> %shift, i32 %vl) {
+; CHECK-LABEL: test_vssrl_vv_u16m8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma
+; CHECK-NEXT: csrwi vxrm, 0
; CHECK-NEXT: vssrl.vv v8, v8, v16
; CHECK-NEXT: ret
entry:
- %a = call <vscale x 32 x i16> @llvm.riscv.vssrl.nxv32i16.nxv32i16(
- <vscale x 32 x i16> undef,
- <vscale x 32 x i16> %0,
- <vscale x 32 x i16> %1,
- i32 %2)
-
- ret <vscale x 32 x i16> %a
+ %0 = call <vscale x 32 x i16> @llvm.riscv.vssrl.nxv32i16.nxv32i16.i32(<vscale x 32 x i16> poison, <vscale x 32 x i16> %op1, <vscale x 32 x i16> %shift, i32 0, i32 %vl)
+ ret <vscale x 32 x i16> %0
}
-declare <vscale x 32 x i16> @llvm.riscv.vssrl.mask.nxv32i16.nxv32i16(
- <vscale x 32 x i16>,
- <vscale x 32 x i16>,
- <vscale x 32 x i16>,
- <vscale x 32 x i1>,
- i32,
- i32);
+declare <vscale x 32 x i16> @llvm.riscv.vssrl.nxv32i16.nxv32i16.i32(<vscale x 32 x i16>, <vscale x 32 x i16>, <vscale x 32 x i16>, i32 immarg, i32)
-define <vscale x 32 x i16> @intrinsic_vssrl_mask_vv_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, <vscale x 32 x i16> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vssrl_mask_vv_nxv32i16_nxv32i16_nxv32i16:
+define <vscale x 32 x i16> @test_vssrl_vx_u16m8(<vscale x 32 x i16> %op1, i32 %shift, i32 %vl) {
+; CHECK-LABEL: test_vssrl_vx_u16m8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vl8re16.v v24, (a0)
-; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu
-; CHECK-NEXT: vssrl.vv v8, v16, v24, v0.t
+; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma
+; CHECK-NEXT: csrwi vxrm, 0
+; CHECK-NEXT: vssrl.vx v8, v8, a0
; CHECK-NEXT: ret
entry:
- %a = call <vscale x 32 x i16> @llvm.riscv.vssrl.mask.nxv32i16.nxv32i16(
- <vscale x 32 x i16> %0,
- <vscale x 32 x i16> %1,
- <vscale x 32 x i16> %2,
- <vscale x 32 x i1> %3,
- i32 %4, i32 1)
-
- ret <vscale x 32 x i16> %a
+ %0 = call <vscale x 32 x i16> @llvm.riscv.vssrl.nxv32i16.i32.i32(<vscale x 32 x i16> poison, <vscale x 32 x i16> %op1, i32 %shift, i32 0, i32 %vl)
+ ret <vscale x 32 x i16> %0
}
-declare <vscale x 1 x i32> @llvm.riscv.vssrl.nxv1i32.nxv1i32(
- <vscale x 1 x i32>,
- <vscale x 1 x i32>,
- <vscale x 1 x i32>,
- i32);
+declare <vscale x 32 x i16> @llvm.riscv.vssrl.nxv32i16.i32.i32(<vscale x 32 x i16>, <vscale x 32 x i16>, i32, i32 immarg, i32)
-define <vscale x 1 x i32> @intrinsic_vssrl_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vssrl_vv_nxv1i32_nxv1i32_nxv1i32:
+define <vscale x 1 x i32> @test_vssrl_vv_u32mf2(<vscale x 1 x i32> %op1, <vscale x 1 x i32> %shift, i32 %vl) {
+; CHECK-LABEL: test_vssrl_vv_u32mf2:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
+; CHECK-NEXT: csrwi vxrm, 0
; CHECK-NEXT: vssrl.vv v8, v8, v9
; CHECK-NEXT: ret
entry:
- %a = call <vscale x 1 x i32> @llvm.riscv.vssrl.nxv1i32.nxv1i32(
- <vscale x 1 x i32> undef,
- <vscale x 1 x i32> %0,
- <vscale x 1 x i32> %1,
- i32 %2)
-
- ret <vscale x 1 x i32> %a
+ %0 = call <vscale x 1 x i32> @llvm.riscv.vssrl.nxv1i32.nxv1i32.i32(<vscale x 1 x i32> poison, <vscale x 1 x i32> %op1, <vscale x 1 x i32> %shift, i32 0, i32 %vl)
+ ret <vscale x 1 x i32> %0
}
-declare <vscale x 1 x i32> @llvm.riscv.vssrl.mask.nxv1i32.nxv1i32(
- <vscale x 1 x i32>,
- <vscale x 1 x i32>,
- <vscale x 1 x i32>,
- <vscale x 1 x i1>,
- i32,
- i32);
+declare <vscale x 1 x i32> @llvm.riscv.vssrl.nxv1i32.nxv1i32.i32(<vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, i32 immarg, i32)
-define <vscale x 1 x i32> @intrinsic_vssrl_mask_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vssrl_mask_vv_nxv1i32_nxv1i32_nxv1i32:
+define <vscale x 1 x i32> @test_vssrl_vx_u32mf2(<vscale x 1 x i32> %op1, i32 %shift, i32 %vl) {
+; CHECK-LABEL: test_vssrl_vx_u32mf2:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu
-; CHECK-NEXT: vssrl.vv v8, v9, v10, v0.t
+; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
+; CHECK-NEXT: csrwi vxrm, 0
+; CHECK-NEXT: vssrl.vx v8, v8, a0
; CHECK-NEXT: ret
entry:
- %a = call <vscale x 1 x i32> @llvm.riscv.vssrl.mask.nxv1i32.nxv1i32(
- <vscale x 1 x i32> %0,
- <vscale x 1 x i32> %1,
- <vscale x 1 x i32> %2,
- <vscale x 1 x i1> %3,
- i32 %4, i32 1)
-
- ret <vscale x 1 x i32> %a
+ %0 = call <vscale x 1 x i32> @llvm.riscv.vssrl.nxv1i32.i32.i32(<vscale x 1 x i32> poison, <vscale x 1 x i32> %op1, i32 %shift, i32 0, i32 %vl)
+ ret <vscale x 1 x i32> %0
}
-declare <vscale x 2 x i32> @llvm.riscv.vssrl.nxv2i32.nxv2i32(
- <vscale x 2 x i32>,
- <vscale x 2 x i32>,
- <vscale x 2 x i32>,
- i32);
+declare <vscale x 1 x i32> @llvm.riscv.vssrl.nxv1i32.i32.i32(<vscale x 1 x i32>, <vscale x 1 x i32>, i32, i32 immarg, i32)
-define <vscale x 2 x i32> @intrinsic_vssrl_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vssrl_vv_nxv2i32_nxv2i32_nxv2i32:
+define <vscale x 2 x i32> @test_vssrl_vv_u32m1(<vscale x 2 x i32> %op1, <vscale x 2 x i32> %shift, i32 %vl) {
+; CHECK-LABEL: test_vssrl_vv_u32m1:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
+; CHECK-NEXT: csrwi vxrm, 0
; CHECK-NEXT: vssrl.vv v8, v8, v9
; CHECK-NEXT: ret
entry:
- %a = call <vscale x 2 x i32> @llvm.riscv.vssrl.nxv2i32.nxv2i32(
- <vscale x 2 x i32> undef,
- <vscale x 2 x i32> %0,
- <vscale x 2 x i32> %1,
- i32 %2)
-
- ret <vscale x 2 x i32> %a
+ %0 = call <vscale x 2 x i32> @llvm.riscv.vssrl.nxv2i32.nxv2i32.i32(<vscale x 2 x i32> poison, <vscale x 2 x i32> %op1, <vscale x 2 x i32> %shift, i32 0, i32 %vl)
+ ret <vscale x 2 x i32> %0
}
-declare <vscale x 2 x i32> @llvm.riscv.vssrl.mask.nxv2i32.nxv2i32(
- <vscale x 2 x i32>,
- <vscale x 2 x i32>,
- <vscale x 2 x i32>,
- <vscale x 2 x i1>,
- i32,
- i32);
+declare <vscale x 2 x i32> @llvm.riscv.vssrl.nxv2i32.nxv2i32.i32(<vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, i32 immarg, i32)
-define <vscale x 2 x i32> @intrinsic_vssrl_mask_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vssrl_mask_vv_nxv2i32_nxv2i32_nxv2i32:
+define <vscale x 2 x i32> @test_vssrl_vx_u32m1(<vscale x 2 x i32> %op1, i32 %shift, i32 %vl) {
+; CHECK-LABEL: test_vssrl_vx_u32m1:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
-; CHECK-NEXT: vssrl.vv v8, v9, v10, v0.t
+; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
+; CHECK-NEXT: csrwi vxrm, 0
+; CHECK-NEXT: vssrl.vx v8, v8, a0
; CHECK-NEXT: ret
entry:
- %a = call <vscale x 2 x i32> @llvm.riscv.vssrl.mask.nxv2i32.nxv2i32(
- <vscale x 2 x i32> %0,
- <vscale x 2 x i32> %1,
- <vscale x 2 x i32> %2,
- <vscale x 2 x i1> %3,
- i32 %4, i32 1)
-
- ret <vscale x 2 x i32> %a
+ %0 = call <vscale x 2 x i32> @llvm.riscv.vssrl.nxv2i32.i32.i32(<vscale x 2 x i32> poison, <vscale x 2 x i32> %op1, i32 %shift, i32 0, i32 %vl)
+ ret <vscale x 2 x i32> %0
}
-declare <vscale x 4 x i32> @llvm.riscv.vssrl.nxv4i32.nxv4i32(
- <vscale x 4 x i32>,
- <vscale x 4 x i32>,
- <vscale x 4 x i32>,
- i32);
+declare <vscale x 2 x i32> @llvm.riscv.vssrl.nxv2i32.i32.i32(<vscale x 2 x i32>, <vscale x 2 x i32>, i32, i32 immarg, i32)
-define <vscale x 4 x i32> @intrinsic_vssrl_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vssrl_vv_nxv4i32_nxv4i32_nxv4i32:
+define <vscale x 4 x i32> @test_vssrl_vv_u32m2(<vscale x 4 x i32> %op1, <vscale x 4 x i32> %shift, i32 %vl) {
+; CHECK-LABEL: test_vssrl_vv_u32m2:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
+; CHECK-NEXT: csrwi vxrm, 0
; CHECK-NEXT: vssrl.vv v8, v8, v10
; CHECK-NEXT: ret
entry:
- %a = call <vscale x 4 x i32> @llvm.riscv.vssrl.nxv4i32.nxv4i32(
- <vscale x 4 x i32> undef,
- <vscale x 4 x i32> %0,
- <vscale x 4 x i32> %1,
- i32 %2)
-
- ret <vscale x 4 x i32> %a
+ %0 = call <vscale x 4 x i32> @llvm.riscv.vssrl.nxv4i32.nxv4i32.i32(<vscale x 4 x i32> poison, <vscale x 4 x i32> %op1, <vscale x 4 x i32> %shift, i32 0, i32 %vl)
+ ret <vscale x 4 x i32> %0
}
-declare <vscale x 4 x i32> @llvm.riscv.vssrl.mask.nxv4i32.nxv4i32(
- <vscale x 4 x i32>,
- <vscale x 4 x i32>,
- <vscale x 4 x i32>,
- <vscale x 4 x i1>,
- i32,
- i32);
+declare <vscale x 4 x i32> @llvm.riscv.vssrl.nxv4i32.nxv4i32.i32(<vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, i32 immarg, i32)
-define <vscale x 4 x i32> @intrinsic_vssrl_mask_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vssrl_mask_vv_nxv4i32_nxv4i32_nxv4i32:
+define <vscale x 4 x i32> @test_vssrl_vx_u32m2(<vscale x 4 x i32> %op1, i32 %shift, i32 %vl) {
+; CHECK-LABEL: test_vssrl_vx_u32m2:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu
-; CHECK-NEXT: vssrl.vv v8, v10, v12, v0.t
+; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
+; CHECK-NEXT: csrwi vxrm, 0
+; CHECK-NEXT: vssrl.vx v8, v8, a0
; CHECK-NEXT: ret
entry:
- %a = call <vscale x 4 x i32> @llvm.riscv.vssrl.mask.nxv4i32.nxv4i32(
- <vscale x 4 x i32> %0,
- <vscale x 4 x i32> %1,
- <vscale x 4 x i32> %2,
- <vscale x 4 x i1> %3,
- i32 %4, i32 1)
-
- ret <vscale x 4 x i32> %a
+ %0 = call <vscale x 4 x i32> @llvm.riscv.vssrl.nxv4i32.i32.i32(<vscale x 4 x i32> poison, <vscale x 4 x i32> %op1, i32 %shift, i32 0, i32 %vl)
+ ret <vscale x 4 x i32> %0
}
-declare <vscale x 8 x i32> @llvm.riscv.vssrl.nxv8i32.nxv8i32(
- <vscale x 8 x i32>,
- <vscale x 8 x i32>,
- <vscale x 8 x i32>,
- i32);
+declare <vscale x 4 x i32> @llvm.riscv.vssrl.nxv4i32.i32.i32(<vscale x 4 x i32>, <vscale x 4 x i32>, i32, i32 immarg, i32)
-define <vscale x 8 x i32> @intrinsic_vssrl_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vssrl_vv_nxv8i32_nxv8i32_nxv8i32:
+define <vscale x 8 x i32> @test_vssrl_vv_u32m4(<vscale x 8 x i32> %op1, <vscale x 8 x i32> %shift, i32 %vl) {
+; CHECK-LABEL: test_vssrl_vv_u32m4:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
+; CHECK-NEXT: csrwi vxrm, 0
; CHECK-NEXT: vssrl.vv v8, v8, v12
; CHECK-NEXT: ret
entry:
- %a = call <vscale x 8 x i32> @llvm.riscv.vssrl.nxv8i32.nxv8i32(
- <vscale x 8 x i32> undef,
- <vscale x 8 x i32> %0,
- <vscale x 8 x i32> %1,
- i32 %2)
-
- ret <vscale x 8 x i32> %a
+ %0 = call <vscale x 8 x i32> @llvm.riscv.vssrl.nxv8i32.nxv8i32.i32(<vscale x 8 x i32> poison, <vscale x 8 x i32> %op1, <vscale x 8 x i32> %shift, i32 0, i32 %vl)
+ ret <vscale x 8 x i32> %0
}
-declare <vscale x 8 x i32> @llvm.riscv.vssrl.mask.nxv8i32.nxv8i32(
- <vscale x 8 x i32>,
- <vscale x 8 x i32>,
- <vscale x 8 x i32>,
- <vscale x 8 x i1>,
- i32,
- i32);
+declare <vscale x 8 x i32> @llvm.riscv.vssrl.nxv8i32.nxv8i32.i32(<vscale x 8 x i32>, <vscale x 8 x i32>, <vscale x 8 x i32>, i32 immarg, i32)
-define <vscale x 8 x i32> @intrinsic_vssrl_mask_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vssrl_mask_vv_nxv8i32_nxv8i32_nxv8i32:
+define <vscale x 8 x i32> @test_vssrl_vx_u32m4(<vscale x 8 x i32> %op1, i32 %shift, i32 %vl) {
+; CHECK-LABEL: test_vssrl_vx_u32m4:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu
-; CHECK-NEXT: vssrl.vv v8, v12, v16, v0.t
+; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma
+; CHECK-NEXT: csrwi vxrm, 0
+; CHECK-NEXT: vssrl.vx v8, v8, a0
; CHECK-NEXT: ret
entry:
- %a = call <vscale x 8 x i32> @llvm.riscv.vssrl.mask.nxv8i32.nxv8i32(
- <vscale x 8 x i32> %0,
- <vscale x 8 x i32> %1,
- <vscale x 8 x i32> %2,
- <vscale x 8 x i1> %3,
- i32 %4, i32 1)
-
- ret <vscale x 8 x i32> %a
+ %0 = call <vscale x 8 x i32> @llvm.riscv.vssrl.nxv8i32.i32.i32(<vscale x 8 x i32> poison, <vscale x 8 x i32> %op1, i32 %shift, i32 0, i32 %vl)
+ ret <vscale x 8 x i32> %0
}
-declare <vscale x 16 x i32> @llvm.riscv.vssrl.nxv16i32.nxv16i32(
- <vscale x 16 x i32>,
- <vscale x 16 x i32>,
- <vscale x 16 x i32>,
- i32);
+declare <vscale x 8 x i32> @llvm.riscv.vssrl.nxv8i32.i32.i32(<vscale x 8 x i32>, <vscale x 8 x i32>, i32, i32 immarg, i32)
-define <vscale x 16 x i32> @intrinsic_vssrl_vv_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vssrl_vv_nxv16i32_nxv16i32_nxv16i32:
+define <vscale x 16 x i32> @test_vssrl_vv_u32m8(<vscale x 16 x i32> %op1, <vscale x 16 x i32> %shift, i32 %vl) {
+; CHECK-LABEL: test_vssrl_vv_u32m8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma
+; CHECK-NEXT: csrwi vxrm, 0
; CHECK-NEXT: vssrl.vv v8, v8, v16
; CHECK-NEXT: ret
entry:
- %a = call <vscale x 16 x i32> @llvm.riscv.vssrl.nxv16i32.nxv16i32(
- <vscale x 16 x i32> undef,
- <vscale x 16 x i32> %0,
- <vscale x 16 x i32> %1,
- i32 %2)
-
- ret <vscale x 16 x i32> %a
-}
-
-declare <vscale x 16 x i32> @llvm.riscv.vssrl.mask.nxv16i32.nxv16i32(
- <vscale x 16 x i32>,
- <vscale x 16 x i32>,
- <vscale x 16 x i32>,
- <vscale x 16 x i1>,
- i32,
- i32);
-
-define <vscale x 16 x i32> @intrinsic_vssrl_mask_vv_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vssrl_mask_vv_nxv16i32_nxv16i32_nxv16i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vl8re32.v v24, (a0)
-; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu
-; CHECK-NEXT: vssrl.vv v8, v16, v24, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 16 x i32> @llvm.riscv.vssrl.mask.nxv16i32.nxv16i32(
- <vscale x 16 x i32> %0,
- <vscale x 16 x i32> %1,
- <vscale x 16 x i32> %2,
- <vscale x 16 x i1> %3,
- i32 %4, i32 1)
-
- ret <vscale x 16 x i32> %a
-}
-
-declare <vscale x 1 x i8> @llvm.riscv.vssrl.nxv1i8(
- <vscale x 1 x i8>,
- <vscale x 1 x i8>,
- i32,
- i32);
-
-define <vscale x 1 x i8> @intrinsic_vssrl_vx_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, i32 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vssrl_vx_nxv1i8_nxv1i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
-; CHECK-NEXT: vssrl.vx v8, v8, a0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i8> @llvm.riscv.vssrl.nxv1i8(
- <vscale x 1 x i8> undef,
- <vscale x 1 x i8> %0,
- i32 %1,
- i32 %2)
-
- ret <vscale x 1 x i8> %a
-}
-
-declare <vscale x 1 x i8> @llvm.riscv.vssrl.mask.nxv1i8(
- <vscale x 1 x i8>,
- <vscale x 1 x i8>,
- i32,
- <vscale x 1 x i1>,
- i32,
- i32);
-
-define <vscale x 1 x i8> @intrinsic_vssrl_mask_vx_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i32 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vssrl_mask_vx_nxv1i8_nxv1i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
-; CHECK-NEXT: vssrl.vx v8, v9, a0, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i8> @llvm.riscv.vssrl.mask.nxv1i8(
- <vscale x 1 x i8> %0,
- <vscale x 1 x i8> %1,
- i32 %2,
- <vscale x 1 x i1> %3,
- i32 %4, i32 1)
-
- ret <vscale x 1 x i8> %a
-}
-
-declare <vscale x 2 x i8> @llvm.riscv.vssrl.nxv2i8(
- <vscale x 2 x i8>,
- <vscale x 2 x i8>,
- i32,
- i32);
-
-define <vscale x 2 x i8> @intrinsic_vssrl_vx_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, i32 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vssrl_vx_nxv2i8_nxv2i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
-; CHECK-NEXT: vssrl.vx v8, v8, a0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i8> @llvm.riscv.vssrl.nxv2i8(
- <vscale x 2 x i8> undef,
- <vscale x 2 x i8> %0,
- i32 %1,
- i32 %2)
-
- ret <vscale x 2 x i8> %a
+ %0 = call <vscale x 16 x i32> @llvm.riscv.vssrl.nxv16i32.nxv16i32.i32(<vscale x 16 x i32> poison, <vscale x 16 x i32> %op1, <vscale x 16 x i32> %shift, i32 0, i32 %vl)
+ ret <vscale x 16 x i32> %0
}
-declare <vscale x 2 x i8> @llvm.riscv.vssrl.mask.nxv2i8(
- <vscale x 2 x i8>,
- <vscale x 2 x i8>,
- i32,
- <vscale x 2 x i1>,
- i32,
- i32);
+declare <vscale x 16 x i32> @llvm.riscv.vssrl.nxv16i32.nxv16i32.i32(<vscale x 16 x i32>, <vscale x 16 x i32>, <vscale x 16 x i32>, i32 immarg, i32)
-define <vscale x 2 x i8> @intrinsic_vssrl_mask_vx_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, i32 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vssrl_mask_vx_nxv2i8_nxv2i8:
+define <vscale x 16 x i32> @test_vssrl_vx_u32m8(<vscale x 16 x i32> %op1, i32 %shift, i32 %vl) {
+; CHECK-LABEL: test_vssrl_vx_u32m8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
-; CHECK-NEXT: vssrl.vx v8, v9, a0, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i8> @llvm.riscv.vssrl.mask.nxv2i8(
- <vscale x 2 x i8> %0,
- <vscale x 2 x i8> %1,
- i32 %2,
- <vscale x 2 x i1> %3,
- i32 %4, i32 1)
-
- ret <vscale x 2 x i8> %a
-}
-
-declare <vscale x 4 x i8> @llvm.riscv.vssrl.nxv4i8(
- <vscale x 4 x i8>,
- <vscale x 4 x i8>,
- i32,
- i32);
-
-define <vscale x 4 x i8> @intrinsic_vssrl_vx_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, i32 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vssrl_vx_nxv4i8_nxv4i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
+; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma
+; CHECK-NEXT: csrwi vxrm, 0
; CHECK-NEXT: vssrl.vx v8, v8, a0
; CHECK-NEXT: ret
entry:
- %a = call <vscale x 4 x i8> @llvm.riscv.vssrl.nxv4i8(
- <vscale x 4 x i8> undef,
- <vscale x 4 x i8> %0,
- i32 %1,
- i32 %2)
-
- ret <vscale x 4 x i8> %a
+ %0 = call <vscale x 16 x i32> @llvm.riscv.vssrl.nxv16i32.i32.i32(<vscale x 16 x i32> poison, <vscale x 16 x i32> %op1, i32 %shift, i32 0, i32 %vl)
+ ret <vscale x 16 x i32> %0
}
-declare <vscale x 4 x i8> @llvm.riscv.vssrl.mask.nxv4i8(
- <vscale x 4 x i8>,
- <vscale x 4 x i8>,
- i32,
- <vscale x 4 x i1>,
- i32,
- i32);
+declare <vscale x 16 x i32> @llvm.riscv.vssrl.nxv16i32.i32.i32(<vscale x 16 x i32>, <vscale x 16 x i32>, i32, i32 immarg, i32)
-define <vscale x 4 x i8> @intrinsic_vssrl_mask_vx_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, i32 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vssrl_mask_vx_nxv4i8_nxv4i8:
+define <vscale x 1 x i64> @test_vssrl_vv_u64m1(<vscale x 1 x i64> %op1, <vscale x 1 x i64> %shift, i32 %vl) {
+; CHECK-LABEL: test_vssrl_vv_u64m1:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
-; CHECK-NEXT: vssrl.vx v8, v9, a0, v0.t
+; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
+; CHECK-NEXT: csrwi vxrm, 0
+; CHECK-NEXT: vssrl.vv v8, v8, v9
; CHECK-NEXT: ret
entry:
- %a = call <vscale x 4 x i8> @llvm.riscv.vssrl.mask.nxv4i8(
- <vscale x 4 x i8> %0,
- <vscale x 4 x i8> %1,
- i32 %2,
- <vscale x 4 x i1> %3,
- i32 %4, i32 1)
-
- ret <vscale x 4 x i8> %a
+ %0 = call <vscale x 1 x i64> @llvm.riscv.vssrl.nxv1i64.nxv1i64.i32(<vscale x 1 x i64> poison, <vscale x 1 x i64> %op1, <vscale x 1 x i64> %shift, i32 0, i32 %vl)
+ ret <vscale x 1 x i64> %0
}
-declare <vscale x 8 x i8> @llvm.riscv.vssrl.nxv8i8(
- <vscale x 8 x i8>,
- <vscale x 8 x i8>,
- i32,
- i32);
+declare <vscale x 1 x i64> @llvm.riscv.vssrl.nxv1i64.nxv1i64.i32(<vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, i32 immarg, i32)
-define <vscale x 8 x i8> @intrinsic_vssrl_vx_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, i32 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vssrl_vx_nxv8i8_nxv8i8:
+define <vscale x 1 x i64> @test_vssrl_vx_u64m1(<vscale x 1 x i64> %op1, i32 %shift, i32 %vl) {
+; CHECK-LABEL: test_vssrl_vx_u64m1:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
+; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
+; CHECK-NEXT: csrwi vxrm, 0
; CHECK-NEXT: vssrl.vx v8, v8, a0
; CHECK-NEXT: ret
entry:
- %a = call <vscale x 8 x i8> @llvm.riscv.vssrl.nxv8i8(
- <vscale x 8 x i8> undef,
- <vscale x 8 x i8> %0,
- i32 %1,
- i32 %2)
-
- ret <vscale x 8 x i8> %a
+ %0 = call <vscale x 1 x i64> @llvm.riscv.vssrl.nxv1i64.i32.i32(<vscale x 1 x i64> poison, <vscale x 1 x i64> %op1, i32 %shift, i32 0, i32 %vl)
+ ret <vscale x 1 x i64> %0
}
-declare <vscale x 8 x i8> @llvm.riscv.vssrl.mask.nxv8i8(
- <vscale x 8 x i8>,
- <vscale x 8 x i8>,
- i32,
- <vscale x 8 x i1>,
- i32,
- i32);
+declare <vscale x 1 x i64> @llvm.riscv.vssrl.nxv1i64.i32.i32(<vscale x 1 x i64>, <vscale x 1 x i64>, i32, i32 immarg, i32)
-define <vscale x 8 x i8> @intrinsic_vssrl_mask_vx_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, i32 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vssrl_mask_vx_nxv8i8_nxv8i8:
+define <vscale x 2 x i64> @test_vssrl_vv_u64m2(<vscale x 2 x i64> %op1, <vscale x 2 x i64> %shift, i32 %vl) {
+; CHECK-LABEL: test_vssrl_vv_u64m2:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
-; CHECK-NEXT: vssrl.vx v8, v9, a0, v0.t
+; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
+; CHECK-NEXT: csrwi vxrm, 0
+; CHECK-NEXT: vssrl.vv v8, v8, v10
; CHECK-NEXT: ret
entry:
- %a = call <vscale x 8 x i8> @llvm.riscv.vssrl.mask.nxv8i8(
- <vscale x 8 x i8> %0,
- <vscale x 8 x i8> %1,
- i32 %2,
- <vscale x 8 x i1> %3,
- i32 %4, i32 1)
-
- ret <vscale x 8 x i8> %a
+ %0 = call <vscale x 2 x i64> @llvm.riscv.vssrl.nxv2i64.nxv2i64.i32(<vscale x 2 x i64> poison, <vscale x 2 x i64> %op1, <vscale x 2 x i64> %shift, i32 0, i32 %vl)
+ ret <vscale x 2 x i64> %0
}
-declare <vscale x 16 x i8> @llvm.riscv.vssrl.nxv16i8(
- <vscale x 16 x i8>,
- <vscale x 16 x i8>,
- i32,
- i32);
+declare <vscale x 2 x i64> @llvm.riscv.vssrl.nxv2i64.nxv2i64.i32(<vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, i32 immarg, i32)
-define <vscale x 16 x i8> @intrinsic_vssrl_vx_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, i32 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vssrl_vx_nxv16i8_nxv16i8:
+define <vscale x 2 x i64> @test_vssrl_vx_u64m2(<vscale x 2 x i64> %op1, i32 %shift, i32 %vl) {
+; CHECK-LABEL: test_vssrl_vx_u64m2:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma
+; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma
+; CHECK-NEXT: csrwi vxrm, 0
; CHECK-NEXT: vssrl.vx v8, v8, a0
; CHECK-NEXT: ret
entry:
- %a = call <vscale x 16 x i8> @llvm.riscv.vssrl.nxv16i8(
- <vscale x 16 x i8> undef,
- <vscale x 16 x i8> %0,
- i32 %1,
- i32 %2)
-
- ret <vscale x 16 x i8> %a
+ %0 = call <vscale x 2 x i64> @llvm.riscv.vssrl.nxv2i64.i32.i32(<vscale x 2 x i64> poison, <vscale x 2 x i64> %op1, i32 %shift, i32 0, i32 %vl)
+ ret <vscale x 2 x i64> %0
}
-declare <vscale x 16 x i8> @llvm.riscv.vssrl.mask.nxv16i8(
- <vscale x 16 x i8>,
- <vscale x 16 x i8>,
- i32,
- <vscale x 16 x i1>,
- i32,
- i32);
+declare <vscale x 2 x i64> @llvm.riscv.vssrl.nxv2i64.i32.i32(<vscale x 2 x i64>, <vscale x 2 x i64>, i32, i32 immarg, i32)
-define <vscale x 16 x i8> @intrinsic_vssrl_mask_vx_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, i32 %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vssrl_mask_vx_nxv16i8_nxv16i8:
+define <vscale x 4 x i64> @test_vssrl_vv_u64m4(<vscale x 4 x i64> %op1, <vscale x 4 x i64> %shift, i32 %vl) {
+; CHECK-LABEL: test_vssrl_vv_u64m4:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu
-; CHECK-NEXT: vssrl.vx v8, v10, a0, v0.t
+; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
+; CHECK-NEXT: csrwi vxrm, 0
+; CHECK-NEXT: vssrl.vv v8, v8, v12
; CHECK-NEXT: ret
entry:
- %a = call <vscale x 16 x i8> @llvm.riscv.vssrl.mask.nxv16i8(
- <vscale x 16 x i8> %0,
- <vscale x 16 x i8> %1,
- i32 %2,
- <vscale x 16 x i1> %3,
- i32 %4, i32 1)
-
- ret <vscale x 16 x i8> %a
+ %0 = call <vscale x 4 x i64> @llvm.riscv.vssrl.nxv4i64.nxv4i64.i32(<vscale x 4 x i64> poison, <vscale x 4 x i64> %op1, <vscale x 4 x i64> %shift, i32 0, i32 %vl)
+ ret <vscale x 4 x i64> %0
}
-declare <vscale x 32 x i8> @llvm.riscv.vssrl.nxv32i8(
- <vscale x 32 x i8>,
- <vscale x 32 x i8>,
- i32,
- i32);
+declare <vscale x 4 x i64> @llvm.riscv.vssrl.nxv4i64.nxv4i64.i32(<vscale x 4 x i64>, <vscale x 4 x i64>, <vscale x 4 x i64>, i32 immarg, i32)
-define <vscale x 32 x i8> @intrinsic_vssrl_vx_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, i32 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vssrl_vx_nxv32i8_nxv32i8:
+define <vscale x 4 x i64> @test_vssrl_vx_u64m4(<vscale x 4 x i64> %op1, i32 %shift, i32 %vl) {
+; CHECK-LABEL: test_vssrl_vx_u64m4:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma
+; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma
+; CHECK-NEXT: csrwi vxrm, 0
; CHECK-NEXT: vssrl.vx v8, v8, a0
; CHECK-NEXT: ret
entry:
- %a = call <vscale x 32 x i8> @llvm.riscv.vssrl.nxv32i8(
- <vscale x 32 x i8> undef,
- <vscale x 32 x i8> %0,
- i32 %1,
- i32 %2)
-
- ret <vscale x 32 x i8> %a
+ %0 = call <vscale x 4 x i64> @llvm.riscv.vssrl.nxv4i64.i32.i32(<vscale x 4 x i64> poison, <vscale x 4 x i64> %op1, i32 %shift, i32 0, i32 %vl)
+ ret <vscale x 4 x i64> %0
}
-declare <vscale x 32 x i8> @llvm.riscv.vssrl.mask.nxv32i8(
- <vscale x 32 x i8>,
- <vscale x 32 x i8>,
- i32,
- <vscale x 32 x i1>,
- i32,
- i32);
+declare <vscale x 4 x i64> @llvm.riscv.vssrl.nxv4i64.i32.i32(<vscale x 4 x i64>, <vscale x 4 x i64>, i32, i32 immarg, i32)
-define <vscale x 32 x i8> @intrinsic_vssrl_mask_vx_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, i32 %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vssrl_mask_vx_nxv32i8_nxv32i8:
+define <vscale x 8 x i64> @test_vssrl_vv_u64m8(<vscale x 8 x i64> %op1, <vscale x 8 x i64> %shift, i32 %vl) {
+; CHECK-LABEL: test_vssrl_vv_u64m8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu
-; CHECK-NEXT: vssrl.vx v8, v12, a0, v0.t
+; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; CHECK-NEXT: csrwi vxrm, 0
+; CHECK-NEXT: vssrl.vv v8, v8, v16
; CHECK-NEXT: ret
entry:
- %a = call <vscale x 32 x i8> @llvm.riscv.vssrl.mask.nxv32i8(
- <vscale x 32 x i8> %0,
- <vscale x 32 x i8> %1,
- i32 %2,
- <vscale x 32 x i1> %3,
- i32 %4, i32 1)
-
- ret <vscale x 32 x i8> %a
+ %0 = call <vscale x 8 x i64> @llvm.riscv.vssrl.nxv8i64.nxv8i64.i32(<vscale x 8 x i64> poison, <vscale x 8 x i64> %op1, <vscale x 8 x i64> %shift, i32 0, i32 %vl)
+ ret <vscale x 8 x i64> %0
}
-declare <vscale x 64 x i8> @llvm.riscv.vssrl.nxv64i8(
- <vscale x 64 x i8>,
- <vscale x 64 x i8>,
- i32,
- i32);
+declare <vscale x 8 x i64> @llvm.riscv.vssrl.nxv8i64.nxv8i64.i32(<vscale x 8 x i64>, <vscale x 8 x i64>, <vscale x 8 x i64>, i32 immarg, i32)
-define <vscale x 64 x i8> @intrinsic_vssrl_vx_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, i32 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vssrl_vx_nxv64i8_nxv64i8:
+define <vscale x 8 x i64> @test_vssrl_vx_u64m8(<vscale x 8 x i64> %op1, i32 %shift, i32 %vl) {
+; CHECK-LABEL: test_vssrl_vx_u64m8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma
+; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, ma
+; CHECK-NEXT: csrwi vxrm, 0
; CHECK-NEXT: vssrl.vx v8, v8, a0
; CHECK-NEXT: ret
entry:
- %a = call <vscale x 64 x i8> @llvm.riscv.vssrl.nxv64i8(
- <vscale x 64 x i8> undef,
- <vscale x 64 x i8> %0,
- i32 %1,
- i32 %2)
-
- ret <vscale x 64 x i8> %a
-}
-
-declare <vscale x 64 x i8> @llvm.riscv.vssrl.mask.nxv64i8(
- <vscale x 64 x i8>,
- <vscale x 64 x i8>,
- i32,
- <vscale x 64 x i1>,
- i32,
- i32);
-
-define <vscale x 64 x i8> @intrinsic_vssrl_mask_vx_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, i32 %2, <vscale x 64 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vssrl_mask_vx_nxv64i8_nxv64i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu
-; CHECK-NEXT: vssrl.vx v8, v16, a0, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 64 x i8> @llvm.riscv.vssrl.mask.nxv64i8(
- <vscale x 64 x i8> %0,
- <vscale x 64 x i8> %1,
- i32 %2,
- <vscale x 64 x i1> %3,
- i32 %4, i32 1)
-
- ret <vscale x 64 x i8> %a
+ %0 = call <vscale x 8 x i64> @llvm.riscv.vssrl.nxv8i64.i32.i32(<vscale x 8 x i64> poison, <vscale x 8 x i64> %op1, i32 %shift, i32 0, i32 %vl)
+ ret <vscale x 8 x i64> %0
}
-declare <vscale x 1 x i16> @llvm.riscv.vssrl.nxv1i16(
- <vscale x 1 x i16>,
- <vscale x 1 x i16>,
- i32,
- i32);
+declare <vscale x 8 x i64> @llvm.riscv.vssrl.nxv8i64.i32.i32(<vscale x 8 x i64>, <vscale x 8 x i64>, i32, i32 immarg, i32)
-define <vscale x 1 x i16> @intrinsic_vssrl_vx_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, i32 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vssrl_vx_nxv1i16_nxv1i16:
+define <vscale x 1 x i8> @test_vssrl_vv_u8mf8_m(<vscale x 1 x i1> %mask, <vscale x 1 x i8> %op1, <vscale x 1 x i8> %shift, i32 %vl) {
+; CHECK-LABEL: test_vssrl_vv_u8mf8_m:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
-; CHECK-NEXT: vssrl.vx v8, v8, a0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i16> @llvm.riscv.vssrl.nxv1i16(
- <vscale x 1 x i16> undef,
- <vscale x 1 x i16> %0,
- i32 %1,
- i32 %2)
-
- ret <vscale x 1 x i16> %a
-}
-
-declare <vscale x 1 x i16> @llvm.riscv.vssrl.mask.nxv1i16(
- <vscale x 1 x i16>,
- <vscale x 1 x i16>,
- i32,
- <vscale x 1 x i1>,
- i32,
- i32);
-
-define <vscale x 1 x i16> @intrinsic_vssrl_mask_vx_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, i32 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vssrl_mask_vx_nxv1i16_nxv1i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
-; CHECK-NEXT: vssrl.vx v8, v9, a0, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i16> @llvm.riscv.vssrl.mask.nxv1i16(
- <vscale x 1 x i16> %0,
- <vscale x 1 x i16> %1,
- i32 %2,
- <vscale x 1 x i1> %3,
- i32 %4, i32 1)
-
- ret <vscale x 1 x i16> %a
-}
-
-declare <vscale x 2 x i16> @llvm.riscv.vssrl.nxv2i16(
- <vscale x 2 x i16>,
- <vscale x 2 x i16>,
- i32,
- i32);
-
-define <vscale x 2 x i16> @intrinsic_vssrl_vx_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, i32 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vssrl_vx_nxv2i16_nxv2i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
-; CHECK-NEXT: vssrl.vx v8, v8, a0
+; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
+; CHECK-NEXT: csrwi vxrm, 0
+; CHECK-NEXT: vssrl.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
entry:
- %a = call <vscale x 2 x i16> @llvm.riscv.vssrl.nxv2i16(
- <vscale x 2 x i16> undef,
- <vscale x 2 x i16> %0,
- i32 %1,
- i32 %2)
-
- ret <vscale x 2 x i16> %a
+ %0 = call <vscale x 1 x i8> @llvm.riscv.vssrl.mask.nxv1i8.nxv1i8.i32(<vscale x 1 x i8> poison, <vscale x 1 x i8> %op1, <vscale x 1 x i8> %shift, <vscale x 1 x i1> %mask, i32 0, i32 %vl, i32 3)
+ ret <vscale x 1 x i8> %0
}
-declare <vscale x 2 x i16> @llvm.riscv.vssrl.mask.nxv2i16(
- <vscale x 2 x i16>,
- <vscale x 2 x i16>,
- i32,
- <vscale x 2 x i1>,
- i32,
- i32);
+declare <vscale x 1 x i8> @llvm.riscv.vssrl.mask.nxv1i8.nxv1i8.i32(<vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i1>, i32 immarg, i32, i32 immarg)
-define <vscale x 2 x i16> @intrinsic_vssrl_mask_vx_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, i32 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vssrl_mask_vx_nxv2i16_nxv2i16:
+define <vscale x 1 x i8> @test_vssrl_vx_u8mf8_m(<vscale x 1 x i1> %mask, <vscale x 1 x i8> %op1, i32 %shift, i32 %vl) {
+; CHECK-LABEL: test_vssrl_vx_u8mf8_m:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT: vssrl.vx v8, v9, a0, v0.t
+; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
+; CHECK-NEXT: csrwi vxrm, 0
+; CHECK-NEXT: vssrl.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
entry:
- %a = call <vscale x 2 x i16> @llvm.riscv.vssrl.mask.nxv2i16(
- <vscale x 2 x i16> %0,
- <vscale x 2 x i16> %1,
- i32 %2,
- <vscale x 2 x i1> %3,
- i32 %4, i32 1)
-
- ret <vscale x 2 x i16> %a
+ %0 = call <vscale x 1 x i8> @llvm.riscv.vssrl.mask.nxv1i8.i32.i32(<vscale x 1 x i8> poison, <vscale x 1 x i8> %op1, i32 %shift, <vscale x 1 x i1> %mask, i32 0, i32 %vl, i32 3)
+ ret <vscale x 1 x i8> %0
}
-declare <vscale x 4 x i16> @llvm.riscv.vssrl.nxv4i16(
- <vscale x 4 x i16>,
- <vscale x 4 x i16>,
- i32,
- i32);
+declare <vscale x 1 x i8> @llvm.riscv.vssrl.mask.nxv1i8.i32.i32(<vscale x 1 x i8>, <vscale x 1 x i8>, i32, <vscale x 1 x i1>, i32 immarg, i32, i32 immarg)
-define <vscale x 4 x i16> @intrinsic_vssrl_vx_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, i32 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vssrl_vx_nxv4i16_nxv4i16:
+define <vscale x 2 x i8> @test_vssrl_vv_u8mf4_m(<vscale x 2 x i1> %mask, <vscale x 2 x i8> %op1, <vscale x 2 x i8> %shift, i32 %vl) {
+; CHECK-LABEL: test_vssrl_vv_u8mf4_m:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
-; CHECK-NEXT: vssrl.vx v8, v8, a0
+; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma
+; CHECK-NEXT: csrwi vxrm, 0
+; CHECK-NEXT: vssrl.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
entry:
- %a = call <vscale x 4 x i16> @llvm.riscv.vssrl.nxv4i16(
- <vscale x 4 x i16> undef,
- <vscale x 4 x i16> %0,
- i32 %1,
- i32 %2)
-
- ret <vscale x 4 x i16> %a
+ %0 = call <vscale x 2 x i8> @llvm.riscv.vssrl.mask.nxv2i8.nxv2i8.i32(<vscale x 2 x i8> poison, <vscale x 2 x i8> %op1, <vscale x 2 x i8> %shift, <vscale x 2 x i1> %mask, i32 0, i32 %vl, i32 3)
+ ret <vscale x 2 x i8> %0
}
-declare <vscale x 4 x i16> @llvm.riscv.vssrl.mask.nxv4i16(
- <vscale x 4 x i16>,
- <vscale x 4 x i16>,
- i32,
- <vscale x 4 x i1>,
- i32,
- i32);
+declare <vscale x 2 x i8> @llvm.riscv.vssrl.mask.nxv2i8.nxv2i8.i32(<vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i1>, i32 immarg, i32, i32 immarg)
-define <vscale x 4 x i16> @intrinsic_vssrl_mask_vx_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, i32 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vssrl_mask_vx_nxv4i16_nxv4i16:
+define <vscale x 2 x i8> @test_vssrl_vx_u8mf4_m(<vscale x 2 x i1> %mask, <vscale x 2 x i8> %op1, i32 %shift, i32 %vl) {
+; CHECK-LABEL: test_vssrl_vx_u8mf4_m:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT: vssrl.vx v8, v9, a0, v0.t
+; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
+; CHECK-NEXT: csrwi vxrm, 0
+; CHECK-NEXT: vssrl.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
entry:
- %a = call <vscale x 4 x i16> @llvm.riscv.vssrl.mask.nxv4i16(
- <vscale x 4 x i16> %0,
- <vscale x 4 x i16> %1,
- i32 %2,
- <vscale x 4 x i1> %3,
- i32 %4, i32 1)
-
- ret <vscale x 4 x i16> %a
+ %0 = call <vscale x 2 x i8> @llvm.riscv.vssrl.mask.nxv2i8.i32.i32(<vscale x 2 x i8> poison, <vscale x 2 x i8> %op1, i32 %shift, <vscale x 2 x i1> %mask, i32 0, i32 %vl, i32 3)
+ ret <vscale x 2 x i8> %0
}
-declare <vscale x 8 x i16> @llvm.riscv.vssrl.nxv8i16(
- <vscale x 8 x i16>,
- <vscale x 8 x i16>,
- i32,
- i32);
+declare <vscale x 2 x i8> @llvm.riscv.vssrl.mask.nxv2i8.i32.i32(<vscale x 2 x i8>, <vscale x 2 x i8>, i32, <vscale x 2 x i1>, i32 immarg, i32, i32 immarg)
-define <vscale x 8 x i16> @intrinsic_vssrl_vx_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, i32 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vssrl_vx_nxv8i16_nxv8i16:
+define <vscale x 4 x i8> @test_vssrl_vv_u8mf2_m(<vscale x 4 x i1> %mask, <vscale x 4 x i8> %op1, <vscale x 4 x i8> %shift, i32 %vl) {
+; CHECK-LABEL: test_vssrl_vv_u8mf2_m:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
-; CHECK-NEXT: vssrl.vx v8, v8, a0
+; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
+; CHECK-NEXT: csrwi vxrm, 0
+; CHECK-NEXT: vssrl.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
entry:
- %a = call <vscale x 8 x i16> @llvm.riscv.vssrl.nxv8i16(
- <vscale x 8 x i16> undef,
- <vscale x 8 x i16> %0,
- i32 %1,
- i32 %2)
-
- ret <vscale x 8 x i16> %a
+ %0 = call <vscale x 4 x i8> @llvm.riscv.vssrl.mask.nxv4i8.nxv4i8.i32(<vscale x 4 x i8> poison, <vscale x 4 x i8> %op1, <vscale x 4 x i8> %shift, <vscale x 4 x i1> %mask, i32 0, i32 %vl, i32 3)
+ ret <vscale x 4 x i8> %0
}
-declare <vscale x 8 x i16> @llvm.riscv.vssrl.mask.nxv8i16(
- <vscale x 8 x i16>,
- <vscale x 8 x i16>,
- i32,
- <vscale x 8 x i1>,
- i32,
- i32);
+declare <vscale x 4 x i8> @llvm.riscv.vssrl.mask.nxv4i8.nxv4i8.i32(<vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i1>, i32 immarg, i32, i32 immarg)
-define <vscale x 8 x i16> @intrinsic_vssrl_mask_vx_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, i32 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vssrl_mask_vx_nxv8i16_nxv8i16:
+define <vscale x 4 x i8> @test_vssrl_vx_u8mf2_m(<vscale x 4 x i1> %mask, <vscale x 4 x i8> %op1, i32 %shift, i32 %vl) {
+; CHECK-LABEL: test_vssrl_vx_u8mf2_m:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
-; CHECK-NEXT: vssrl.vx v8, v10, a0, v0.t
+; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
+; CHECK-NEXT: csrwi vxrm, 0
+; CHECK-NEXT: vssrl.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
entry:
- %a = call <vscale x 8 x i16> @llvm.riscv.vssrl.mask.nxv8i16(
- <vscale x 8 x i16> %0,
- <vscale x 8 x i16> %1,
- i32 %2,
- <vscale x 8 x i1> %3,
- i32 %4, i32 1)
-
- ret <vscale x 8 x i16> %a
+ %0 = call <vscale x 4 x i8> @llvm.riscv.vssrl.mask.nxv4i8.i32.i32(<vscale x 4 x i8> poison, <vscale x 4 x i8> %op1, i32 %shift, <vscale x 4 x i1> %mask, i32 0, i32 %vl, i32 3)
+ ret <vscale x 4 x i8> %0
}
-declare <vscale x 16 x i16> @llvm.riscv.vssrl.nxv16i16(
- <vscale x 16 x i16>,
- <vscale x 16 x i16>,
- i32,
- i32);
+declare <vscale x 4 x i8> @llvm.riscv.vssrl.mask.nxv4i8.i32.i32(<vscale x 4 x i8>, <vscale x 4 x i8>, i32, <vscale x 4 x i1>, i32 immarg, i32, i32 immarg)
-define <vscale x 16 x i16> @intrinsic_vssrl_vx_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, i32 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vssrl_vx_nxv16i16_nxv16i16:
+define <vscale x 8 x i8> @test_vssrl_vv_u8m1_m(<vscale x 8 x i1> %mask, <vscale x 8 x i8> %op1, <vscale x 8 x i8> %shift, i32 %vl) {
+; CHECK-LABEL: test_vssrl_vv_u8m1_m:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma
-; CHECK-NEXT: vssrl.vx v8, v8, a0
+; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma
+; CHECK-NEXT: csrwi vxrm, 0
+; CHECK-NEXT: vssrl.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
entry:
- %a = call <vscale x 16 x i16> @llvm.riscv.vssrl.nxv16i16(
- <vscale x 16 x i16> undef,
- <vscale x 16 x i16> %0,
- i32 %1,
- i32 %2)
-
- ret <vscale x 16 x i16> %a
+ %0 = call <vscale x 8 x i8> @llvm.riscv.vssrl.mask.nxv8i8.nxv8i8.i32(<vscale x 8 x i8> poison, <vscale x 8 x i8> %op1, <vscale x 8 x i8> %shift, <vscale x 8 x i1> %mask, i32 0, i32 %vl, i32 3)
+ ret <vscale x 8 x i8> %0
}
-declare <vscale x 16 x i16> @llvm.riscv.vssrl.mask.nxv16i16(
- <vscale x 16 x i16>,
- <vscale x 16 x i16>,
- i32,
- <vscale x 16 x i1>,
- i32,
- i32);
+declare <vscale x 8 x i8> @llvm.riscv.vssrl.mask.nxv8i8.nxv8i8.i32(<vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i1>, i32 immarg, i32, i32 immarg)
-define <vscale x 16 x i16> @intrinsic_vssrl_mask_vx_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, i32 %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vssrl_mask_vx_nxv16i16_nxv16i16:
+define <vscale x 8 x i8> @test_vssrl_vx_u8m1_m(<vscale x 8 x i1> %mask, <vscale x 8 x i8> %op1, i32 %shift, i32 %vl) {
+; CHECK-LABEL: test_vssrl_vx_u8m1_m:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu
-; CHECK-NEXT: vssrl.vx v8, v12, a0, v0.t
+; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
+; CHECK-NEXT: csrwi vxrm, 0
+; CHECK-NEXT: vssrl.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
entry:
- %a = call <vscale x 16 x i16> @llvm.riscv.vssrl.mask.nxv16i16(
- <vscale x 16 x i16> %0,
- <vscale x 16 x i16> %1,
- i32 %2,
- <vscale x 16 x i1> %3,
- i32 %4, i32 1)
-
- ret <vscale x 16 x i16> %a
+ %0 = call <vscale x 8 x i8> @llvm.riscv.vssrl.mask.nxv8i8.i32.i32(<vscale x 8 x i8> poison, <vscale x 8 x i8> %op1, i32 %shift, <vscale x 8 x i1> %mask, i32 0, i32 %vl, i32 3)
+ ret <vscale x 8 x i8> %0
}
-declare <vscale x 32 x i16> @llvm.riscv.vssrl.nxv32i16(
- <vscale x 32 x i16>,
- <vscale x 32 x i16>,
- i32,
- i32);
+declare <vscale x 8 x i8> @llvm.riscv.vssrl.mask.nxv8i8.i32.i32(<vscale x 8 x i8>, <vscale x 8 x i8>, i32, <vscale x 8 x i1>, i32 immarg, i32, i32 immarg)
-define <vscale x 32 x i16> @intrinsic_vssrl_vx_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, i32 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vssrl_vx_nxv32i16_nxv32i16:
+define <vscale x 16 x i8> @test_vssrl_vv_u8m2_m(<vscale x 16 x i1> %mask, <vscale x 16 x i8> %op1, <vscale x 16 x i8> %shift, i32 %vl) {
+; CHECK-LABEL: test_vssrl_vv_u8m2_m:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma
-; CHECK-NEXT: vssrl.vx v8, v8, a0
+; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma
+; CHECK-NEXT: csrwi vxrm, 0
+; CHECK-NEXT: vssrl.vv v8, v8, v10, v0.t
; CHECK-NEXT: ret
entry:
- %a = call <vscale x 32 x i16> @llvm.riscv.vssrl.nxv32i16(
- <vscale x 32 x i16> undef,
- <vscale x 32 x i16> %0,
- i32 %1,
- i32 %2)
-
- ret <vscale x 32 x i16> %a
+ %0 = call <vscale x 16 x i8> @llvm.riscv.vssrl.mask.nxv16i8.nxv16i8.i32(<vscale x 16 x i8> poison, <vscale x 16 x i8> %op1, <vscale x 16 x i8> %shift, <vscale x 16 x i1> %mask, i32 0, i32 %vl, i32 3)
+ ret <vscale x 16 x i8> %0
}
-declare <vscale x 32 x i16> @llvm.riscv.vssrl.mask.nxv32i16(
- <vscale x 32 x i16>,
- <vscale x 32 x i16>,
- i32,
- <vscale x 32 x i1>,
- i32,
- i32);
+declare <vscale x 16 x i8> @llvm.riscv.vssrl.mask.nxv16i8.nxv16i8.i32(<vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i1>, i32 immarg, i32, i32 immarg)
-define <vscale x 32 x i16> @intrinsic_vssrl_mask_vx_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, i32 %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vssrl_mask_vx_nxv32i16_nxv32i16:
+define <vscale x 16 x i8> @test_vssrl_vx_u8m2_m(<vscale x 16 x i1> %mask, <vscale x 16 x i8> %op1, i32 %shift, i32 %vl) {
+; CHECK-LABEL: test_vssrl_vx_u8m2_m:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu
-; CHECK-NEXT: vssrl.vx v8, v16, a0, v0.t
+; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma
+; CHECK-NEXT: csrwi vxrm, 0
+; CHECK-NEXT: vssrl.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
entry:
- %a = call <vscale x 32 x i16> @llvm.riscv.vssrl.mask.nxv32i16(
- <vscale x 32 x i16> %0,
- <vscale x 32 x i16> %1,
- i32 %2,
- <vscale x 32 x i1> %3,
- i32 %4, i32 1)
-
- ret <vscale x 32 x i16> %a
+ %0 = call <vscale x 16 x i8> @llvm.riscv.vssrl.mask.nxv16i8.i32.i32(<vscale x 16 x i8> poison, <vscale x 16 x i8> %op1, i32 %shift, <vscale x 16 x i1> %mask, i32 0, i32 %vl, i32 3)
+ ret <vscale x 16 x i8> %0
}
-declare <vscale x 1 x i32> @llvm.riscv.vssrl.nxv1i32(
- <vscale x 1 x i32>,
- <vscale x 1 x i32>,
- i32,
- i32);
+declare <vscale x 16 x i8> @llvm.riscv.vssrl.mask.nxv16i8.i32.i32(<vscale x 16 x i8>, <vscale x 16 x i8>, i32, <vscale x 16 x i1>, i32 immarg, i32, i32 immarg)
-define <vscale x 1 x i32> @intrinsic_vssrl_vx_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, i32 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vssrl_vx_nxv1i32_nxv1i32:
+define <vscale x 32 x i8> @test_vssrl_vv_u8m4_m(<vscale x 32 x i1> %mask, <vscale x 32 x i8> %op1, <vscale x 32 x i8> %shift, i32 %vl) {
+; CHECK-LABEL: test_vssrl_vv_u8m4_m:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
-; CHECK-NEXT: vssrl.vx v8, v8, a0
+; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma
+; CHECK-NEXT: csrwi vxrm, 0
+; CHECK-NEXT: vssrl.vv v8, v8, v12, v0.t
; CHECK-NEXT: ret
entry:
- %a = call <vscale x 1 x i32> @llvm.riscv.vssrl.nxv1i32(
- <vscale x 1 x i32> undef,
- <vscale x 1 x i32> %0,
- i32 %1,
- i32 %2)
-
- ret <vscale x 1 x i32> %a
+ %0 = call <vscale x 32 x i8> @llvm.riscv.vssrl.mask.nxv32i8.nxv32i8.i32(<vscale x 32 x i8> poison, <vscale x 32 x i8> %op1, <vscale x 32 x i8> %shift, <vscale x 32 x i1> %mask, i32 0, i32 %vl, i32 3)
+ ret <vscale x 32 x i8> %0
}
-declare <vscale x 1 x i32> @llvm.riscv.vssrl.mask.nxv1i32(
- <vscale x 1 x i32>,
- <vscale x 1 x i32>,
- i32,
- <vscale x 1 x i1>,
- i32,
- i32);
+declare <vscale x 32 x i8> @llvm.riscv.vssrl.mask.nxv32i8.nxv32i8.i32(<vscale x 32 x i8>, <vscale x 32 x i8>, <vscale x 32 x i8>, <vscale x 32 x i1>, i32 immarg, i32, i32 immarg)
-define <vscale x 1 x i32> @intrinsic_vssrl_mask_vx_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, i32 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vssrl_mask_vx_nxv1i32_nxv1i32:
+define <vscale x 32 x i8> @test_vssrl_vx_u8m4_m(<vscale x 32 x i1> %mask, <vscale x 32 x i8> %op1, i32 %shift, i32 %vl) {
+; CHECK-LABEL: test_vssrl_vx_u8m4_m:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT: vssrl.vx v8, v9, a0, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i32> @llvm.riscv.vssrl.mask.nxv1i32(
- <vscale x 1 x i32> %0,
- <vscale x 1 x i32> %1,
- i32 %2,
- <vscale x 1 x i1> %3,
- i32 %4, i32 1)
-
- ret <vscale x 1 x i32> %a
-}
-
-declare <vscale x 2 x i32> @llvm.riscv.vssrl.nxv2i32(
- <vscale x 2 x i32>,
- <vscale x 2 x i32>,
- i32,
- i32);
-
-define <vscale x 2 x i32> @intrinsic_vssrl_vx_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, i32 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vssrl_vx_nxv2i32_nxv2i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
-; CHECK-NEXT: vssrl.vx v8, v8, a0
+; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma
+; CHECK-NEXT: csrwi vxrm, 0
+; CHECK-NEXT: vssrl.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
entry:
- %a = call <vscale x 2 x i32> @llvm.riscv.vssrl.nxv2i32(
- <vscale x 2 x i32> undef,
- <vscale x 2 x i32> %0,
- i32 %1,
- i32 %2)
-
- ret <vscale x 2 x i32> %a
+ %0 = call <vscale x 32 x i8> @llvm.riscv.vssrl.mask.nxv32i8.i32.i32(<vscale x 32 x i8> poison, <vscale x 32 x i8> %op1, i32 %shift, <vscale x 32 x i1> %mask, i32 0, i32 %vl, i32 3)
+ ret <vscale x 32 x i8> %0
}
-declare <vscale x 2 x i32> @llvm.riscv.vssrl.mask.nxv2i32(
- <vscale x 2 x i32>,
- <vscale x 2 x i32>,
- i32,
- <vscale x 2 x i1>,
- i32,
- i32);
+declare <vscale x 32 x i8> @llvm.riscv.vssrl.mask.nxv32i8.i32.i32(<vscale x 32 x i8>, <vscale x 32 x i8>, i32, <vscale x 32 x i1>, i32 immarg, i32, i32 immarg)
-define <vscale x 2 x i32> @intrinsic_vssrl_mask_vx_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, i32 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vssrl_mask_vx_nxv2i32_nxv2i32:
+define <vscale x 64 x i8> @test_vssrl_vv_u8m8_m(<vscale x 64 x i1> %mask, <vscale x 64 x i8> %op1, <vscale x 64 x i8> %shift, i32 %vl) {
+; CHECK-LABEL: test_vssrl_vv_u8m8_m:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
-; CHECK-NEXT: vssrl.vx v8, v9, a0, v0.t
+; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma
+; CHECK-NEXT: csrwi vxrm, 0
+; CHECK-NEXT: vssrl.vv v8, v8, v16, v0.t
; CHECK-NEXT: ret
entry:
- %a = call <vscale x 2 x i32> @llvm.riscv.vssrl.mask.nxv2i32(
- <vscale x 2 x i32> %0,
- <vscale x 2 x i32> %1,
- i32 %2,
- <vscale x 2 x i1> %3,
- i32 %4, i32 1)
-
- ret <vscale x 2 x i32> %a
+ %0 = call <vscale x 64 x i8> @llvm.riscv.vssrl.mask.nxv64i8.nxv64i8.i32(<vscale x 64 x i8> poison, <vscale x 64 x i8> %op1, <vscale x 64 x i8> %shift, <vscale x 64 x i1> %mask, i32 0, i32 %vl, i32 3)
+ ret <vscale x 64 x i8> %0
}
-declare <vscale x 4 x i32> @llvm.riscv.vssrl.nxv4i32(
- <vscale x 4 x i32>,
- <vscale x 4 x i32>,
- i32,
- i32);
+declare <vscale x 64 x i8> @llvm.riscv.vssrl.mask.nxv64i8.nxv64i8.i32(<vscale x 64 x i8>, <vscale x 64 x i8>, <vscale x 64 x i8>, <vscale x 64 x i1>, i32 immarg, i32, i32 immarg)
-define <vscale x 4 x i32> @intrinsic_vssrl_vx_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, i32 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vssrl_vx_nxv4i32_nxv4i32:
+define <vscale x 64 x i8> @test_vssrl_vx_u8m8_m(<vscale x 64 x i1> %mask, <vscale x 64 x i8> %op1, i32 %shift, i32 %vl) {
+; CHECK-LABEL: test_vssrl_vx_u8m8_m:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
-; CHECK-NEXT: vssrl.vx v8, v8, a0
+; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma
+; CHECK-NEXT: csrwi vxrm, 0
+; CHECK-NEXT: vssrl.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
entry:
- %a = call <vscale x 4 x i32> @llvm.riscv.vssrl.nxv4i32(
- <vscale x 4 x i32> undef,
- <vscale x 4 x i32> %0,
- i32 %1,
- i32 %2)
-
- ret <vscale x 4 x i32> %a
+ %0 = call <vscale x 64 x i8> @llvm.riscv.vssrl.mask.nxv64i8.i32.i32(<vscale x 64 x i8> poison, <vscale x 64 x i8> %op1, i32 %shift, <vscale x 64 x i1> %mask, i32 0, i32 %vl, i32 3)
+ ret <vscale x 64 x i8> %0
}
-declare <vscale x 4 x i32> @llvm.riscv.vssrl.mask.nxv4i32(
- <vscale x 4 x i32>,
- <vscale x 4 x i32>,
- i32,
- <vscale x 4 x i1>,
- i32,
- i32);
+declare <vscale x 64 x i8> @llvm.riscv.vssrl.mask.nxv64i8.i32.i32(<vscale x 64 x i8>, <vscale x 64 x i8>, i32, <vscale x 64 x i1>, i32 immarg, i32, i32 immarg)
-define <vscale x 4 x i32> @intrinsic_vssrl_mask_vx_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, i32 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vssrl_mask_vx_nxv4i32_nxv4i32:
+define <vscale x 1 x i16> @test_vssrl_vv_u16mf4_m(<vscale x 1 x i1> %mask, <vscale x 1 x i16> %op1, <vscale x 1 x i16> %shift, i32 %vl) {
+; CHECK-LABEL: test_vssrl_vv_u16mf4_m:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
-; CHECK-NEXT: vssrl.vx v8, v10, a0, v0.t
+; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
+; CHECK-NEXT: csrwi vxrm, 0
+; CHECK-NEXT: vssrl.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
entry:
- %a = call <vscale x 4 x i32> @llvm.riscv.vssrl.mask.nxv4i32(
- <vscale x 4 x i32> %0,
- <vscale x 4 x i32> %1,
- i32 %2,
- <vscale x 4 x i1> %3,
- i32 %4, i32 1)
-
- ret <vscale x 4 x i32> %a
+ %0 = call <vscale x 1 x i16> @llvm.riscv.vssrl.mask.nxv1i16.nxv1i16.i32(<vscale x 1 x i16> poison, <vscale x 1 x i16> %op1, <vscale x 1 x i16> %shift, <vscale x 1 x i1> %mask, i32 0, i32 %vl, i32 3)
+ ret <vscale x 1 x i16> %0
}
-declare <vscale x 8 x i32> @llvm.riscv.vssrl.nxv8i32(
- <vscale x 8 x i32>,
- <vscale x 8 x i32>,
- i32,
- i32);
+declare <vscale x 1 x i16> @llvm.riscv.vssrl.mask.nxv1i16.nxv1i16.i32(<vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i1>, i32 immarg, i32, i32 immarg)
-define <vscale x 8 x i32> @intrinsic_vssrl_vx_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, i32 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vssrl_vx_nxv8i32_nxv8i32:
+define <vscale x 1 x i16> @test_vssrl_vx_u16mf4_m(<vscale x 1 x i1> %mask, <vscale x 1 x i16> %op1, i32 %shift, i32 %vl) {
+; CHECK-LABEL: test_vssrl_vx_u16mf4_m:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma
-; CHECK-NEXT: vssrl.vx v8, v8, a0
+; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
+; CHECK-NEXT: csrwi vxrm, 0
+; CHECK-NEXT: vssrl.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
entry:
- %a = call <vscale x 8 x i32> @llvm.riscv.vssrl.nxv8i32(
- <vscale x 8 x i32> undef,
- <vscale x 8 x i32> %0,
- i32 %1,
- i32 %2)
-
- ret <vscale x 8 x i32> %a
+ %0 = call <vscale x 1 x i16> @llvm.riscv.vssrl.mask.nxv1i16.i32.i32(<vscale x 1 x i16> poison, <vscale x 1 x i16> %op1, i32 %shift, <vscale x 1 x i1> %mask, i32 0, i32 %vl, i32 3)
+ ret <vscale x 1 x i16> %0
}
-declare <vscale x 8 x i32> @llvm.riscv.vssrl.mask.nxv8i32(
- <vscale x 8 x i32>,
- <vscale x 8 x i32>,
- i32,
- <vscale x 8 x i1>,
- i32,
- i32);
+declare <vscale x 1 x i16> @llvm.riscv.vssrl.mask.nxv1i16.i32.i32(<vscale x 1 x i16>, <vscale x 1 x i16>, i32, <vscale x 1 x i1>, i32 immarg, i32, i32 immarg)
-define <vscale x 8 x i32> @intrinsic_vssrl_mask_vx_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, i32 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vssrl_mask_vx_nxv8i32_nxv8i32:
+define <vscale x 2 x i16> @test_vssrl_vv_u16mf2_m(<vscale x 2 x i1> %mask, <vscale x 2 x i16> %op1, <vscale x 2 x i16> %shift, i32 %vl) {
+; CHECK-LABEL: test_vssrl_vv_u16mf2_m:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu
-; CHECK-NEXT: vssrl.vx v8, v12, a0, v0.t
+; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
+; CHECK-NEXT: csrwi vxrm, 0
+; CHECK-NEXT: vssrl.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
entry:
- %a = call <vscale x 8 x i32> @llvm.riscv.vssrl.mask.nxv8i32(
- <vscale x 8 x i32> %0,
- <vscale x 8 x i32> %1,
- i32 %2,
- <vscale x 8 x i1> %3,
- i32 %4, i32 1)
-
- ret <vscale x 8 x i32> %a
+ %0 = call <vscale x 2 x i16> @llvm.riscv.vssrl.mask.nxv2i16.nxv2i16.i32(<vscale x 2 x i16> poison, <vscale x 2 x i16> %op1, <vscale x 2 x i16> %shift, <vscale x 2 x i1> %mask, i32 0, i32 %vl, i32 3)
+ ret <vscale x 2 x i16> %0
}
-declare <vscale x 16 x i32> @llvm.riscv.vssrl.nxv16i32(
- <vscale x 16 x i32>,
- <vscale x 16 x i32>,
- i32,
- i32);
+declare <vscale x 2 x i16> @llvm.riscv.vssrl.mask.nxv2i16.nxv2i16.i32(<vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i1>, i32 immarg, i32, i32 immarg)
-define <vscale x 16 x i32> @intrinsic_vssrl_vx_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, i32 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vssrl_vx_nxv16i32_nxv16i32:
+define <vscale x 2 x i16> @test_vssrl_vx_u16mf2_m(<vscale x 2 x i1> %mask, <vscale x 2 x i16> %op1, i32 %shift, i32 %vl) {
+; CHECK-LABEL: test_vssrl_vx_u16mf2_m:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma
-; CHECK-NEXT: vssrl.vx v8, v8, a0
+; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
+; CHECK-NEXT: csrwi vxrm, 0
+; CHECK-NEXT: vssrl.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
entry:
- %a = call <vscale x 16 x i32> @llvm.riscv.vssrl.nxv16i32(
- <vscale x 16 x i32> undef,
- <vscale x 16 x i32> %0,
- i32 %1,
- i32 %2)
-
- ret <vscale x 16 x i32> %a
+ %0 = call <vscale x 2 x i16> @llvm.riscv.vssrl.mask.nxv2i16.i32.i32(<vscale x 2 x i16> poison, <vscale x 2 x i16> %op1, i32 %shift, <vscale x 2 x i1> %mask, i32 0, i32 %vl, i32 3)
+ ret <vscale x 2 x i16> %0
}
-declare <vscale x 16 x i32> @llvm.riscv.vssrl.mask.nxv16i32(
- <vscale x 16 x i32>,
- <vscale x 16 x i32>,
- i32,
- <vscale x 16 x i1>,
- i32,
- i32);
+declare <vscale x 2 x i16> @llvm.riscv.vssrl.mask.nxv2i16.i32.i32(<vscale x 2 x i16>, <vscale x 2 x i16>, i32, <vscale x 2 x i1>, i32 immarg, i32, i32 immarg)
-define <vscale x 16 x i32> @intrinsic_vssrl_mask_vx_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, i32 %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vssrl_mask_vx_nxv16i32_nxv16i32:
+define <vscale x 4 x i16> @test_vssrl_vv_u16m1_m(<vscale x 4 x i1> %mask, <vscale x 4 x i16> %op1, <vscale x 4 x i16> %shift, i32 %vl) {
+; CHECK-LABEL: test_vssrl_vv_u16m1_m:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu
-; CHECK-NEXT: vssrl.vx v8, v16, a0, v0.t
+; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
+; CHECK-NEXT: csrwi vxrm, 0
+; CHECK-NEXT: vssrl.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
entry:
- %a = call <vscale x 16 x i32> @llvm.riscv.vssrl.mask.nxv16i32(
- <vscale x 16 x i32> %0,
- <vscale x 16 x i32> %1,
- i32 %2,
- <vscale x 16 x i1> %3,
- i32 %4, i32 1)
-
- ret <vscale x 16 x i32> %a
+ %0 = call <vscale x 4 x i16> @llvm.riscv.vssrl.mask.nxv4i16.nxv4i16.i32(<vscale x 4 x i16> poison, <vscale x 4 x i16> %op1, <vscale x 4 x i16> %shift, <vscale x 4 x i1> %mask, i32 0, i32 %vl, i32 3)
+ ret <vscale x 4 x i16> %0
}
-declare <vscale x 1 x i64> @llvm.riscv.vssrl.nxv1i64(
- <vscale x 1 x i64>,
- <vscale x 1 x i64>,
- i32,
- i32);
+declare <vscale x 4 x i16> @llvm.riscv.vssrl.mask.nxv4i16.nxv4i16.i32(<vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i1>, i32 immarg, i32, i32 immarg)
-define <vscale x 1 x i64> @intrinsic_vssrl_vx_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, i32 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vssrl_vx_nxv1i64_nxv1i64:
+define <vscale x 4 x i16> @test_vssrl_vx_u16m1_m(<vscale x 4 x i1> %mask, <vscale x 4 x i16> %op1, i32 %shift, i32 %vl) {
+; CHECK-LABEL: test_vssrl_vx_u16m1_m:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
-; CHECK-NEXT: vssrl.vx v8, v8, a0
+; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
+; CHECK-NEXT: csrwi vxrm, 0
+; CHECK-NEXT: vssrl.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
entry:
- %a = call <vscale x 1 x i64> @llvm.riscv.vssrl.nxv1i64(
- <vscale x 1 x i64> undef,
- <vscale x 1 x i64> %0,
- i32 %1,
- i32 %2)
-
- ret <vscale x 1 x i64> %a
+ %0 = call <vscale x 4 x i16> @llvm.riscv.vssrl.mask.nxv4i16.i32.i32(<vscale x 4 x i16> poison, <vscale x 4 x i16> %op1, i32 %shift, <vscale x 4 x i1> %mask, i32 0, i32 %vl, i32 3)
+ ret <vscale x 4 x i16> %0
}
-declare <vscale x 1 x i64> @llvm.riscv.vssrl.mask.nxv1i64(
- <vscale x 1 x i64>,
- <vscale x 1 x i64>,
- i32,
- <vscale x 1 x i1>,
- i32,
- i32);
+declare <vscale x 4 x i16> @llvm.riscv.vssrl.mask.nxv4i16.i32.i32(<vscale x 4 x i16>, <vscale x 4 x i16>, i32, <vscale x 4 x i1>, i32 immarg, i32, i32 immarg)
-define <vscale x 1 x i64> @intrinsic_vssrl_mask_vx_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i32 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vssrl_mask_vx_nxv1i64_nxv1i64:
+define <vscale x 8 x i16> @test_vssrl_vv_u16m2_m(<vscale x 8 x i1> %mask, <vscale x 8 x i16> %op1, <vscale x 8 x i16> %shift, i32 %vl) {
+; CHECK-LABEL: test_vssrl_vv_u16m2_m:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
-; CHECK-NEXT: vssrl.vx v8, v9, a0, v0.t
+; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
+; CHECK-NEXT: csrwi vxrm, 0
+; CHECK-NEXT: vssrl.vv v8, v8, v10, v0.t
; CHECK-NEXT: ret
entry:
- %a = call <vscale x 1 x i64> @llvm.riscv.vssrl.mask.nxv1i64(
- <vscale x 1 x i64> %0,
- <vscale x 1 x i64> %1,
- i32 %2,
- <vscale x 1 x i1> %3,
- i32 %4, i32 1)
-
- ret <vscale x 1 x i64> %a
+ %0 = call <vscale x 8 x i16> @llvm.riscv.vssrl.mask.nxv8i16.nxv8i16.i32(<vscale x 8 x i16> poison, <vscale x 8 x i16> %op1, <vscale x 8 x i16> %shift, <vscale x 8 x i1> %mask, i32 0, i32 %vl, i32 3)
+ ret <vscale x 8 x i16> %0
}
-declare <vscale x 2 x i64> @llvm.riscv.vssrl.nxv2i64(
- <vscale x 2 x i64>,
- <vscale x 2 x i64>,
- i32,
- i32);
+declare <vscale x 8 x i16> @llvm.riscv.vssrl.mask.nxv8i16.nxv8i16.i32(<vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i1>, i32 immarg, i32, i32 immarg)
-define <vscale x 2 x i64> @intrinsic_vssrl_vx_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, i32 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vssrl_vx_nxv2i64_nxv2i64:
+define <vscale x 8 x i16> @test_vssrl_vx_u16m2_m(<vscale x 8 x i1> %mask, <vscale x 8 x i16> %op1, i32 %shift, i32 %vl) {
+; CHECK-LABEL: test_vssrl_vx_u16m2_m:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma
-; CHECK-NEXT: vssrl.vx v8, v8, a0
+; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
+; CHECK-NEXT: csrwi vxrm, 0
+; CHECK-NEXT: vssrl.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
entry:
- %a = call <vscale x 2 x i64> @llvm.riscv.vssrl.nxv2i64(
- <vscale x 2 x i64> undef,
- <vscale x 2 x i64> %0,
- i32 %1,
- i32 %2)
-
- ret <vscale x 2 x i64> %a
+ %0 = call <vscale x 8 x i16> @llvm.riscv.vssrl.mask.nxv8i16.i32.i32(<vscale x 8 x i16> poison, <vscale x 8 x i16> %op1, i32 %shift, <vscale x 8 x i1> %mask, i32 0, i32 %vl, i32 3)
+ ret <vscale x 8 x i16> %0
}
-declare <vscale x 2 x i64> @llvm.riscv.vssrl.mask.nxv2i64(
- <vscale x 2 x i64>,
- <vscale x 2 x i64>,
- i32,
- <vscale x 2 x i1>,
- i32,
- i32);
+declare <vscale x 8 x i16> @llvm.riscv.vssrl.mask.nxv8i16.i32.i32(<vscale x 8 x i16>, <vscale x 8 x i16>, i32, <vscale x 8 x i1>, i32 immarg, i32, i32 immarg)
-define <vscale x 2 x i64> @intrinsic_vssrl_mask_vx_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, i32 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vssrl_mask_vx_nxv2i64_nxv2i64:
+define <vscale x 16 x i16> @test_vssrl_vv_u16m4_m(<vscale x 16 x i1> %mask, <vscale x 16 x i16> %op1, <vscale x 16 x i16> %shift, i32 %vl) {
+; CHECK-LABEL: test_vssrl_vv_u16m4_m:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu
-; CHECK-NEXT: vssrl.vx v8, v10, a0, v0.t
+; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma
+; CHECK-NEXT: csrwi vxrm, 0
+; CHECK-NEXT: vssrl.vv v8, v8, v12, v0.t
; CHECK-NEXT: ret
entry:
- %a = call <vscale x 2 x i64> @llvm.riscv.vssrl.mask.nxv2i64(
- <vscale x 2 x i64> %0,
- <vscale x 2 x i64> %1,
- i32 %2,
- <vscale x 2 x i1> %3,
- i32 %4, i32 1)
-
- ret <vscale x 2 x i64> %a
+ %0 = call <vscale x 16 x i16> @llvm.riscv.vssrl.mask.nxv16i16.nxv16i16.i32(<vscale x 16 x i16> poison, <vscale x 16 x i16> %op1, <vscale x 16 x i16> %shift, <vscale x 16 x i1> %mask, i32 0, i32 %vl, i32 3)
+ ret <vscale x 16 x i16> %0
}
-declare <vscale x 4 x i64> @llvm.riscv.vssrl.nxv4i64(
- <vscale x 4 x i64>,
- <vscale x 4 x i64>,
- i32,
- i32);
+declare <vscale x 16 x i16> @llvm.riscv.vssrl.mask.nxv16i16.nxv16i16.i32(<vscale x 16 x i16>, <vscale x 16 x i16>, <vscale x 16 x i16>, <vscale x 16 x i1>, i32 immarg, i32, i32 immarg)
-define <vscale x 4 x i64> @intrinsic_vssrl_vx_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, i32 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vssrl_vx_nxv4i64_nxv4i64:
+define <vscale x 16 x i16> @test_vssrl_vx_u16m4_m(<vscale x 16 x i1> %mask, <vscale x 16 x i16> %op1, i32 %shift, i32 %vl) {
+; CHECK-LABEL: test_vssrl_vx_u16m4_m:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma
-; CHECK-NEXT: vssrl.vx v8, v8, a0
+; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma
+; CHECK-NEXT: csrwi vxrm, 0
+; CHECK-NEXT: vssrl.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
entry:
- %a = call <vscale x 4 x i64> @llvm.riscv.vssrl.nxv4i64(
- <vscale x 4 x i64> undef,
- <vscale x 4 x i64> %0,
- i32 %1,
- i32 %2)
-
- ret <vscale x 4 x i64> %a
+ %0 = call <vscale x 16 x i16> @llvm.riscv.vssrl.mask.nxv16i16.i32.i32(<vscale x 16 x i16> poison, <vscale x 16 x i16> %op1, i32 %shift, <vscale x 16 x i1> %mask, i32 0, i32 %vl, i32 3)
+ ret <vscale x 16 x i16> %0
}
-declare <vscale x 4 x i64> @llvm.riscv.vssrl.mask.nxv4i64(
- <vscale x 4 x i64>,
- <vscale x 4 x i64>,
- i32,
- <vscale x 4 x i1>,
- i32,
- i32);
+declare <vscale x 16 x i16> @llvm.riscv.vssrl.mask.nxv16i16.i32.i32(<vscale x 16 x i16>, <vscale x 16 x i16>, i32, <vscale x 16 x i1>, i32 immarg, i32, i32 immarg)
-define <vscale x 4 x i64> @intrinsic_vssrl_mask_vx_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, i32 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vssrl_mask_vx_nxv4i64_nxv4i64:
+define <vscale x 32 x i16> @test_vssrl_vv_u16m8_m(<vscale x 32 x i1> %mask, <vscale x 32 x i16> %op1, <vscale x 32 x i16> %shift, i32 %vl) {
+; CHECK-LABEL: test_vssrl_vv_u16m8_m:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu
-; CHECK-NEXT: vssrl.vx v8, v12, a0, v0.t
+; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma
+; CHECK-NEXT: csrwi vxrm, 0
+; CHECK-NEXT: vssrl.vv v8, v8, v16, v0.t
; CHECK-NEXT: ret
entry:
- %a = call <vscale x 4 x i64> @llvm.riscv.vssrl.mask.nxv4i64(
- <vscale x 4 x i64> %0,
- <vscale x 4 x i64> %1,
- i32 %2,
- <vscale x 4 x i1> %3,
- i32 %4, i32 1)
-
- ret <vscale x 4 x i64> %a
+ %0 = call <vscale x 32 x i16> @llvm.riscv.vssrl.mask.nxv32i16.nxv32i16.i32(<vscale x 32 x i16> poison, <vscale x 32 x i16> %op1, <vscale x 32 x i16> %shift, <vscale x 32 x i1> %mask, i32 0, i32 %vl, i32 3)
+ ret <vscale x 32 x i16> %0
}
-declare <vscale x 8 x i64> @llvm.riscv.vssrl.nxv8i64(
- <vscale x 8 x i64>,
- <vscale x 8 x i64>,
- i32,
- i32);
+declare <vscale x 32 x i16> @llvm.riscv.vssrl.mask.nxv32i16.nxv32i16.i32(<vscale x 32 x i16>, <vscale x 32 x i16>, <vscale x 32 x i16>, <vscale x 32 x i1>, i32 immarg, i32, i32 immarg)
-define <vscale x 8 x i64> @intrinsic_vssrl_vx_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, i32 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vssrl_vx_nxv8i64_nxv8i64:
+define <vscale x 32 x i16> @test_vssrl_vx_u16m8_m(<vscale x 32 x i1> %mask, <vscale x 32 x i16> %op1, i32 %shift, i32 %vl) {
+; CHECK-LABEL: test_vssrl_vx_u16m8_m:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, ma
-; CHECK-NEXT: vssrl.vx v8, v8, a0
+; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma
+; CHECK-NEXT: csrwi vxrm, 0
+; CHECK-NEXT: vssrl.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
entry:
- %a = call <vscale x 8 x i64> @llvm.riscv.vssrl.nxv8i64(
- <vscale x 8 x i64> undef,
- <vscale x 8 x i64> %0,
- i32 %1,
- i32 %2)
-
- ret <vscale x 8 x i64> %a
+ %0 = call <vscale x 32 x i16> @llvm.riscv.vssrl.mask.nxv32i16.i32.i32(<vscale x 32 x i16> poison, <vscale x 32 x i16> %op1, i32 %shift, <vscale x 32 x i1> %mask, i32 0, i32 %vl, i32 3)
+ ret <vscale x 32 x i16> %0
}
-declare <vscale x 8 x i64> @llvm.riscv.vssrl.mask.nxv8i64(
- <vscale x 8 x i64>,
- <vscale x 8 x i64>,
- i32,
- <vscale x 8 x i1>,
- i32,
- i32);
+declare <vscale x 32 x i16> @llvm.riscv.vssrl.mask.nxv32i16.i32.i32(<vscale x 32 x i16>, <vscale x 32 x i16>, i32, <vscale x 32 x i1>, i32 immarg, i32, i32 immarg)
-define <vscale x 8 x i64> @intrinsic_vssrl_mask_vx_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, i32 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vssrl_mask_vx_nxv8i64_nxv8i64:
+define <vscale x 1 x i32> @test_vssrl_vv_u32mf2_m(<vscale x 1 x i1> %mask, <vscale x 1 x i32> %op1, <vscale x 1 x i32> %shift, i32 %vl) {
+; CHECK-LABEL: test_vssrl_vv_u32mf2_m:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu
-; CHECK-NEXT: vssrl.vx v8, v16, a0, v0.t
+; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
+; CHECK-NEXT: csrwi vxrm, 0
+; CHECK-NEXT: vssrl.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
entry:
- %a = call <vscale x 8 x i64> @llvm.riscv.vssrl.mask.nxv8i64(
- <vscale x 8 x i64> %0,
- <vscale x 8 x i64> %1,
- i32 %2,
- <vscale x 8 x i1> %3,
- i32 %4, i32 1)
-
- ret <vscale x 8 x i64> %a
+ %0 = call <vscale x 1 x i32> @llvm.riscv.vssrl.mask.nxv1i32.nxv1i32.i32(<vscale x 1 x i32> poison, <vscale x 1 x i32> %op1, <vscale x 1 x i32> %shift, <vscale x 1 x i1> %mask, i32 0, i32 %vl, i32 3)
+ ret <vscale x 1 x i32> %0
}
-define <vscale x 1 x i8> @intrinsic_vssrl_vi_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, i32 %1) nounwind {
-; CHECK-LABEL: intrinsic_vssrl_vi_nxv1i8_nxv1i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
-; CHECK-NEXT: vssrl.vi v8, v8, 9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i8> @llvm.riscv.vssrl.nxv1i8(
- <vscale x 1 x i8> undef,
- <vscale x 1 x i8> %0,
- i32 9,
- i32 %1)
-
- ret <vscale x 1 x i8> %a
-}
+declare <vscale x 1 x i32> @llvm.riscv.vssrl.mask.nxv1i32.nxv1i32.i32(<vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i1>, i32 immarg, i32, i32 immarg)
-define <vscale x 1 x i8> @intrinsic_vssrl_mask_vi_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vssrl_mask_vi_nxv1i8_nxv1i8_i8:
+define <vscale x 1 x i32> @test_vssrl_vx_u32mf2_m(<vscale x 1 x i1> %mask, <vscale x 1 x i32> %op1, i32 %shift, i32 %vl) {
+; CHECK-LABEL: test_vssrl_vx_u32mf2_m:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu
-; CHECK-NEXT: vssrl.vi v8, v9, 9, v0.t
+; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
+; CHECK-NEXT: csrwi vxrm, 0
+; CHECK-NEXT: vssrl.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
entry:
- %a = call <vscale x 1 x i8> @llvm.riscv.vssrl.mask.nxv1i8(
- <vscale x 1 x i8> %0,
- <vscale x 1 x i8> %1,
- i32 9,
- <vscale x 1 x i1> %2,
- i32 %3, i32 1)
-
- ret <vscale x 1 x i8> %a
+ %0 = call <vscale x 1 x i32> @llvm.riscv.vssrl.mask.nxv1i32.i32.i32(<vscale x 1 x i32> poison, <vscale x 1 x i32> %op1, i32 %shift, <vscale x 1 x i1> %mask, i32 0, i32 %vl, i32 3)
+ ret <vscale x 1 x i32> %0
}
-define <vscale x 2 x i8> @intrinsic_vssrl_vi_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, i32 %1) nounwind {
-; CHECK-LABEL: intrinsic_vssrl_vi_nxv2i8_nxv2i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma
-; CHECK-NEXT: vssrl.vi v8, v8, 9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i8> @llvm.riscv.vssrl.nxv2i8(
- <vscale x 2 x i8> undef,
- <vscale x 2 x i8> %0,
- i32 9,
- i32 %1)
-
- ret <vscale x 2 x i8> %a
-}
+declare <vscale x 1 x i32> @llvm.riscv.vssrl.mask.nxv1i32.i32.i32(<vscale x 1 x i32>, <vscale x 1 x i32>, i32, <vscale x 1 x i1>, i32 immarg, i32, i32 immarg)
-define <vscale x 2 x i8> @intrinsic_vssrl_mask_vi_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vssrl_mask_vi_nxv2i8_nxv2i8_i8:
+define <vscale x 2 x i32> @test_vssrl_vv_u32m1_m(<vscale x 2 x i1> %mask, <vscale x 2 x i32> %op1, <vscale x 2 x i32> %shift, i32 %vl) {
+; CHECK-LABEL: test_vssrl_vv_u32m1_m:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu
-; CHECK-NEXT: vssrl.vi v8, v9, 9, v0.t
+; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
+; CHECK-NEXT: csrwi vxrm, 0
+; CHECK-NEXT: vssrl.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
entry:
- %a = call <vscale x 2 x i8> @llvm.riscv.vssrl.mask.nxv2i8(
- <vscale x 2 x i8> %0,
- <vscale x 2 x i8> %1,
- i32 9,
- <vscale x 2 x i1> %2,
- i32 %3, i32 1)
-
- ret <vscale x 2 x i8> %a
+ %0 = call <vscale x 2 x i32> @llvm.riscv.vssrl.mask.nxv2i32.nxv2i32.i32(<vscale x 2 x i32> poison, <vscale x 2 x i32> %op1, <vscale x 2 x i32> %shift, <vscale x 2 x i1> %mask, i32 0, i32 %vl, i32 3)
+ ret <vscale x 2 x i32> %0
}
-define <vscale x 4 x i8> @intrinsic_vssrl_vi_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, i32 %1) nounwind {
-; CHECK-LABEL: intrinsic_vssrl_vi_nxv4i8_nxv4i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
-; CHECK-NEXT: vssrl.vi v8, v8, 9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i8> @llvm.riscv.vssrl.nxv4i8(
- <vscale x 4 x i8> undef,
- <vscale x 4 x i8> %0,
- i32 9,
- i32 %1)
-
- ret <vscale x 4 x i8> %a
-}
+declare <vscale x 2 x i32> @llvm.riscv.vssrl.mask.nxv2i32.nxv2i32.i32(<vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i1>, i32 immarg, i32, i32 immarg)
-define <vscale x 4 x i8> @intrinsic_vssrl_mask_vi_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vssrl_mask_vi_nxv4i8_nxv4i8_i8:
+define <vscale x 2 x i32> @test_vssrl_vx_u32m1_m(<vscale x 2 x i1> %mask, <vscale x 2 x i32> %op1, i32 %shift, i32 %vl) {
+; CHECK-LABEL: test_vssrl_vx_u32m1_m:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu
-; CHECK-NEXT: vssrl.vi v8, v9, 9, v0.t
+; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
+; CHECK-NEXT: csrwi vxrm, 0
+; CHECK-NEXT: vssrl.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
entry:
- %a = call <vscale x 4 x i8> @llvm.riscv.vssrl.mask.nxv4i8(
- <vscale x 4 x i8> %0,
- <vscale x 4 x i8> %1,
- i32 9,
- <vscale x 4 x i1> %2,
- i32 %3, i32 1)
-
- ret <vscale x 4 x i8> %a
+ %0 = call <vscale x 2 x i32> @llvm.riscv.vssrl.mask.nxv2i32.i32.i32(<vscale x 2 x i32> poison, <vscale x 2 x i32> %op1, i32 %shift, <vscale x 2 x i1> %mask, i32 0, i32 %vl, i32 3)
+ ret <vscale x 2 x i32> %0
}
-define <vscale x 8 x i8> @intrinsic_vssrl_vi_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, i32 %1) nounwind {
-; CHECK-LABEL: intrinsic_vssrl_vi_nxv8i8_nxv8i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma
-; CHECK-NEXT: vssrl.vi v8, v8, 9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i8> @llvm.riscv.vssrl.nxv8i8(
- <vscale x 8 x i8> undef,
- <vscale x 8 x i8> %0,
- i32 9,
- i32 %1)
-
- ret <vscale x 8 x i8> %a
-}
+declare <vscale x 2 x i32> @llvm.riscv.vssrl.mask.nxv2i32.i32.i32(<vscale x 2 x i32>, <vscale x 2 x i32>, i32, <vscale x 2 x i1>, i32 immarg, i32, i32 immarg)
-define <vscale x 8 x i8> @intrinsic_vssrl_mask_vi_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vssrl_mask_vi_nxv8i8_nxv8i8_i8:
+define <vscale x 4 x i32> @test_vssrl_vv_u32m2_m(<vscale x 4 x i1> %mask, <vscale x 4 x i32> %op1, <vscale x 4 x i32> %shift, i32 %vl) {
+; CHECK-LABEL: test_vssrl_vv_u32m2_m:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu
-; CHECK-NEXT: vssrl.vi v8, v9, 9, v0.t
+; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
+; CHECK-NEXT: csrwi vxrm, 0
+; CHECK-NEXT: vssrl.vv v8, v8, v10, v0.t
; CHECK-NEXT: ret
entry:
- %a = call <vscale x 8 x i8> @llvm.riscv.vssrl.mask.nxv8i8(
- <vscale x 8 x i8> %0,
- <vscale x 8 x i8> %1,
- i32 9,
- <vscale x 8 x i1> %2,
- i32 %3, i32 1)
-
- ret <vscale x 8 x i8> %a
+ %0 = call <vscale x 4 x i32> @llvm.riscv.vssrl.mask.nxv4i32.nxv4i32.i32(<vscale x 4 x i32> poison, <vscale x 4 x i32> %op1, <vscale x 4 x i32> %shift, <vscale x 4 x i1> %mask, i32 0, i32 %vl, i32 3)
+ ret <vscale x 4 x i32> %0
}
-define <vscale x 16 x i8> @intrinsic_vssrl_vi_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, i32 %1) nounwind {
-; CHECK-LABEL: intrinsic_vssrl_vi_nxv16i8_nxv16i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma
-; CHECK-NEXT: vssrl.vi v8, v8, 9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 16 x i8> @llvm.riscv.vssrl.nxv16i8(
- <vscale x 16 x i8> undef,
- <vscale x 16 x i8> %0,
- i32 9,
- i32 %1)
-
- ret <vscale x 16 x i8> %a
-}
+declare <vscale x 4 x i32> @llvm.riscv.vssrl.mask.nxv4i32.nxv4i32.i32(<vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i1>, i32 immarg, i32, i32 immarg)
-define <vscale x 16 x i8> @intrinsic_vssrl_mask_vi_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, <vscale x 16 x i1> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vssrl_mask_vi_nxv16i8_nxv16i8_i8:
+define <vscale x 4 x i32> @test_vssrl_vx_u32m2_m(<vscale x 4 x i1> %mask, <vscale x 4 x i32> %op1, i32 %shift, i32 %vl) {
+; CHECK-LABEL: test_vssrl_vx_u32m2_m:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu
-; CHECK-NEXT: vssrl.vi v8, v10, 9, v0.t
+; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
+; CHECK-NEXT: csrwi vxrm, 0
+; CHECK-NEXT: vssrl.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
entry:
- %a = call <vscale x 16 x i8> @llvm.riscv.vssrl.mask.nxv16i8(
- <vscale x 16 x i8> %0,
- <vscale x 16 x i8> %1,
- i32 9,
- <vscale x 16 x i1> %2,
- i32 %3, i32 1)
-
- ret <vscale x 16 x i8> %a
+ %0 = call <vscale x 4 x i32> @llvm.riscv.vssrl.mask.nxv4i32.i32.i32(<vscale x 4 x i32> poison, <vscale x 4 x i32> %op1, i32 %shift, <vscale x 4 x i1> %mask, i32 0, i32 %vl, i32 3)
+ ret <vscale x 4 x i32> %0
}
-define <vscale x 32 x i8> @intrinsic_vssrl_vi_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, i32 %1) nounwind {
-; CHECK-LABEL: intrinsic_vssrl_vi_nxv32i8_nxv32i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma
-; CHECK-NEXT: vssrl.vi v8, v8, 9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 32 x i8> @llvm.riscv.vssrl.nxv32i8(
- <vscale x 32 x i8> undef,
- <vscale x 32 x i8> %0,
- i32 9,
- i32 %1)
-
- ret <vscale x 32 x i8> %a
-}
+declare <vscale x 4 x i32> @llvm.riscv.vssrl.mask.nxv4i32.i32.i32(<vscale x 4 x i32>, <vscale x 4 x i32>, i32, <vscale x 4 x i1>, i32 immarg, i32, i32 immarg)
-define <vscale x 32 x i8> @intrinsic_vssrl_mask_vi_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, <vscale x 32 x i1> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vssrl_mask_vi_nxv32i8_nxv32i8_i8:
+define <vscale x 8 x i32> @test_vssrl_vv_u32m4_m(<vscale x 8 x i1> %mask, <vscale x 8 x i32> %op1, <vscale x 8 x i32> %shift, i32 %vl) {
+; CHECK-LABEL: test_vssrl_vv_u32m4_m:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu
-; CHECK-NEXT: vssrl.vi v8, v12, 9, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 32 x i8> @llvm.riscv.vssrl.mask.nxv32i8(
- <vscale x 32 x i8> %0,
- <vscale x 32 x i8> %1,
- i32 9,
- <vscale x 32 x i1> %2,
- i32 %3, i32 1)
-
- ret <vscale x 32 x i8> %a
-}
-
-define <vscale x 64 x i8> @intrinsic_vssrl_vi_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, i32 %1) nounwind {
-; CHECK-LABEL: intrinsic_vssrl_vi_nxv64i8_nxv64i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma
-; CHECK-NEXT: vssrl.vi v8, v8, 9
+; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
+; CHECK-NEXT: csrwi vxrm, 0
+; CHECK-NEXT: vssrl.vv v8, v8, v12, v0.t
; CHECK-NEXT: ret
entry:
- %a = call <vscale x 64 x i8> @llvm.riscv.vssrl.nxv64i8(
- <vscale x 64 x i8> undef,
- <vscale x 64 x i8> %0,
- i32 9,
- i32 %1)
-
- ret <vscale x 64 x i8> %a
+ %0 = call <vscale x 8 x i32> @llvm.riscv.vssrl.mask.nxv8i32.nxv8i32.i32(<vscale x 8 x i32> poison, <vscale x 8 x i32> %op1, <vscale x 8 x i32> %shift, <vscale x 8 x i1> %mask, i32 0, i32 %vl, i32 3)
+ ret <vscale x 8 x i32> %0
}
-define <vscale x 64 x i8> @intrinsic_vssrl_mask_vi_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i1> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vssrl_mask_vi_nxv64i8_nxv64i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, mu
-; CHECK-NEXT: vssrl.vi v8, v16, 9, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 64 x i8> @llvm.riscv.vssrl.mask.nxv64i8(
- <vscale x 64 x i8> %0,
- <vscale x 64 x i8> %1,
- i32 9,
- <vscale x 64 x i1> %2,
- i32 %3, i32 1)
-
- ret <vscale x 64 x i8> %a
-}
+declare <vscale x 8 x i32> @llvm.riscv.vssrl.mask.nxv8i32.nxv8i32.i32(<vscale x 8 x i32>, <vscale x 8 x i32>, <vscale x 8 x i32>, <vscale x 8 x i1>, i32 immarg, i32, i32 immarg)
-define <vscale x 1 x i16> @intrinsic_vssrl_vi_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, i32 %1) nounwind {
-; CHECK-LABEL: intrinsic_vssrl_vi_nxv1i16_nxv1i16_i16:
+define <vscale x 8 x i32> @test_vssrl_vx_u32m4_m(<vscale x 8 x i1> %mask, <vscale x 8 x i32> %op1, i32 %shift, i32 %vl) {
+; CHECK-LABEL: test_vssrl_vx_u32m4_m:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
-; CHECK-NEXT: vssrl.vi v8, v8, 9
+; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma
+; CHECK-NEXT: csrwi vxrm, 0
+; CHECK-NEXT: vssrl.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
entry:
- %a = call <vscale x 1 x i16> @llvm.riscv.vssrl.nxv1i16(
- <vscale x 1 x i16> undef,
- <vscale x 1 x i16> %0,
- i32 9,
- i32 %1)
-
- ret <vscale x 1 x i16> %a
+ %0 = call <vscale x 8 x i32> @llvm.riscv.vssrl.mask.nxv8i32.i32.i32(<vscale x 8 x i32> poison, <vscale x 8 x i32> %op1, i32 %shift, <vscale x 8 x i1> %mask, i32 0, i32 %vl, i32 3)
+ ret <vscale x 8 x i32> %0
}
-define <vscale x 1 x i16> @intrinsic_vssrl_mask_vi_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vssrl_mask_vi_nxv1i16_nxv1i16_i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
-; CHECK-NEXT: vssrl.vi v8, v9, 9, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i16> @llvm.riscv.vssrl.mask.nxv1i16(
- <vscale x 1 x i16> %0,
- <vscale x 1 x i16> %1,
- i32 9,
- <vscale x 1 x i1> %2,
- i32 %3, i32 1)
-
- ret <vscale x 1 x i16> %a
-}
+declare <vscale x 8 x i32> @llvm.riscv.vssrl.mask.nxv8i32.i32.i32(<vscale x 8 x i32>, <vscale x 8 x i32>, i32, <vscale x 8 x i1>, i32 immarg, i32, i32 immarg)
-define <vscale x 2 x i16> @intrinsic_vssrl_vi_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, i32 %1) nounwind {
-; CHECK-LABEL: intrinsic_vssrl_vi_nxv2i16_nxv2i16_i16:
+define <vscale x 16 x i32> @test_vssrl_vv_u32m8_m(<vscale x 16 x i1> %mask, <vscale x 16 x i32> %op1, <vscale x 16 x i32> %shift, i32 %vl) {
+; CHECK-LABEL: test_vssrl_vv_u32m8_m:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
-; CHECK-NEXT: vssrl.vi v8, v8, 9
+; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma
+; CHECK-NEXT: csrwi vxrm, 0
+; CHECK-NEXT: vssrl.vv v8, v8, v16, v0.t
; CHECK-NEXT: ret
entry:
- %a = call <vscale x 2 x i16> @llvm.riscv.vssrl.nxv2i16(
- <vscale x 2 x i16> undef,
- <vscale x 2 x i16> %0,
- i32 9,
- i32 %1)
-
- ret <vscale x 2 x i16> %a
+ %0 = call <vscale x 16 x i32> @llvm.riscv.vssrl.mask.nxv16i32.nxv16i32.i32(<vscale x 16 x i32> poison, <vscale x 16 x i32> %op1, <vscale x 16 x i32> %shift, <vscale x 16 x i1> %mask, i32 0, i32 %vl, i32 3)
+ ret <vscale x 16 x i32> %0
}
-define <vscale x 2 x i16> @intrinsic_vssrl_mask_vi_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vssrl_mask_vi_nxv2i16_nxv2i16_i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu
-; CHECK-NEXT: vssrl.vi v8, v9, 9, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i16> @llvm.riscv.vssrl.mask.nxv2i16(
- <vscale x 2 x i16> %0,
- <vscale x 2 x i16> %1,
- i32 9,
- <vscale x 2 x i1> %2,
- i32 %3, i32 1)
-
- ret <vscale x 2 x i16> %a
-}
+declare <vscale x 16 x i32> @llvm.riscv.vssrl.mask.nxv16i32.nxv16i32.i32(<vscale x 16 x i32>, <vscale x 16 x i32>, <vscale x 16 x i32>, <vscale x 16 x i1>, i32 immarg, i32, i32 immarg)
-define <vscale x 4 x i16> @intrinsic_vssrl_vi_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, i32 %1) nounwind {
-; CHECK-LABEL: intrinsic_vssrl_vi_nxv4i16_nxv4i16_i16:
+define <vscale x 16 x i32> @test_vssrl_vx_u32m8_m(<vscale x 16 x i1> %mask, <vscale x 16 x i32> %op1, i32 %shift, i32 %vl) {
+; CHECK-LABEL: test_vssrl_vx_u32m8_m:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
-; CHECK-NEXT: vssrl.vi v8, v8, 9
+; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma
+; CHECK-NEXT: csrwi vxrm, 0
+; CHECK-NEXT: vssrl.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
entry:
- %a = call <vscale x 4 x i16> @llvm.riscv.vssrl.nxv4i16(
- <vscale x 4 x i16> undef,
- <vscale x 4 x i16> %0,
- i32 9,
- i32 %1)
-
- ret <vscale x 4 x i16> %a
+ %0 = call <vscale x 16 x i32> @llvm.riscv.vssrl.mask.nxv16i32.i32.i32(<vscale x 16 x i32> poison, <vscale x 16 x i32> %op1, i32 %shift, <vscale x 16 x i1> %mask, i32 0, i32 %vl, i32 3)
+ ret <vscale x 16 x i32> %0
}
-define <vscale x 4 x i16> @intrinsic_vssrl_mask_vi_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vssrl_mask_vi_nxv4i16_nxv4i16_i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu
-; CHECK-NEXT: vssrl.vi v8, v9, 9, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i16> @llvm.riscv.vssrl.mask.nxv4i16(
- <vscale x 4 x i16> %0,
- <vscale x 4 x i16> %1,
- i32 9,
- <vscale x 4 x i1> %2,
- i32 %3, i32 1)
-
- ret <vscale x 4 x i16> %a
-}
+declare <vscale x 16 x i32> @llvm.riscv.vssrl.mask.nxv16i32.i32.i32(<vscale x 16 x i32>, <vscale x 16 x i32>, i32, <vscale x 16 x i1>, i32 immarg, i32, i32 immarg)
-define <vscale x 8 x i16> @intrinsic_vssrl_vi_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, i32 %1) nounwind {
-; CHECK-LABEL: intrinsic_vssrl_vi_nxv8i16_nxv8i16_i16:
+define <vscale x 1 x i64> @test_vssrl_vv_u64m1_m(<vscale x 1 x i1> %mask, <vscale x 1 x i64> %op1, <vscale x 1 x i64> %shift, i32 %vl) {
+; CHECK-LABEL: test_vssrl_vv_u64m1_m:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
-; CHECK-NEXT: vssrl.vi v8, v8, 9
+; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
+; CHECK-NEXT: csrwi vxrm, 0
+; CHECK-NEXT: vssrl.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
entry:
- %a = call <vscale x 8 x i16> @llvm.riscv.vssrl.nxv8i16(
- <vscale x 8 x i16> undef,
- <vscale x 8 x i16> %0,
- i32 9,
- i32 %1)
-
- ret <vscale x 8 x i16> %a
+ %0 = call <vscale x 1 x i64> @llvm.riscv.vssrl.mask.nxv1i64.nxv1i64.i32(<vscale x 1 x i64> poison, <vscale x 1 x i64> %op1, <vscale x 1 x i64> %shift, <vscale x 1 x i1> %mask, i32 0, i32 %vl, i32 3)
+ ret <vscale x 1 x i64> %0
}
-define <vscale x 8 x i16> @intrinsic_vssrl_mask_vi_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vssrl_mask_vi_nxv8i16_nxv8i16_i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu
-; CHECK-NEXT: vssrl.vi v8, v10, 9, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i16> @llvm.riscv.vssrl.mask.nxv8i16(
- <vscale x 8 x i16> %0,
- <vscale x 8 x i16> %1,
- i32 9,
- <vscale x 8 x i1> %2,
- i32 %3, i32 1)
-
- ret <vscale x 8 x i16> %a
-}
+declare <vscale x 1 x i64> @llvm.riscv.vssrl.mask.nxv1i64.nxv1i64.i32(<vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i1>, i32 immarg, i32, i32 immarg)
-define <vscale x 16 x i16> @intrinsic_vssrl_vi_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, i32 %1) nounwind {
-; CHECK-LABEL: intrinsic_vssrl_vi_nxv16i16_nxv16i16_i16:
+define <vscale x 1 x i64> @test_vssrl_vx_u64m1_m(<vscale x 1 x i1> %mask, <vscale x 1 x i64> %op1, i32 %shift, i32 %vl) {
+; CHECK-LABEL: test_vssrl_vx_u64m1_m:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma
-; CHECK-NEXT: vssrl.vi v8, v8, 9
+; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
+; CHECK-NEXT: csrwi vxrm, 0
+; CHECK-NEXT: vssrl.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
entry:
- %a = call <vscale x 16 x i16> @llvm.riscv.vssrl.nxv16i16(
- <vscale x 16 x i16> undef,
- <vscale x 16 x i16> %0,
- i32 9,
- i32 %1)
-
- ret <vscale x 16 x i16> %a
+ %0 = call <vscale x 1 x i64> @llvm.riscv.vssrl.mask.nxv1i64.i32.i32(<vscale x 1 x i64> poison, <vscale x 1 x i64> %op1, i32 %shift, <vscale x 1 x i1> %mask, i32 0, i32 %vl, i32 3)
+ ret <vscale x 1 x i64> %0
}
-define <vscale x 16 x i16> @intrinsic_vssrl_mask_vi_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, <vscale x 16 x i1> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vssrl_mask_vi_nxv16i16_nxv16i16_i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu
-; CHECK-NEXT: vssrl.vi v8, v12, 9, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 16 x i16> @llvm.riscv.vssrl.mask.nxv16i16(
- <vscale x 16 x i16> %0,
- <vscale x 16 x i16> %1,
- i32 9,
- <vscale x 16 x i1> %2,
- i32 %3, i32 1)
-
- ret <vscale x 16 x i16> %a
-}
+declare <vscale x 1 x i64> @llvm.riscv.vssrl.mask.nxv1i64.i32.i32(<vscale x 1 x i64>, <vscale x 1 x i64>, i32, <vscale x 1 x i1>, i32 immarg, i32, i32 immarg)
-define <vscale x 32 x i16> @intrinsic_vssrl_vi_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, i32 %1) nounwind {
-; CHECK-LABEL: intrinsic_vssrl_vi_nxv32i16_nxv32i16_i16:
+define <vscale x 2 x i64> @test_vssrl_vv_u64m2_m(<vscale x 2 x i1> %mask, <vscale x 2 x i64> %op1, <vscale x 2 x i64> %shift, i32 %vl) {
+; CHECK-LABEL: test_vssrl_vv_u64m2_m:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma
-; CHECK-NEXT: vssrl.vi v8, v8, 9
+; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
+; CHECK-NEXT: csrwi vxrm, 0
+; CHECK-NEXT: vssrl.vv v8, v8, v10, v0.t
; CHECK-NEXT: ret
entry:
- %a = call <vscale x 32 x i16> @llvm.riscv.vssrl.nxv32i16(
- <vscale x 32 x i16> undef,
- <vscale x 32 x i16> %0,
- i32 9,
- i32 %1)
-
- ret <vscale x 32 x i16> %a
+ %0 = call <vscale x 2 x i64> @llvm.riscv.vssrl.mask.nxv2i64.nxv2i64.i32(<vscale x 2 x i64> poison, <vscale x 2 x i64> %op1, <vscale x 2 x i64> %shift, <vscale x 2 x i1> %mask, i32 0, i32 %vl, i32 3)
+ ret <vscale x 2 x i64> %0
}
-define <vscale x 32 x i16> @intrinsic_vssrl_mask_vi_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, <vscale x 32 x i1> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vssrl_mask_vi_nxv32i16_nxv32i16_i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu
-; CHECK-NEXT: vssrl.vi v8, v16, 9, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 32 x i16> @llvm.riscv.vssrl.mask.nxv32i16(
- <vscale x 32 x i16> %0,
- <vscale x 32 x i16> %1,
- i32 9,
- <vscale x 32 x i1> %2,
- i32 %3, i32 1)
-
- ret <vscale x 32 x i16> %a
-}
+declare <vscale x 2 x i64> @llvm.riscv.vssrl.mask.nxv2i64.nxv2i64.i32(<vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i1>, i32 immarg, i32, i32 immarg)
-define <vscale x 1 x i32> @intrinsic_vssrl_vi_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, i32 %1) nounwind {
-; CHECK-LABEL: intrinsic_vssrl_vi_nxv1i32_nxv1i32_i32:
+define <vscale x 2 x i64> @test_vssrl_vx_u64m2_m(<vscale x 2 x i1> %mask, <vscale x 2 x i64> %op1, i32 %shift, i32 %vl) {
+; CHECK-LABEL: test_vssrl_vx_u64m2_m:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
-; CHECK-NEXT: vssrl.vi v8, v8, 9
+; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma
+; CHECK-NEXT: csrwi vxrm, 0
+; CHECK-NEXT: vssrl.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
entry:
- %a = call <vscale x 1 x i32> @llvm.riscv.vssrl.nxv1i32(
- <vscale x 1 x i32> undef,
- <vscale x 1 x i32> %0,
- i32 9,
- i32 %1)
-
- ret <vscale x 1 x i32> %a
+ %0 = call <vscale x 2 x i64> @llvm.riscv.vssrl.mask.nxv2i64.i32.i32(<vscale x 2 x i64> poison, <vscale x 2 x i64> %op1, i32 %shift, <vscale x 2 x i1> %mask, i32 0, i32 %vl, i32 3)
+ ret <vscale x 2 x i64> %0
}
-define <vscale x 1 x i32> @intrinsic_vssrl_mask_vi_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vssrl_mask_vi_nxv1i32_nxv1i32_i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu
-; CHECK-NEXT: vssrl.vi v8, v9, 9, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i32> @llvm.riscv.vssrl.mask.nxv1i32(
- <vscale x 1 x i32> %0,
- <vscale x 1 x i32> %1,
- i32 9,
- <vscale x 1 x i1> %2,
- i32 %3, i32 1)
-
- ret <vscale x 1 x i32> %a
-}
+declare <vscale x 2 x i64> @llvm.riscv.vssrl.mask.nxv2i64.i32.i32(<vscale x 2 x i64>, <vscale x 2 x i64>, i32, <vscale x 2 x i1>, i32 immarg, i32, i32 immarg)
-define <vscale x 2 x i32> @intrinsic_vssrl_vi_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, i32 %1) nounwind {
-; CHECK-LABEL: intrinsic_vssrl_vi_nxv2i32_nxv2i32_i32:
+define <vscale x 4 x i64> @test_vssrl_vv_u64m4_m(<vscale x 4 x i1> %mask, <vscale x 4 x i64> %op1, <vscale x 4 x i64> %shift, i32 %vl) {
+; CHECK-LABEL: test_vssrl_vv_u64m4_m:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
-; CHECK-NEXT: vssrl.vi v8, v8, 9
+; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
+; CHECK-NEXT: csrwi vxrm, 0
+; CHECK-NEXT: vssrl.vv v8, v8, v12, v0.t
; CHECK-NEXT: ret
entry:
- %a = call <vscale x 2 x i32> @llvm.riscv.vssrl.nxv2i32(
- <vscale x 2 x i32> undef,
- <vscale x 2 x i32> %0,
- i32 9,
- i32 %1)
-
- ret <vscale x 2 x i32> %a
+ %0 = call <vscale x 4 x i64> @llvm.riscv.vssrl.mask.nxv4i64.nxv4i64.i32(<vscale x 4 x i64> poison, <vscale x 4 x i64> %op1, <vscale x 4 x i64> %shift, <vscale x 4 x i1> %mask, i32 0, i32 %vl, i32 3)
+ ret <vscale x 4 x i64> %0
}
-define <vscale x 2 x i32> @intrinsic_vssrl_mask_vi_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vssrl_mask_vi_nxv2i32_nxv2i32_i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
-; CHECK-NEXT: vssrl.vi v8, v9, 9, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i32> @llvm.riscv.vssrl.mask.nxv2i32(
- <vscale x 2 x i32> %0,
- <vscale x 2 x i32> %1,
- i32 9,
- <vscale x 2 x i1> %2,
- i32 %3, i32 1)
-
- ret <vscale x 2 x i32> %a
-}
+declare <vscale x 4 x i64> @llvm.riscv.vssrl.mask.nxv4i64.nxv4i64.i32(<vscale x 4 x i64>, <vscale x 4 x i64>, <vscale x 4 x i64>, <vscale x 4 x i1>, i32 immarg, i32, i32 immarg)
-define <vscale x 4 x i32> @intrinsic_vssrl_vi_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, i32 %1) nounwind {
-; CHECK-LABEL: intrinsic_vssrl_vi_nxv4i32_nxv4i32_i32:
+define <vscale x 4 x i64> @test_vssrl_vx_u64m4_m(<vscale x 4 x i1> %mask, <vscale x 4 x i64> %op1, i32 %shift, i32 %vl) {
+; CHECK-LABEL: test_vssrl_vx_u64m4_m:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
-; CHECK-NEXT: vssrl.vi v8, v8, 9
+; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma
+; CHECK-NEXT: csrwi vxrm, 0
+; CHECK-NEXT: vssrl.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
entry:
- %a = call <vscale x 4 x i32> @llvm.riscv.vssrl.nxv4i32(
- <vscale x 4 x i32> undef,
- <vscale x 4 x i32> %0,
- i32 9,
- i32 %1)
-
- ret <vscale x 4 x i32> %a
+ %0 = call <vscale x 4 x i64> @llvm.riscv.vssrl.mask.nxv4i64.i32.i32(<vscale x 4 x i64> poison, <vscale x 4 x i64> %op1, i32 %shift, <vscale x 4 x i1> %mask, i32 0, i32 %vl, i32 3)
+ ret <vscale x 4 x i64> %0
}
-define <vscale x 4 x i32> @intrinsic_vssrl_mask_vi_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vssrl_mask_vi_nxv4i32_nxv4i32_i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu
-; CHECK-NEXT: vssrl.vi v8, v10, 9, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i32> @llvm.riscv.vssrl.mask.nxv4i32(
- <vscale x 4 x i32> %0,
- <vscale x 4 x i32> %1,
- i32 9,
- <vscale x 4 x i1> %2,
- i32 %3, i32 1)
-
- ret <vscale x 4 x i32> %a
-}
+declare <vscale x 4 x i64> @llvm.riscv.vssrl.mask.nxv4i64.i32.i32(<vscale x 4 x i64>, <vscale x 4 x i64>, i32, <vscale x 4 x i1>, i32 immarg, i32, i32 immarg)
-define <vscale x 8 x i32> @intrinsic_vssrl_vi_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, i32 %1) nounwind {
-; CHECK-LABEL: intrinsic_vssrl_vi_nxv8i32_nxv8i32_i32:
+define <vscale x 8 x i64> @test_vssrl_vv_u64m8_m(<vscale x 8 x i1> %mask, <vscale x 8 x i64> %op1, <vscale x 8 x i64> %shift, i32 %vl) {
+; CHECK-LABEL: test_vssrl_vv_u64m8_m:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
-; CHECK-NEXT: vssrl.vi v8, v8, 9
+; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; CHECK-NEXT: csrwi vxrm, 0
+; CHECK-NEXT: vssrl.vv v8, v8, v16, v0.t
; CHECK-NEXT: ret
entry:
- %a = call <vscale x 8 x i32> @llvm.riscv.vssrl.nxv8i32(
- <vscale x 8 x i32> undef,
- <vscale x 8 x i32> %0,
- i32 9,
- i32 %1)
-
- ret <vscale x 8 x i32> %a
+ %0 = call <vscale x 8 x i64> @llvm.riscv.vssrl.mask.nxv8i64.nxv8i64.i32(<vscale x 8 x i64> poison, <vscale x 8 x i64> %op1, <vscale x 8 x i64> %shift, <vscale x 8 x i1> %mask, i32 0, i32 %vl, i32 3)
+ ret <vscale x 8 x i64> %0
}
-define <vscale x 8 x i32> @intrinsic_vssrl_mask_vi_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vssrl_mask_vi_nxv8i32_nxv8i32_i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu
-; CHECK-NEXT: vssrl.vi v8, v12, 9, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i32> @llvm.riscv.vssrl.mask.nxv8i32(
- <vscale x 8 x i32> %0,
- <vscale x 8 x i32> %1,
- i32 9,
- <vscale x 8 x i1> %2,
- i32 %3, i32 1)
-
- ret <vscale x 8 x i32> %a
-}
+declare <vscale x 8 x i64> @llvm.riscv.vssrl.mask.nxv8i64.nxv8i64.i32(<vscale x 8 x i64>, <vscale x 8 x i64>, <vscale x 8 x i64>, <vscale x 8 x i1>, i32 immarg, i32, i32 immarg)
-define <vscale x 16 x i32> @intrinsic_vssrl_vi_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, i32 %1) nounwind {
-; CHECK-LABEL: intrinsic_vssrl_vi_nxv16i32_nxv16i32_i32:
+define <vscale x 8 x i64> @test_vssrl_vx_u64m8_m(<vscale x 8 x i1> %mask, <vscale x 8 x i64> %op1, i32 %shift, i32 %vl) {
+; CHECK-LABEL: test_vssrl_vx_u64m8_m:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma
-; CHECK-NEXT: vssrl.vi v8, v8, 9
+; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, ma
+; CHECK-NEXT: csrwi vxrm, 0
+; CHECK-NEXT: vssrl.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
entry:
- %a = call <vscale x 16 x i32> @llvm.riscv.vssrl.nxv16i32(
- <vscale x 16 x i32> undef,
- <vscale x 16 x i32> %0,
- i32 9,
- i32 %1)
-
- ret <vscale x 16 x i32> %a
+ %0 = call <vscale x 8 x i64> @llvm.riscv.vssrl.mask.nxv8i64.i32.i32(<vscale x 8 x i64> poison, <vscale x 8 x i64> %op1, i32 %shift, <vscale x 8 x i1> %mask, i32 0, i32 %vl, i32 3)
+ ret <vscale x 8 x i64> %0
}
-define <vscale x 16 x i32> @intrinsic_vssrl_mask_vi_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, <vscale x 16 x i1> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vssrl_mask_vi_nxv16i32_nxv16i32_i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu
-; CHECK-NEXT: vssrl.vi v8, v16, 9, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 16 x i32> @llvm.riscv.vssrl.mask.nxv16i32(
- <vscale x 16 x i32> %0,
- <vscale x 16 x i32> %1,
- i32 9,
- <vscale x 16 x i1> %2,
- i32 %3, i32 1)
-
- ret <vscale x 16 x i32> %a
-}
+declare <vscale x 8 x i64> @llvm.riscv.vssrl.mask.nxv8i64.i32.i32(<vscale x 8 x i64>, <vscale x 8 x i64>, i32, <vscale x 8 x i1>, i32 immarg, i32, i32 immarg)
-; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 2
; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs \
; RUN: < %s | FileCheck %s
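+; The unmasked intrinsic calls below take the operands
+; (passthru, op1, shift, vxrm, vl): the vxrm immediate (0 = rnu) selects the
+; fixed-point rounding mode and is expected to lower to "csrwi vxrm, 0"
+; ahead of the vssrl instruction.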
-declare <vscale x 1 x i8> @llvm.riscv.vssrl.nxv1i8.nxv1i8(
- <vscale x 1 x i8>,
- <vscale x 1 x i8>,
- <vscale x 1 x i8>,
- i64);
-define <vscale x 1 x i8> @intrinsic_vssrl_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vssrl_vv_nxv1i8_nxv1i8_nxv1i8:
+define <vscale x 1 x i8> @test_vssrl_vv_u8mf8(<vscale x 1 x i8> %op1, <vscale x 1 x i8> %shift, i64 %vl) {
+; CHECK-LABEL: test_vssrl_vv_u8mf8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
+; CHECK-NEXT: csrwi vxrm, 0
; CHECK-NEXT: vssrl.vv v8, v8, v9
; CHECK-NEXT: ret
entry:
- %a = call <vscale x 1 x i8> @llvm.riscv.vssrl.nxv1i8.nxv1i8(
- <vscale x 1 x i8> undef,
- <vscale x 1 x i8> %0,
- <vscale x 1 x i8> %1,
- i64 %2)
-
- ret <vscale x 1 x i8> %a
+ %0 = call <vscale x 1 x i8> @llvm.riscv.vssrl.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> poison, <vscale x 1 x i8> %op1, <vscale x 1 x i8> %shift, i64 0, i64 %vl)
+ ret <vscale x 1 x i8> %0
}
-declare <vscale x 1 x i8> @llvm.riscv.vssrl.mask.nxv1i8.nxv1i8(
- <vscale x 1 x i8>,
- <vscale x 1 x i8>,
- <vscale x 1 x i8>,
- <vscale x 1 x i1>,
- i64,
- i64);
+declare <vscale x 1 x i8> @llvm.riscv.vssrl.nxv1i8.nxv1i8.i64(<vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, i64 immarg, i64)
-define <vscale x 1 x i8> @intrinsic_vssrl_mask_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vssrl_mask_vv_nxv1i8_nxv1i8_nxv1i8:
+define <vscale x 1 x i8> @test_vssrl_vx_u8mf8(<vscale x 1 x i8> %op1, i64 %shift, i64 %vl) {
+; CHECK-LABEL: test_vssrl_vx_u8mf8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu
-; CHECK-NEXT: vssrl.vv v8, v9, v10, v0.t
+; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
+; CHECK-NEXT: csrwi vxrm, 0
+; CHECK-NEXT: vssrl.vx v8, v8, a0
; CHECK-NEXT: ret
entry:
- %a = call <vscale x 1 x i8> @llvm.riscv.vssrl.mask.nxv1i8.nxv1i8(
- <vscale x 1 x i8> %0,
- <vscale x 1 x i8> %1,
- <vscale x 1 x i8> %2,
- <vscale x 1 x i1> %3,
- i64 %4, i64 1)
-
- ret <vscale x 1 x i8> %a
+ %0 = call <vscale x 1 x i8> @llvm.riscv.vssrl.nxv1i8.i64.i64(<vscale x 1 x i8> poison, <vscale x 1 x i8> %op1, i64 %shift, i64 0, i64 %vl)
+ ret <vscale x 1 x i8> %0
}
-declare <vscale x 2 x i8> @llvm.riscv.vssrl.nxv2i8.nxv2i8(
- <vscale x 2 x i8>,
- <vscale x 2 x i8>,
- <vscale x 2 x i8>,
- i64);
+declare <vscale x 1 x i8> @llvm.riscv.vssrl.nxv1i8.i64.i64(<vscale x 1 x i8>, <vscale x 1 x i8>, i64, i64 immarg, i64)
-define <vscale x 2 x i8> @intrinsic_vssrl_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vssrl_vv_nxv2i8_nxv2i8_nxv2i8:
+define <vscale x 2 x i8> @test_vssrl_vv_u8mf4(<vscale x 2 x i8> %op1, <vscale x 2 x i8> %shift, i64 %vl) {
+; CHECK-LABEL: test_vssrl_vv_u8mf4:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma
+; CHECK-NEXT: csrwi vxrm, 0
; CHECK-NEXT: vssrl.vv v8, v8, v9
; CHECK-NEXT: ret
entry:
- %a = call <vscale x 2 x i8> @llvm.riscv.vssrl.nxv2i8.nxv2i8(
- <vscale x 2 x i8> undef,
- <vscale x 2 x i8> %0,
- <vscale x 2 x i8> %1,
- i64 %2)
-
- ret <vscale x 2 x i8> %a
+ %0 = call <vscale x 2 x i8> @llvm.riscv.vssrl.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> poison, <vscale x 2 x i8> %op1, <vscale x 2 x i8> %shift, i64 0, i64 %vl)
+ ret <vscale x 2 x i8> %0
}
-declare <vscale x 2 x i8> @llvm.riscv.vssrl.mask.nxv2i8.nxv2i8(
- <vscale x 2 x i8>,
- <vscale x 2 x i8>,
- <vscale x 2 x i8>,
- <vscale x 2 x i1>,
- i64,
- i64);
+declare <vscale x 2 x i8> @llvm.riscv.vssrl.nxv2i8.nxv2i8.i64(<vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, i64 immarg, i64)
-define <vscale x 2 x i8> @intrinsic_vssrl_mask_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vssrl_mask_vv_nxv2i8_nxv2i8_nxv2i8:
+define <vscale x 2 x i8> @test_vssrl_vx_u8mf4(<vscale x 2 x i8> %op1, i64 %shift, i64 %vl) {
+; CHECK-LABEL: test_vssrl_vx_u8mf4:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu
-; CHECK-NEXT: vssrl.vv v8, v9, v10, v0.t
+; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
+; CHECK-NEXT: csrwi vxrm, 0
+; CHECK-NEXT: vssrl.vx v8, v8, a0
; CHECK-NEXT: ret
entry:
- %a = call <vscale x 2 x i8> @llvm.riscv.vssrl.mask.nxv2i8.nxv2i8(
- <vscale x 2 x i8> %0,
- <vscale x 2 x i8> %1,
- <vscale x 2 x i8> %2,
- <vscale x 2 x i1> %3,
- i64 %4, i64 1)
-
- ret <vscale x 2 x i8> %a
+ %0 = call <vscale x 2 x i8> @llvm.riscv.vssrl.nxv2i8.i64.i64(<vscale x 2 x i8> poison, <vscale x 2 x i8> %op1, i64 %shift, i64 0, i64 %vl)
+ ret <vscale x 2 x i8> %0
}
-declare <vscale x 4 x i8> @llvm.riscv.vssrl.nxv4i8.nxv4i8(
- <vscale x 4 x i8>,
- <vscale x 4 x i8>,
- <vscale x 4 x i8>,
- i64);
+declare <vscale x 2 x i8> @llvm.riscv.vssrl.nxv2i8.i64.i64(<vscale x 2 x i8>, <vscale x 2 x i8>, i64, i64 immarg, i64)
-define <vscale x 4 x i8> @intrinsic_vssrl_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vssrl_vv_nxv4i8_nxv4i8_nxv4i8:
+define <vscale x 4 x i8> @test_vssrl_vv_u8mf2(<vscale x 4 x i8> %op1, <vscale x 4 x i8> %shift, i64 %vl) {
+; CHECK-LABEL: test_vssrl_vv_u8mf2:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
+; CHECK-NEXT: csrwi vxrm, 0
; CHECK-NEXT: vssrl.vv v8, v8, v9
; CHECK-NEXT: ret
entry:
- %a = call <vscale x 4 x i8> @llvm.riscv.vssrl.nxv4i8.nxv4i8(
- <vscale x 4 x i8> undef,
- <vscale x 4 x i8> %0,
- <vscale x 4 x i8> %1,
- i64 %2)
-
- ret <vscale x 4 x i8> %a
+ %0 = call <vscale x 4 x i8> @llvm.riscv.vssrl.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> poison, <vscale x 4 x i8> %op1, <vscale x 4 x i8> %shift, i64 0, i64 %vl)
+ ret <vscale x 4 x i8> %0
}
-declare <vscale x 4 x i8> @llvm.riscv.vssrl.mask.nxv4i8.nxv4i8(
- <vscale x 4 x i8>,
- <vscale x 4 x i8>,
- <vscale x 4 x i8>,
- <vscale x 4 x i1>,
- i64,
- i64);
+declare <vscale x 4 x i8> @llvm.riscv.vssrl.nxv4i8.nxv4i8.i64(<vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, i64 immarg, i64)
-define <vscale x 4 x i8> @intrinsic_vssrl_mask_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vssrl_mask_vv_nxv4i8_nxv4i8_nxv4i8:
+define <vscale x 4 x i8> @test_vssrl_vx_u8mf2(<vscale x 4 x i8> %op1, i64 %shift, i64 %vl) {
+; CHECK-LABEL: test_vssrl_vx_u8mf2:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu
-; CHECK-NEXT: vssrl.vv v8, v9, v10, v0.t
+; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
+; CHECK-NEXT: csrwi vxrm, 0
+; CHECK-NEXT: vssrl.vx v8, v8, a0
; CHECK-NEXT: ret
entry:
- %a = call <vscale x 4 x i8> @llvm.riscv.vssrl.mask.nxv4i8.nxv4i8(
- <vscale x 4 x i8> %0,
- <vscale x 4 x i8> %1,
- <vscale x 4 x i8> %2,
- <vscale x 4 x i1> %3,
- i64 %4, i64 1)
-
- ret <vscale x 4 x i8> %a
+ %0 = call <vscale x 4 x i8> @llvm.riscv.vssrl.nxv4i8.i64.i64(<vscale x 4 x i8> poison, <vscale x 4 x i8> %op1, i64 %shift, i64 0, i64 %vl)
+ ret <vscale x 4 x i8> %0
}
-declare <vscale x 8 x i8> @llvm.riscv.vssrl.nxv8i8.nxv8i8(
- <vscale x 8 x i8>,
- <vscale x 8 x i8>,
- <vscale x 8 x i8>,
- i64);
+declare <vscale x 4 x i8> @llvm.riscv.vssrl.nxv4i8.i64.i64(<vscale x 4 x i8>, <vscale x 4 x i8>, i64, i64 immarg, i64)
-define <vscale x 8 x i8> @intrinsic_vssrl_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vssrl_vv_nxv8i8_nxv8i8_nxv8i8:
+define <vscale x 8 x i8> @test_vssrl_vv_u8m1(<vscale x 8 x i8> %op1, <vscale x 8 x i8> %shift, i64 %vl) {
+; CHECK-LABEL: test_vssrl_vv_u8m1:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma
+; CHECK-NEXT: csrwi vxrm, 0
; CHECK-NEXT: vssrl.vv v8, v8, v9
; CHECK-NEXT: ret
entry:
- %a = call <vscale x 8 x i8> @llvm.riscv.vssrl.nxv8i8.nxv8i8(
- <vscale x 8 x i8> undef,
- <vscale x 8 x i8> %0,
- <vscale x 8 x i8> %1,
- i64 %2)
-
- ret <vscale x 8 x i8> %a
+ %0 = call <vscale x 8 x i8> @llvm.riscv.vssrl.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> poison, <vscale x 8 x i8> %op1, <vscale x 8 x i8> %shift, i64 0, i64 %vl)
+ ret <vscale x 8 x i8> %0
}
-declare <vscale x 8 x i8> @llvm.riscv.vssrl.mask.nxv8i8.nxv8i8(
- <vscale x 8 x i8>,
- <vscale x 8 x i8>,
- <vscale x 8 x i8>,
- <vscale x 8 x i1>,
- i64,
- i64);
+declare <vscale x 8 x i8> @llvm.riscv.vssrl.nxv8i8.nxv8i8.i64(<vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, i64 immarg, i64)
-define <vscale x 8 x i8> @intrinsic_vssrl_mask_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vssrl_mask_vv_nxv8i8_nxv8i8_nxv8i8:
+define <vscale x 8 x i8> @test_vssrl_vx_u8m1(<vscale x 8 x i8> %op1, i64 %shift, i64 %vl) {
+; CHECK-LABEL: test_vssrl_vx_u8m1:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu
-; CHECK-NEXT: vssrl.vv v8, v9, v10, v0.t
+; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
+; CHECK-NEXT: csrwi vxrm, 0
+; CHECK-NEXT: vssrl.vx v8, v8, a0
; CHECK-NEXT: ret
entry:
- %a = call <vscale x 8 x i8> @llvm.riscv.vssrl.mask.nxv8i8.nxv8i8(
- <vscale x 8 x i8> %0,
- <vscale x 8 x i8> %1,
- <vscale x 8 x i8> %2,
- <vscale x 8 x i1> %3,
- i64 %4, i64 1)
-
- ret <vscale x 8 x i8> %a
+ %0 = call <vscale x 8 x i8> @llvm.riscv.vssrl.nxv8i8.i64.i64(<vscale x 8 x i8> poison, <vscale x 8 x i8> %op1, i64 %shift, i64 0, i64 %vl)
+ ret <vscale x 8 x i8> %0
}
-declare <vscale x 16 x i8> @llvm.riscv.vssrl.nxv16i8.nxv16i8(
- <vscale x 16 x i8>,
- <vscale x 16 x i8>,
- <vscale x 16 x i8>,
- i64);
+declare <vscale x 8 x i8> @llvm.riscv.vssrl.nxv8i8.i64.i64(<vscale x 8 x i8>, <vscale x 8 x i8>, i64, i64 immarg, i64)
-define <vscale x 16 x i8> @intrinsic_vssrl_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vssrl_vv_nxv16i8_nxv16i8_nxv16i8:
+define <vscale x 16 x i8> @test_vssrl_vv_u8m2(<vscale x 16 x i8> %op1, <vscale x 16 x i8> %shift, i64 %vl) {
+; CHECK-LABEL: test_vssrl_vv_u8m2:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma
+; CHECK-NEXT: csrwi vxrm, 0
; CHECK-NEXT: vssrl.vv v8, v8, v10
; CHECK-NEXT: ret
entry:
- %a = call <vscale x 16 x i8> @llvm.riscv.vssrl.nxv16i8.nxv16i8(
- <vscale x 16 x i8> undef,
- <vscale x 16 x i8> %0,
- <vscale x 16 x i8> %1,
- i64 %2)
-
- ret <vscale x 16 x i8> %a
+ %0 = call <vscale x 16 x i8> @llvm.riscv.vssrl.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> poison, <vscale x 16 x i8> %op1, <vscale x 16 x i8> %shift, i64 0, i64 %vl)
+ ret <vscale x 16 x i8> %0
}
-declare <vscale x 16 x i8> @llvm.riscv.vssrl.mask.nxv16i8.nxv16i8(
- <vscale x 16 x i8>,
- <vscale x 16 x i8>,
- <vscale x 16 x i8>,
- <vscale x 16 x i1>,
- i64,
- i64);
+declare <vscale x 16 x i8> @llvm.riscv.vssrl.nxv16i8.nxv16i8.i64(<vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, i64 immarg, i64)
-define <vscale x 16 x i8> @intrinsic_vssrl_mask_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vssrl_mask_vv_nxv16i8_nxv16i8_nxv16i8:
+define <vscale x 16 x i8> @test_vssrl_vx_u8m2(<vscale x 16 x i8> %op1, i64 %shift, i64 %vl) {
+; CHECK-LABEL: test_vssrl_vx_u8m2:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu
-; CHECK-NEXT: vssrl.vv v8, v10, v12, v0.t
+; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma
+; CHECK-NEXT: csrwi vxrm, 0
+; CHECK-NEXT: vssrl.vx v8, v8, a0
; CHECK-NEXT: ret
entry:
- %a = call <vscale x 16 x i8> @llvm.riscv.vssrl.mask.nxv16i8.nxv16i8(
- <vscale x 16 x i8> %0,
- <vscale x 16 x i8> %1,
- <vscale x 16 x i8> %2,
- <vscale x 16 x i1> %3,
- i64 %4, i64 1)
-
- ret <vscale x 16 x i8> %a
+ %0 = call <vscale x 16 x i8> @llvm.riscv.vssrl.nxv16i8.i64.i64(<vscale x 16 x i8> poison, <vscale x 16 x i8> %op1, i64 %shift, i64 0, i64 %vl)
+ ret <vscale x 16 x i8> %0
}
-declare <vscale x 32 x i8> @llvm.riscv.vssrl.nxv32i8.nxv32i8(
- <vscale x 32 x i8>,
- <vscale x 32 x i8>,
- <vscale x 32 x i8>,
- i64);
+declare <vscale x 16 x i8> @llvm.riscv.vssrl.nxv16i8.i64.i64(<vscale x 16 x i8>, <vscale x 16 x i8>, i64, i64 immarg, i64)
-define <vscale x 32 x i8> @intrinsic_vssrl_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vssrl_vv_nxv32i8_nxv32i8_nxv32i8:
+define <vscale x 32 x i8> @test_vssrl_vv_u8m4(<vscale x 32 x i8> %op1, <vscale x 32 x i8> %shift, i64 %vl) {
+; CHECK-LABEL: test_vssrl_vv_u8m4:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma
+; CHECK-NEXT: csrwi vxrm, 0
; CHECK-NEXT: vssrl.vv v8, v8, v12
; CHECK-NEXT: ret
entry:
- %a = call <vscale x 32 x i8> @llvm.riscv.vssrl.nxv32i8.nxv32i8(
- <vscale x 32 x i8> undef,
- <vscale x 32 x i8> %0,
- <vscale x 32 x i8> %1,
- i64 %2)
-
- ret <vscale x 32 x i8> %a
+ %0 = call <vscale x 32 x i8> @llvm.riscv.vssrl.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> poison, <vscale x 32 x i8> %op1, <vscale x 32 x i8> %shift, i64 0, i64 %vl)
+ ret <vscale x 32 x i8> %0
}
-declare <vscale x 32 x i8> @llvm.riscv.vssrl.mask.nxv32i8.nxv32i8(
- <vscale x 32 x i8>,
- <vscale x 32 x i8>,
- <vscale x 32 x i8>,
- <vscale x 32 x i1>,
- i64,
- i64);
+declare <vscale x 32 x i8> @llvm.riscv.vssrl.nxv32i8.nxv32i8.i64(<vscale x 32 x i8>, <vscale x 32 x i8>, <vscale x 32 x i8>, i64 immarg, i64)
-define <vscale x 32 x i8> @intrinsic_vssrl_mask_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vssrl_mask_vv_nxv32i8_nxv32i8_nxv32i8:
+define <vscale x 32 x i8> @test_vssrl_vx_u8m4(<vscale x 32 x i8> %op1, i64 %shift, i64 %vl) {
+; CHECK-LABEL: test_vssrl_vx_u8m4:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu
-; CHECK-NEXT: vssrl.vv v8, v12, v16, v0.t
+; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma
+; CHECK-NEXT: csrwi vxrm, 0
+; CHECK-NEXT: vssrl.vx v8, v8, a0
; CHECK-NEXT: ret
entry:
- %a = call <vscale x 32 x i8> @llvm.riscv.vssrl.mask.nxv32i8.nxv32i8(
- <vscale x 32 x i8> %0,
- <vscale x 32 x i8> %1,
- <vscale x 32 x i8> %2,
- <vscale x 32 x i1> %3,
- i64 %4, i64 1)
-
- ret <vscale x 32 x i8> %a
+ %0 = call <vscale x 32 x i8> @llvm.riscv.vssrl.nxv32i8.i64.i64(<vscale x 32 x i8> poison, <vscale x 32 x i8> %op1, i64 %shift, i64 0, i64 %vl)
+ ret <vscale x 32 x i8> %0
}
-declare <vscale x 64 x i8> @llvm.riscv.vssrl.nxv64i8.nxv64i8(
- <vscale x 64 x i8>,
- <vscale x 64 x i8>,
- <vscale x 64 x i8>,
- i64);
+declare <vscale x 32 x i8> @llvm.riscv.vssrl.nxv32i8.i64.i64(<vscale x 32 x i8>, <vscale x 32 x i8>, i64, i64 immarg, i64)
-define <vscale x 64 x i8> @intrinsic_vssrl_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vssrl_vv_nxv64i8_nxv64i8_nxv64i8:
+define <vscale x 64 x i8> @test_vssrl_vv_u8m8(<vscale x 64 x i8> %op1, <vscale x 64 x i8> %shift, i64 %vl) {
+; CHECK-LABEL: test_vssrl_vv_u8m8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma
+; CHECK-NEXT: csrwi vxrm, 0
; CHECK-NEXT: vssrl.vv v8, v8, v16
; CHECK-NEXT: ret
entry:
- %a = call <vscale x 64 x i8> @llvm.riscv.vssrl.nxv64i8.nxv64i8(
- <vscale x 64 x i8> undef,
- <vscale x 64 x i8> %0,
- <vscale x 64 x i8> %1,
- i64 %2)
-
- ret <vscale x 64 x i8> %a
+ %0 = call <vscale x 64 x i8> @llvm.riscv.vssrl.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> poison, <vscale x 64 x i8> %op1, <vscale x 64 x i8> %shift, i64 0, i64 %vl)
+ ret <vscale x 64 x i8> %0
}
-declare <vscale x 64 x i8> @llvm.riscv.vssrl.mask.nxv64i8.nxv64i8(
- <vscale x 64 x i8>,
- <vscale x 64 x i8>,
- <vscale x 64 x i8>,
- <vscale x 64 x i1>,
- i64,
- i64);
+declare <vscale x 64 x i8> @llvm.riscv.vssrl.nxv64i8.nxv64i8.i64(<vscale x 64 x i8>, <vscale x 64 x i8>, <vscale x 64 x i8>, i64 immarg, i64)
-define <vscale x 64 x i8> @intrinsic_vssrl_mask_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i8> %2, <vscale x 64 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vssrl_mask_vv_nxv64i8_nxv64i8_nxv64i8:
+define <vscale x 64 x i8> @test_vssrl_vx_u8m8(<vscale x 64 x i8> %op1, i64 %shift, i64 %vl) {
+; CHECK-LABEL: test_vssrl_vx_u8m8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vl8r.v v24, (a0)
-; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu
-; CHECK-NEXT: vssrl.vv v8, v16, v24, v0.t
+; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma
+; CHECK-NEXT: csrwi vxrm, 0
+; CHECK-NEXT: vssrl.vx v8, v8, a0
; CHECK-NEXT: ret
entry:
- %a = call <vscale x 64 x i8> @llvm.riscv.vssrl.mask.nxv64i8.nxv64i8(
- <vscale x 64 x i8> %0,
- <vscale x 64 x i8> %1,
- <vscale x 64 x i8> %2,
- <vscale x 64 x i1> %3,
- i64 %4, i64 1)
-
- ret <vscale x 64 x i8> %a
+ %0 = call <vscale x 64 x i8> @llvm.riscv.vssrl.nxv64i8.i64.i64(<vscale x 64 x i8> poison, <vscale x 64 x i8> %op1, i64 %shift, i64 0, i64 %vl)
+ ret <vscale x 64 x i8> %0
}
-declare <vscale x 1 x i16> @llvm.riscv.vssrl.nxv1i16.nxv1i16(
- <vscale x 1 x i16>,
- <vscale x 1 x i16>,
- <vscale x 1 x i16>,
- i64);
+declare <vscale x 64 x i8> @llvm.riscv.vssrl.nxv64i8.i64.i64(<vscale x 64 x i8>, <vscale x 64 x i8>, i64, i64 immarg, i64)
-define <vscale x 1 x i16> @intrinsic_vssrl_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vssrl_vv_nxv1i16_nxv1i16_nxv1i16:
+define <vscale x 1 x i16> @test_vssrl_vv_u16mf4(<vscale x 1 x i16> %op1, <vscale x 1 x i16> %shift, i64 %vl) {
+; CHECK-LABEL: test_vssrl_vv_u16mf4:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
+; CHECK-NEXT: csrwi vxrm, 0
; CHECK-NEXT: vssrl.vv v8, v8, v9
; CHECK-NEXT: ret
entry:
- %a = call <vscale x 1 x i16> @llvm.riscv.vssrl.nxv1i16.nxv1i16(
- <vscale x 1 x i16> undef,
- <vscale x 1 x i16> %0,
- <vscale x 1 x i16> %1,
- i64 %2)
-
- ret <vscale x 1 x i16> %a
+ %0 = call <vscale x 1 x i16> @llvm.riscv.vssrl.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> poison, <vscale x 1 x i16> %op1, <vscale x 1 x i16> %shift, i64 0, i64 %vl)
+ ret <vscale x 1 x i16> %0
}
-declare <vscale x 1 x i16> @llvm.riscv.vssrl.mask.nxv1i16.nxv1i16(
- <vscale x 1 x i16>,
- <vscale x 1 x i16>,
- <vscale x 1 x i16>,
- <vscale x 1 x i1>,
- i64,
- i64);
+declare <vscale x 1 x i16> @llvm.riscv.vssrl.nxv1i16.nxv1i16.i64(<vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, i64 immarg, i64)
-define <vscale x 1 x i16> @intrinsic_vssrl_mask_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vssrl_mask_vv_nxv1i16_nxv1i16_nxv1i16:
+define <vscale x 1 x i16> @test_vssrl_vx_u16mf4(<vscale x 1 x i16> %op1, i64 %shift, i64 %vl) {
+; CHECK-LABEL: test_vssrl_vx_u16mf4:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
-; CHECK-NEXT: vssrl.vv v8, v9, v10, v0.t
+; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
+; CHECK-NEXT: csrwi vxrm, 0
+; CHECK-NEXT: vssrl.vx v8, v8, a0
; CHECK-NEXT: ret
entry:
- %a = call <vscale x 1 x i16> @llvm.riscv.vssrl.mask.nxv1i16.nxv1i16(
- <vscale x 1 x i16> %0,
- <vscale x 1 x i16> %1,
- <vscale x 1 x i16> %2,
- <vscale x 1 x i1> %3,
- i64 %4, i64 1)
-
- ret <vscale x 1 x i16> %a
+ %0 = call <vscale x 1 x i16> @llvm.riscv.vssrl.nxv1i16.i64.i64(<vscale x 1 x i16> poison, <vscale x 1 x i16> %op1, i64 %shift, i64 0, i64 %vl)
+ ret <vscale x 1 x i16> %0
}
-declare <vscale x 2 x i16> @llvm.riscv.vssrl.nxv2i16.nxv2i16(
- <vscale x 2 x i16>,
- <vscale x 2 x i16>,
- <vscale x 2 x i16>,
- i64);
+declare <vscale x 1 x i16> @llvm.riscv.vssrl.nxv1i16.i64.i64(<vscale x 1 x i16>, <vscale x 1 x i16>, i64, i64 immarg, i64)
-define <vscale x 2 x i16> @intrinsic_vssrl_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vssrl_vv_nxv2i16_nxv2i16_nxv2i16:
+define <vscale x 2 x i16> @test_vssrl_vv_u16mf2(<vscale x 2 x i16> %op1, <vscale x 2 x i16> %shift, i64 %vl) {
+; CHECK-LABEL: test_vssrl_vv_u16mf2:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
+; CHECK-NEXT: csrwi vxrm, 0
; CHECK-NEXT: vssrl.vv v8, v8, v9
; CHECK-NEXT: ret
entry:
- %a = call <vscale x 2 x i16> @llvm.riscv.vssrl.nxv2i16.nxv2i16(
- <vscale x 2 x i16> undef,
- <vscale x 2 x i16> %0,
- <vscale x 2 x i16> %1,
- i64 %2)
-
- ret <vscale x 2 x i16> %a
+ %0 = call <vscale x 2 x i16> @llvm.riscv.vssrl.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> poison, <vscale x 2 x i16> %op1, <vscale x 2 x i16> %shift, i64 0, i64 %vl)
+ ret <vscale x 2 x i16> %0
}
-declare <vscale x 2 x i16> @llvm.riscv.vssrl.mask.nxv2i16.nxv2i16(
- <vscale x 2 x i16>,
- <vscale x 2 x i16>,
- <vscale x 2 x i16>,
- <vscale x 2 x i1>,
- i64,
- i64);
+declare <vscale x 2 x i16> @llvm.riscv.vssrl.nxv2i16.nxv2i16.i64(<vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, i64 immarg, i64)
-define <vscale x 2 x i16> @intrinsic_vssrl_mask_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vssrl_mask_vv_nxv2i16_nxv2i16_nxv2i16:
+define <vscale x 2 x i16> @test_vssrl_vx_u16mf2(<vscale x 2 x i16> %op1, i64 %shift, i64 %vl) {
+; CHECK-LABEL: test_vssrl_vx_u16mf2:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu
-; CHECK-NEXT: vssrl.vv v8, v9, v10, v0.t
+; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
+; CHECK-NEXT: csrwi vxrm, 0
+; CHECK-NEXT: vssrl.vx v8, v8, a0
; CHECK-NEXT: ret
entry:
- %a = call <vscale x 2 x i16> @llvm.riscv.vssrl.mask.nxv2i16.nxv2i16(
- <vscale x 2 x i16> %0,
- <vscale x 2 x i16> %1,
- <vscale x 2 x i16> %2,
- <vscale x 2 x i1> %3,
- i64 %4, i64 1)
-
- ret <vscale x 2 x i16> %a
+ %0 = call <vscale x 2 x i16> @llvm.riscv.vssrl.nxv2i16.i64.i64(<vscale x 2 x i16> poison, <vscale x 2 x i16> %op1, i64 %shift, i64 0, i64 %vl)
+ ret <vscale x 2 x i16> %0
}
-declare <vscale x 4 x i16> @llvm.riscv.vssrl.nxv4i16.nxv4i16(
- <vscale x 4 x i16>,
- <vscale x 4 x i16>,
- <vscale x 4 x i16>,
- i64);
+declare <vscale x 2 x i16> @llvm.riscv.vssrl.nxv2i16.i64.i64(<vscale x 2 x i16>, <vscale x 2 x i16>, i64, i64 immarg, i64)
-define <vscale x 4 x i16> @intrinsic_vssrl_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vssrl_vv_nxv4i16_nxv4i16_nxv4i16:
+define <vscale x 4 x i16> @test_vssrl_vv_u16m1(<vscale x 4 x i16> %op1, <vscale x 4 x i16> %shift, i64 %vl) {
+; CHECK-LABEL: test_vssrl_vv_u16m1:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
+; CHECK-NEXT: csrwi vxrm, 0
; CHECK-NEXT: vssrl.vv v8, v8, v9
; CHECK-NEXT: ret
entry:
- %a = call <vscale x 4 x i16> @llvm.riscv.vssrl.nxv4i16.nxv4i16(
- <vscale x 4 x i16> undef,
- <vscale x 4 x i16> %0,
- <vscale x 4 x i16> %1,
- i64 %2)
-
- ret <vscale x 4 x i16> %a
+ %0 = call <vscale x 4 x i16> @llvm.riscv.vssrl.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> poison, <vscale x 4 x i16> %op1, <vscale x 4 x i16> %shift, i64 0, i64 %vl)
+ ret <vscale x 4 x i16> %0
}
-declare <vscale x 4 x i16> @llvm.riscv.vssrl.mask.nxv4i16.nxv4i16(
- <vscale x 4 x i16>,
- <vscale x 4 x i16>,
- <vscale x 4 x i16>,
- <vscale x 4 x i1>,
- i64,
- i64);
+declare <vscale x 4 x i16> @llvm.riscv.vssrl.nxv4i16.nxv4i16.i64(<vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, i64 immarg, i64)
-define <vscale x 4 x i16> @intrinsic_vssrl_mask_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vssrl_mask_vv_nxv4i16_nxv4i16_nxv4i16:
+define <vscale x 4 x i16> @test_vssrl_vx_u16m1(<vscale x 4 x i16> %op1, i64 %shift, i64 %vl) {
+; CHECK-LABEL: test_vssrl_vx_u16m1:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu
-; CHECK-NEXT: vssrl.vv v8, v9, v10, v0.t
+; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
+; CHECK-NEXT: csrwi vxrm, 0
+; CHECK-NEXT: vssrl.vx v8, v8, a0
; CHECK-NEXT: ret
entry:
- %a = call <vscale x 4 x i16> @llvm.riscv.vssrl.mask.nxv4i16.nxv4i16(
- <vscale x 4 x i16> %0,
- <vscale x 4 x i16> %1,
- <vscale x 4 x i16> %2,
- <vscale x 4 x i1> %3,
- i64 %4, i64 1)
-
- ret <vscale x 4 x i16> %a
+ %0 = call <vscale x 4 x i16> @llvm.riscv.vssrl.nxv4i16.i64.i64(<vscale x 4 x i16> poison, <vscale x 4 x i16> %op1, i64 %shift, i64 0, i64 %vl)
+ ret <vscale x 4 x i16> %0
}
-declare <vscale x 8 x i16> @llvm.riscv.vssrl.nxv8i16.nxv8i16(
- <vscale x 8 x i16>,
- <vscale x 8 x i16>,
- <vscale x 8 x i16>,
- i64);
+declare <vscale x 4 x i16> @llvm.riscv.vssrl.nxv4i16.i64.i64(<vscale x 4 x i16>, <vscale x 4 x i16>, i64, i64 immarg, i64)
-define <vscale x 8 x i16> @intrinsic_vssrl_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vssrl_vv_nxv8i16_nxv8i16_nxv8i16:
+define <vscale x 8 x i16> @test_vssrl_vv_u16m2(<vscale x 8 x i16> %op1, <vscale x 8 x i16> %shift, i64 %vl) {
+; CHECK-LABEL: test_vssrl_vv_u16m2:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
+; CHECK-NEXT: csrwi vxrm, 0
; CHECK-NEXT: vssrl.vv v8, v8, v10
; CHECK-NEXT: ret
entry:
- %a = call <vscale x 8 x i16> @llvm.riscv.vssrl.nxv8i16.nxv8i16(
- <vscale x 8 x i16> undef,
- <vscale x 8 x i16> %0,
- <vscale x 8 x i16> %1,
- i64 %2)
-
- ret <vscale x 8 x i16> %a
+ %0 = call <vscale x 8 x i16> @llvm.riscv.vssrl.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> poison, <vscale x 8 x i16> %op1, <vscale x 8 x i16> %shift, i64 0, i64 %vl)
+ ret <vscale x 8 x i16> %0
}
-declare <vscale x 8 x i16> @llvm.riscv.vssrl.mask.nxv8i16.nxv8i16(
- <vscale x 8 x i16>,
- <vscale x 8 x i16>,
- <vscale x 8 x i16>,
- <vscale x 8 x i1>,
- i64,
- i64);
+declare <vscale x 8 x i16> @llvm.riscv.vssrl.nxv8i16.nxv8i16.i64(<vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, i64 immarg, i64)
-define <vscale x 8 x i16> @intrinsic_vssrl_mask_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vssrl_mask_vv_nxv8i16_nxv8i16_nxv8i16:
+define <vscale x 8 x i16> @test_vssrl_vx_u16m2(<vscale x 8 x i16> %op1, i64 %shift, i64 %vl) {
+; CHECK-LABEL: test_vssrl_vx_u16m2:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu
-; CHECK-NEXT: vssrl.vv v8, v10, v12, v0.t
+; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
+; CHECK-NEXT: csrwi vxrm, 0
+; CHECK-NEXT: vssrl.vx v8, v8, a0
; CHECK-NEXT: ret
entry:
- %a = call <vscale x 8 x i16> @llvm.riscv.vssrl.mask.nxv8i16.nxv8i16(
- <vscale x 8 x i16> %0,
- <vscale x 8 x i16> %1,
- <vscale x 8 x i16> %2,
- <vscale x 8 x i1> %3,
- i64 %4, i64 1)
-
- ret <vscale x 8 x i16> %a
+ %0 = call <vscale x 8 x i16> @llvm.riscv.vssrl.nxv8i16.i64.i64(<vscale x 8 x i16> poison, <vscale x 8 x i16> %op1, i64 %shift, i64 0, i64 %vl)
+ ret <vscale x 8 x i16> %0
}
-declare <vscale x 16 x i16> @llvm.riscv.vssrl.nxv16i16.nxv16i16(
- <vscale x 16 x i16>,
- <vscale x 16 x i16>,
- <vscale x 16 x i16>,
- i64);
+declare <vscale x 8 x i16> @llvm.riscv.vssrl.nxv8i16.i64.i64(<vscale x 8 x i16>, <vscale x 8 x i16>, i64, i64 immarg, i64)
-define <vscale x 16 x i16> @intrinsic_vssrl_vv_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vssrl_vv_nxv16i16_nxv16i16_nxv16i16:
+define <vscale x 16 x i16> @test_vssrl_vv_u16m4(<vscale x 16 x i16> %op1, <vscale x 16 x i16> %shift, i64 %vl) {
+; CHECK-LABEL: test_vssrl_vv_u16m4:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma
+; CHECK-NEXT: csrwi vxrm, 0
; CHECK-NEXT: vssrl.vv v8, v8, v12
; CHECK-NEXT: ret
entry:
- %a = call <vscale x 16 x i16> @llvm.riscv.vssrl.nxv16i16.nxv16i16(
- <vscale x 16 x i16> undef,
- <vscale x 16 x i16> %0,
- <vscale x 16 x i16> %1,
- i64 %2)
-
- ret <vscale x 16 x i16> %a
+ %0 = call <vscale x 16 x i16> @llvm.riscv.vssrl.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> poison, <vscale x 16 x i16> %op1, <vscale x 16 x i16> %shift, i64 0, i64 %vl)
+ ret <vscale x 16 x i16> %0
}
-declare <vscale x 16 x i16> @llvm.riscv.vssrl.mask.nxv16i16.nxv16i16(
- <vscale x 16 x i16>,
- <vscale x 16 x i16>,
- <vscale x 16 x i16>,
- <vscale x 16 x i1>,
- i64,
- i64);
+declare <vscale x 16 x i16> @llvm.riscv.vssrl.nxv16i16.nxv16i16.i64(<vscale x 16 x i16>, <vscale x 16 x i16>, <vscale x 16 x i16>, i64 immarg, i64)
-define <vscale x 16 x i16> @intrinsic_vssrl_mask_vv_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vssrl_mask_vv_nxv16i16_nxv16i16_nxv16i16:
+define <vscale x 16 x i16> @test_vssrl_vx_u16m4(<vscale x 16 x i16> %op1, i64 %shift, i64 %vl) {
+; CHECK-LABEL: test_vssrl_vx_u16m4:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu
-; CHECK-NEXT: vssrl.vv v8, v12, v16, v0.t
+; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma
+; CHECK-NEXT: csrwi vxrm, 0
+; CHECK-NEXT: vssrl.vx v8, v8, a0
; CHECK-NEXT: ret
entry:
- %a = call <vscale x 16 x i16> @llvm.riscv.vssrl.mask.nxv16i16.nxv16i16(
- <vscale x 16 x i16> %0,
- <vscale x 16 x i16> %1,
- <vscale x 16 x i16> %2,
- <vscale x 16 x i1> %3,
- i64 %4, i64 1)
-
- ret <vscale x 16 x i16> %a
+ %0 = call <vscale x 16 x i16> @llvm.riscv.vssrl.nxv16i16.i64.i64(<vscale x 16 x i16> poison, <vscale x 16 x i16> %op1, i64 %shift, i64 0, i64 %vl)
+ ret <vscale x 16 x i16> %0
}
-declare <vscale x 32 x i16> @llvm.riscv.vssrl.nxv32i16.nxv32i16(
- <vscale x 32 x i16>,
- <vscale x 32 x i16>,
- <vscale x 32 x i16>,
- i64);
+declare <vscale x 16 x i16> @llvm.riscv.vssrl.nxv16i16.i64.i64(<vscale x 16 x i16>, <vscale x 16 x i16>, i64, i64 immarg, i64)
-define <vscale x 32 x i16> @intrinsic_vssrl_vv_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vssrl_vv_nxv32i16_nxv32i16_nxv32i16:
+define <vscale x 32 x i16> @test_vssrl_vv_u16m8(<vscale x 32 x i16> %op1, <vscale x 32 x i16> %shift, i64 %vl) {
+; CHECK-LABEL: test_vssrl_vv_u16m8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma
+; CHECK-NEXT: csrwi vxrm, 0
; CHECK-NEXT: vssrl.vv v8, v8, v16
; CHECK-NEXT: ret
entry:
- %a = call <vscale x 32 x i16> @llvm.riscv.vssrl.nxv32i16.nxv32i16(
- <vscale x 32 x i16> undef,
- <vscale x 32 x i16> %0,
- <vscale x 32 x i16> %1,
- i64 %2)
-
- ret <vscale x 32 x i16> %a
+ %0 = call <vscale x 32 x i16> @llvm.riscv.vssrl.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> poison, <vscale x 32 x i16> %op1, <vscale x 32 x i16> %shift, i64 0, i64 %vl)
+ ret <vscale x 32 x i16> %0
}
-declare <vscale x 32 x i16> @llvm.riscv.vssrl.mask.nxv32i16.nxv32i16(
- <vscale x 32 x i16>,
- <vscale x 32 x i16>,
- <vscale x 32 x i16>,
- <vscale x 32 x i1>,
- i64,
- i64);
+declare <vscale x 32 x i16> @llvm.riscv.vssrl.nxv32i16.nxv32i16.i64(<vscale x 32 x i16>, <vscale x 32 x i16>, <vscale x 32 x i16>, i64 immarg, i64)
-define <vscale x 32 x i16> @intrinsic_vssrl_mask_vv_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, <vscale x 32 x i16> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vssrl_mask_vv_nxv32i16_nxv32i16_nxv32i16:
+define <vscale x 32 x i16> @test_vssrl_vx_u16m8(<vscale x 32 x i16> %op1, i64 %shift, i64 %vl) {
+; CHECK-LABEL: test_vssrl_vx_u16m8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vl8re16.v v24, (a0)
-; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu
-; CHECK-NEXT: vssrl.vv v8, v16, v24, v0.t
+; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma
+; CHECK-NEXT: csrwi vxrm, 0
+; CHECK-NEXT: vssrl.vx v8, v8, a0
; CHECK-NEXT: ret
entry:
- %a = call <vscale x 32 x i16> @llvm.riscv.vssrl.mask.nxv32i16.nxv32i16(
- <vscale x 32 x i16> %0,
- <vscale x 32 x i16> %1,
- <vscale x 32 x i16> %2,
- <vscale x 32 x i1> %3,
- i64 %4, i64 1)
-
- ret <vscale x 32 x i16> %a
+ %0 = call <vscale x 32 x i16> @llvm.riscv.vssrl.nxv32i16.i64.i64(<vscale x 32 x i16> poison, <vscale x 32 x i16> %op1, i64 %shift, i64 0, i64 %vl)
+ ret <vscale x 32 x i16> %0
}
-declare <vscale x 1 x i32> @llvm.riscv.vssrl.nxv1i32.nxv1i32(
- <vscale x 1 x i32>,
- <vscale x 1 x i32>,
- <vscale x 1 x i32>,
- i64);
+declare <vscale x 32 x i16> @llvm.riscv.vssrl.nxv32i16.i64.i64(<vscale x 32 x i16>, <vscale x 32 x i16>, i64, i64 immarg, i64)
-define <vscale x 1 x i32> @intrinsic_vssrl_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vssrl_vv_nxv1i32_nxv1i32_nxv1i32:
+define <vscale x 1 x i32> @test_vssrl_vv_u32mf2(<vscale x 1 x i32> %op1, <vscale x 1 x i32> %shift, i64 %vl) {
+; CHECK-LABEL: test_vssrl_vv_u32mf2:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
+; CHECK-NEXT: csrwi vxrm, 0
; CHECK-NEXT: vssrl.vv v8, v8, v9
; CHECK-NEXT: ret
entry:
- %a = call <vscale x 1 x i32> @llvm.riscv.vssrl.nxv1i32.nxv1i32(
- <vscale x 1 x i32> undef,
- <vscale x 1 x i32> %0,
- <vscale x 1 x i32> %1,
- i64 %2)
-
- ret <vscale x 1 x i32> %a
+ %0 = call <vscale x 1 x i32> @llvm.riscv.vssrl.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> %op1, <vscale x 1 x i32> %shift, i64 0, i64 %vl)
+ ret <vscale x 1 x i32> %0
}
-declare <vscale x 1 x i32> @llvm.riscv.vssrl.mask.nxv1i32.nxv1i32(
- <vscale x 1 x i32>,
- <vscale x 1 x i32>,
- <vscale x 1 x i32>,
- <vscale x 1 x i1>,
- i64,
- i64);
+declare <vscale x 1 x i32> @llvm.riscv.vssrl.nxv1i32.nxv1i32.i64(<vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, i64 immarg, i64)
-define <vscale x 1 x i32> @intrinsic_vssrl_mask_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vssrl_mask_vv_nxv1i32_nxv1i32_nxv1i32:
+define <vscale x 1 x i32> @test_vssrl_vx_u32mf2(<vscale x 1 x i32> %op1, i64 %shift, i64 %vl) {
+; CHECK-LABEL: test_vssrl_vx_u32mf2:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu
-; CHECK-NEXT: vssrl.vv v8, v9, v10, v0.t
+; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
+; CHECK-NEXT: csrwi vxrm, 0
+; CHECK-NEXT: vssrl.vx v8, v8, a0
; CHECK-NEXT: ret
entry:
- %a = call <vscale x 1 x i32> @llvm.riscv.vssrl.mask.nxv1i32.nxv1i32(
- <vscale x 1 x i32> %0,
- <vscale x 1 x i32> %1,
- <vscale x 1 x i32> %2,
- <vscale x 1 x i1> %3,
- i64 %4, i64 1)
-
- ret <vscale x 1 x i32> %a
+ %0 = call <vscale x 1 x i32> @llvm.riscv.vssrl.nxv1i32.i64.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> %op1, i64 %shift, i64 0, i64 %vl)
+ ret <vscale x 1 x i32> %0
}
-declare <vscale x 2 x i32> @llvm.riscv.vssrl.nxv2i32.nxv2i32(
- <vscale x 2 x i32>,
- <vscale x 2 x i32>,
- <vscale x 2 x i32>,
- i64);
+declare <vscale x 1 x i32> @llvm.riscv.vssrl.nxv1i32.i64.i64(<vscale x 1 x i32>, <vscale x 1 x i32>, i64, i64 immarg, i64)
-define <vscale x 2 x i32> @intrinsic_vssrl_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vssrl_vv_nxv2i32_nxv2i32_nxv2i32:
+define <vscale x 2 x i32> @test_vssrl_vv_u32m1(<vscale x 2 x i32> %op1, <vscale x 2 x i32> %shift, i64 %vl) {
+; CHECK-LABEL: test_vssrl_vv_u32m1:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
+; CHECK-NEXT: csrwi vxrm, 0
; CHECK-NEXT: vssrl.vv v8, v8, v9
; CHECK-NEXT: ret
entry:
- %a = call <vscale x 2 x i32> @llvm.riscv.vssrl.nxv2i32.nxv2i32(
- <vscale x 2 x i32> undef,
- <vscale x 2 x i32> %0,
- <vscale x 2 x i32> %1,
- i64 %2)
-
- ret <vscale x 2 x i32> %a
+ %0 = call <vscale x 2 x i32> @llvm.riscv.vssrl.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> poison, <vscale x 2 x i32> %op1, <vscale x 2 x i32> %shift, i64 0, i64 %vl)
+ ret <vscale x 2 x i32> %0
}
-declare <vscale x 2 x i32> @llvm.riscv.vssrl.mask.nxv2i32.nxv2i32(
- <vscale x 2 x i32>,
- <vscale x 2 x i32>,
- <vscale x 2 x i32>,
- <vscale x 2 x i1>,
- i64,
- i64);
+declare <vscale x 2 x i32> @llvm.riscv.vssrl.nxv2i32.nxv2i32.i64(<vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, i64 immarg, i64)
-define <vscale x 2 x i32> @intrinsic_vssrl_mask_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vssrl_mask_vv_nxv2i32_nxv2i32_nxv2i32:
+define <vscale x 2 x i32> @test_vssrl_vx_u32m1(<vscale x 2 x i32> %op1, i64 %shift, i64 %vl) {
+; CHECK-LABEL: test_vssrl_vx_u32m1:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
-; CHECK-NEXT: vssrl.vv v8, v9, v10, v0.t
+; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
+; CHECK-NEXT: csrwi vxrm, 0
+; CHECK-NEXT: vssrl.vx v8, v8, a0
; CHECK-NEXT: ret
entry:
- %a = call <vscale x 2 x i32> @llvm.riscv.vssrl.mask.nxv2i32.nxv2i32(
- <vscale x 2 x i32> %0,
- <vscale x 2 x i32> %1,
- <vscale x 2 x i32> %2,
- <vscale x 2 x i1> %3,
- i64 %4, i64 1)
-
- ret <vscale x 2 x i32> %a
+ %0 = call <vscale x 2 x i32> @llvm.riscv.vssrl.nxv2i32.i64.i64(<vscale x 2 x i32> poison, <vscale x 2 x i32> %op1, i64 %shift, i64 0, i64 %vl)
+ ret <vscale x 2 x i32> %0
}
-declare <vscale x 4 x i32> @llvm.riscv.vssrl.nxv4i32.nxv4i32(
- <vscale x 4 x i32>,
- <vscale x 4 x i32>,
- <vscale x 4 x i32>,
- i64);
+declare <vscale x 2 x i32> @llvm.riscv.vssrl.nxv2i32.i64.i64(<vscale x 2 x i32>, <vscale x 2 x i32>, i64, i64 immarg, i64)
-define <vscale x 4 x i32> @intrinsic_vssrl_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vssrl_vv_nxv4i32_nxv4i32_nxv4i32:
+define <vscale x 4 x i32> @test_vssrl_vv_u32m2(<vscale x 4 x i32> %op1, <vscale x 4 x i32> %shift, i64 %vl) {
+; CHECK-LABEL: test_vssrl_vv_u32m2:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
+; CHECK-NEXT: csrwi vxrm, 0
; CHECK-NEXT: vssrl.vv v8, v8, v10
; CHECK-NEXT: ret
entry:
- %a = call <vscale x 4 x i32> @llvm.riscv.vssrl.nxv4i32.nxv4i32(
- <vscale x 4 x i32> undef,
- <vscale x 4 x i32> %0,
- <vscale x 4 x i32> %1,
- i64 %2)
-
- ret <vscale x 4 x i32> %a
+ %0 = call <vscale x 4 x i32> @llvm.riscv.vssrl.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> poison, <vscale x 4 x i32> %op1, <vscale x 4 x i32> %shift, i64 0, i64 %vl)
+ ret <vscale x 4 x i32> %0
}
-declare <vscale x 4 x i32> @llvm.riscv.vssrl.mask.nxv4i32.nxv4i32(
- <vscale x 4 x i32>,
- <vscale x 4 x i32>,
- <vscale x 4 x i32>,
- <vscale x 4 x i1>,
- i64,
- i64);
+declare <vscale x 4 x i32> @llvm.riscv.vssrl.nxv4i32.nxv4i32.i64(<vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, i64 immarg, i64)
-define <vscale x 4 x i32> @intrinsic_vssrl_mask_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vssrl_mask_vv_nxv4i32_nxv4i32_nxv4i32:
+define <vscale x 4 x i32> @test_vssrl_vx_u32m2(<vscale x 4 x i32> %op1, i64 %shift, i64 %vl) {
+; CHECK-LABEL: test_vssrl_vx_u32m2:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu
-; CHECK-NEXT: vssrl.vv v8, v10, v12, v0.t
+; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
+; CHECK-NEXT: csrwi vxrm, 0
+; CHECK-NEXT: vssrl.vx v8, v8, a0
; CHECK-NEXT: ret
entry:
- %a = call <vscale x 4 x i32> @llvm.riscv.vssrl.mask.nxv4i32.nxv4i32(
- <vscale x 4 x i32> %0,
- <vscale x 4 x i32> %1,
- <vscale x 4 x i32> %2,
- <vscale x 4 x i1> %3,
- i64 %4, i64 1)
-
- ret <vscale x 4 x i32> %a
+ %0 = call <vscale x 4 x i32> @llvm.riscv.vssrl.nxv4i32.i64.i64(<vscale x 4 x i32> poison, <vscale x 4 x i32> %op1, i64 %shift, i64 0, i64 %vl)
+ ret <vscale x 4 x i32> %0
}
-declare <vscale x 8 x i32> @llvm.riscv.vssrl.nxv8i32.nxv8i32(
- <vscale x 8 x i32>,
- <vscale x 8 x i32>,
- <vscale x 8 x i32>,
- i64);
+declare <vscale x 4 x i32> @llvm.riscv.vssrl.nxv4i32.i64.i64(<vscale x 4 x i32>, <vscale x 4 x i32>, i64, i64 immarg, i64)
-define <vscale x 8 x i32> @intrinsic_vssrl_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vssrl_vv_nxv8i32_nxv8i32_nxv8i32:
+define <vscale x 8 x i32> @test_vssrl_vv_u32m4(<vscale x 8 x i32> %op1, <vscale x 8 x i32> %shift, i64 %vl) {
+; CHECK-LABEL: test_vssrl_vv_u32m4:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
+; CHECK-NEXT: csrwi vxrm, 0
; CHECK-NEXT: vssrl.vv v8, v8, v12
; CHECK-NEXT: ret
entry:
- %a = call <vscale x 8 x i32> @llvm.riscv.vssrl.nxv8i32.nxv8i32(
- <vscale x 8 x i32> undef,
- <vscale x 8 x i32> %0,
- <vscale x 8 x i32> %1,
- i64 %2)
-
- ret <vscale x 8 x i32> %a
+ %0 = call <vscale x 8 x i32> @llvm.riscv.vssrl.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> poison, <vscale x 8 x i32> %op1, <vscale x 8 x i32> %shift, i64 0, i64 %vl)
+ ret <vscale x 8 x i32> %0
}
-declare <vscale x 8 x i32> @llvm.riscv.vssrl.mask.nxv8i32.nxv8i32(
- <vscale x 8 x i32>,
- <vscale x 8 x i32>,
- <vscale x 8 x i32>,
- <vscale x 8 x i1>,
- i64,
- i64);
+declare <vscale x 8 x i32> @llvm.riscv.vssrl.nxv8i32.nxv8i32.i64(<vscale x 8 x i32>, <vscale x 8 x i32>, <vscale x 8 x i32>, i64 immarg, i64)
-define <vscale x 8 x i32> @intrinsic_vssrl_mask_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vssrl_mask_vv_nxv8i32_nxv8i32_nxv8i32:
+define <vscale x 8 x i32> @test_vssrl_vx_u32m4(<vscale x 8 x i32> %op1, i64 %shift, i64 %vl) {
+; CHECK-LABEL: test_vssrl_vx_u32m4:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu
-; CHECK-NEXT: vssrl.vv v8, v12, v16, v0.t
+; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma
+; CHECK-NEXT: csrwi vxrm, 0
+; CHECK-NEXT: vssrl.vx v8, v8, a0
; CHECK-NEXT: ret
entry:
- %a = call <vscale x 8 x i32> @llvm.riscv.vssrl.mask.nxv8i32.nxv8i32(
- <vscale x 8 x i32> %0,
- <vscale x 8 x i32> %1,
- <vscale x 8 x i32> %2,
- <vscale x 8 x i1> %3,
- i64 %4, i64 1)
-
- ret <vscale x 8 x i32> %a
+ %0 = call <vscale x 8 x i32> @llvm.riscv.vssrl.nxv8i32.i64.i64(<vscale x 8 x i32> poison, <vscale x 8 x i32> %op1, i64 %shift, i64 0, i64 %vl)
+ ret <vscale x 8 x i32> %0
}
-declare <vscale x 16 x i32> @llvm.riscv.vssrl.nxv16i32.nxv16i32(
- <vscale x 16 x i32>,
- <vscale x 16 x i32>,
- <vscale x 16 x i32>,
- i64);
+declare <vscale x 8 x i32> @llvm.riscv.vssrl.nxv8i32.i64.i64(<vscale x 8 x i32>, <vscale x 8 x i32>, i64, i64 immarg, i64)
-define <vscale x 16 x i32> @intrinsic_vssrl_vv_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vssrl_vv_nxv16i32_nxv16i32_nxv16i32:
+define <vscale x 16 x i32> @test_vssrl_vv_u32m8(<vscale x 16 x i32> %op1, <vscale x 16 x i32> %shift, i64 %vl) {
+; CHECK-LABEL: test_vssrl_vv_u32m8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma
+; CHECK-NEXT: csrwi vxrm, 0
; CHECK-NEXT: vssrl.vv v8, v8, v16
; CHECK-NEXT: ret
entry:
- %a = call <vscale x 16 x i32> @llvm.riscv.vssrl.nxv16i32.nxv16i32(
- <vscale x 16 x i32> undef,
- <vscale x 16 x i32> %0,
- <vscale x 16 x i32> %1,
- i64 %2)
-
- ret <vscale x 16 x i32> %a
+ %0 = call <vscale x 16 x i32> @llvm.riscv.vssrl.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> poison, <vscale x 16 x i32> %op1, <vscale x 16 x i32> %shift, i64 0, i64 %vl)
+ ret <vscale x 16 x i32> %0
}
-declare <vscale x 16 x i32> @llvm.riscv.vssrl.mask.nxv16i32.nxv16i32(
- <vscale x 16 x i32>,
- <vscale x 16 x i32>,
- <vscale x 16 x i32>,
- <vscale x 16 x i1>,
- i64,
- i64);
+declare <vscale x 16 x i32> @llvm.riscv.vssrl.nxv16i32.nxv16i32.i64(<vscale x 16 x i32>, <vscale x 16 x i32>, <vscale x 16 x i32>, i64 immarg, i64)
-define <vscale x 16 x i32> @intrinsic_vssrl_mask_vv_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vssrl_mask_vv_nxv16i32_nxv16i32_nxv16i32:
+define <vscale x 16 x i32> @test_vssrl_vx_u32m8(<vscale x 16 x i32> %op1, i64 %shift, i64 %vl) {
+; CHECK-LABEL: test_vssrl_vx_u32m8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vl8re32.v v24, (a0)
-; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu
-; CHECK-NEXT: vssrl.vv v8, v16, v24, v0.t
+; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma
+; CHECK-NEXT: csrwi vxrm, 0
+; CHECK-NEXT: vssrl.vx v8, v8, a0
; CHECK-NEXT: ret
entry:
- %a = call <vscale x 16 x i32> @llvm.riscv.vssrl.mask.nxv16i32.nxv16i32(
- <vscale x 16 x i32> %0,
- <vscale x 16 x i32> %1,
- <vscale x 16 x i32> %2,
- <vscale x 16 x i1> %3,
- i64 %4, i64 1)
-
- ret <vscale x 16 x i32> %a
+ %0 = call <vscale x 16 x i32> @llvm.riscv.vssrl.nxv16i32.i64.i64(<vscale x 16 x i32> poison, <vscale x 16 x i32> %op1, i64 %shift, i64 0, i64 %vl)
+ ret <vscale x 16 x i32> %0
}
-declare <vscale x 1 x i64> @llvm.riscv.vssrl.nxv1i64.nxv1i64(
- <vscale x 1 x i64>,
- <vscale x 1 x i64>,
- <vscale x 1 x i64>,
- i64);
+declare <vscale x 16 x i32> @llvm.riscv.vssrl.nxv16i32.i64.i64(<vscale x 16 x i32>, <vscale x 16 x i32>, i64, i64 immarg, i64)
-define <vscale x 1 x i64> @intrinsic_vssrl_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vssrl_vv_nxv1i64_nxv1i64_nxv1i64:
+define <vscale x 1 x i64> @test_vssrl_vv_u64m1(<vscale x 1 x i64> %op1, <vscale x 1 x i64> %shift, i64 %vl) {
+; CHECK-LABEL: test_vssrl_vv_u64m1:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
+; CHECK-NEXT: csrwi vxrm, 0
; CHECK-NEXT: vssrl.vv v8, v8, v9
; CHECK-NEXT: ret
entry:
- %a = call <vscale x 1 x i64> @llvm.riscv.vssrl.nxv1i64.nxv1i64(
- <vscale x 1 x i64> undef,
- <vscale x 1 x i64> %0,
- <vscale x 1 x i64> %1,
- i64 %2)
-
- ret <vscale x 1 x i64> %a
+ %0 = call <vscale x 1 x i64> @llvm.riscv.vssrl.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> poison, <vscale x 1 x i64> %op1, <vscale x 1 x i64> %shift, i64 0, i64 %vl)
+ ret <vscale x 1 x i64> %0
}
-declare <vscale x 1 x i64> @llvm.riscv.vssrl.mask.nxv1i64.nxv1i64(
- <vscale x 1 x i64>,
- <vscale x 1 x i64>,
- <vscale x 1 x i64>,
- <vscale x 1 x i1>,
- i64,
- i64);
+declare <vscale x 1 x i64> @llvm.riscv.vssrl.nxv1i64.nxv1i64.i64(<vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, i64 immarg, i64)
-define <vscale x 1 x i64> @intrinsic_vssrl_mask_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vssrl_mask_vv_nxv1i64_nxv1i64_nxv1i64:
+define <vscale x 1 x i64> @test_vssrl_vx_u64m1(<vscale x 1 x i64> %op1, i64 %shift, i64 %vl) {
+; CHECK-LABEL: test_vssrl_vx_u64m1:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu
-; CHECK-NEXT: vssrl.vv v8, v9, v10, v0.t
+; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
+; CHECK-NEXT: csrwi vxrm, 0
+; CHECK-NEXT: vssrl.vx v8, v8, a0
; CHECK-NEXT: ret
entry:
- %a = call <vscale x 1 x i64> @llvm.riscv.vssrl.mask.nxv1i64.nxv1i64(
- <vscale x 1 x i64> %0,
- <vscale x 1 x i64> %1,
- <vscale x 1 x i64> %2,
- <vscale x 1 x i1> %3,
- i64 %4, i64 1)
-
- ret <vscale x 1 x i64> %a
+ %0 = call <vscale x 1 x i64> @llvm.riscv.vssrl.nxv1i64.i64.i64(<vscale x 1 x i64> poison, <vscale x 1 x i64> %op1, i64 %shift, i64 0, i64 %vl)
+ ret <vscale x 1 x i64> %0
}
-declare <vscale x 2 x i64> @llvm.riscv.vssrl.nxv2i64.nxv2i64(
- <vscale x 2 x i64>,
- <vscale x 2 x i64>,
- <vscale x 2 x i64>,
- i64);
+declare <vscale x 1 x i64> @llvm.riscv.vssrl.nxv1i64.i64.i64(<vscale x 1 x i64>, <vscale x 1 x i64>, i64, i64 immarg, i64)
-define <vscale x 2 x i64> @intrinsic_vssrl_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vssrl_vv_nxv2i64_nxv2i64_nxv2i64:
+define <vscale x 2 x i64> @test_vssrl_vv_u64m2(<vscale x 2 x i64> %op1, <vscale x 2 x i64> %shift, i64 %vl) {
+; CHECK-LABEL: test_vssrl_vv_u64m2:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
+; CHECK-NEXT: csrwi vxrm, 0
; CHECK-NEXT: vssrl.vv v8, v8, v10
; CHECK-NEXT: ret
entry:
- %a = call <vscale x 2 x i64> @llvm.riscv.vssrl.nxv2i64.nxv2i64(
- <vscale x 2 x i64> undef,
- <vscale x 2 x i64> %0,
- <vscale x 2 x i64> %1,
- i64 %2)
-
- ret <vscale x 2 x i64> %a
+ %0 = call <vscale x 2 x i64> @llvm.riscv.vssrl.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> poison, <vscale x 2 x i64> %op1, <vscale x 2 x i64> %shift, i64 0, i64 %vl)
+ ret <vscale x 2 x i64> %0
}
-declare <vscale x 2 x i64> @llvm.riscv.vssrl.mask.nxv2i64.nxv2i64(
- <vscale x 2 x i64>,
- <vscale x 2 x i64>,
- <vscale x 2 x i64>,
- <vscale x 2 x i1>,
- i64,
- i64);
+declare <vscale x 2 x i64> @llvm.riscv.vssrl.nxv2i64.nxv2i64.i64(<vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, i64 immarg, i64)
-define <vscale x 2 x i64> @intrinsic_vssrl_mask_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vssrl_mask_vv_nxv2i64_nxv2i64_nxv2i64:
+define <vscale x 2 x i64> @test_vssrl_vx_u64m2(<vscale x 2 x i64> %op1, i64 %shift, i64 %vl) {
+; CHECK-LABEL: test_vssrl_vx_u64m2:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu
-; CHECK-NEXT: vssrl.vv v8, v10, v12, v0.t
+; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma
+; CHECK-NEXT: csrwi vxrm, 0
+; CHECK-NEXT: vssrl.vx v8, v8, a0
; CHECK-NEXT: ret
entry:
- %a = call <vscale x 2 x i64> @llvm.riscv.vssrl.mask.nxv2i64.nxv2i64(
- <vscale x 2 x i64> %0,
- <vscale x 2 x i64> %1,
- <vscale x 2 x i64> %2,
- <vscale x 2 x i1> %3,
- i64 %4, i64 1)
-
- ret <vscale x 2 x i64> %a
+ %0 = call <vscale x 2 x i64> @llvm.riscv.vssrl.nxv2i64.i64.i64(<vscale x 2 x i64> poison, <vscale x 2 x i64> %op1, i64 %shift, i64 0, i64 %vl)
+ ret <vscale x 2 x i64> %0
}
-declare <vscale x 4 x i64> @llvm.riscv.vssrl.nxv4i64.nxv4i64(
- <vscale x 4 x i64>,
- <vscale x 4 x i64>,
- <vscale x 4 x i64>,
- i64);
+declare <vscale x 2 x i64> @llvm.riscv.vssrl.nxv2i64.i64.i64(<vscale x 2 x i64>, <vscale x 2 x i64>, i64, i64 immarg, i64)
-define <vscale x 4 x i64> @intrinsic_vssrl_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vssrl_vv_nxv4i64_nxv4i64_nxv4i64:
+define <vscale x 4 x i64> @test_vssrl_vv_u64m4(<vscale x 4 x i64> %op1, <vscale x 4 x i64> %shift, i64 %vl) {
+; CHECK-LABEL: test_vssrl_vv_u64m4:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
+; CHECK-NEXT: csrwi vxrm, 0
; CHECK-NEXT: vssrl.vv v8, v8, v12
; CHECK-NEXT: ret
entry:
- %a = call <vscale x 4 x i64> @llvm.riscv.vssrl.nxv4i64.nxv4i64(
- <vscale x 4 x i64> undef,
- <vscale x 4 x i64> %0,
- <vscale x 4 x i64> %1,
- i64 %2)
-
- ret <vscale x 4 x i64> %a
+ %0 = call <vscale x 4 x i64> @llvm.riscv.vssrl.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> poison, <vscale x 4 x i64> %op1, <vscale x 4 x i64> %shift, i64 0, i64 %vl)
+ ret <vscale x 4 x i64> %0
}
-declare <vscale x 4 x i64> @llvm.riscv.vssrl.mask.nxv4i64.nxv4i64(
- <vscale x 4 x i64>,
- <vscale x 4 x i64>,
- <vscale x 4 x i64>,
- <vscale x 4 x i1>,
- i64,
- i64);
+declare <vscale x 4 x i64> @llvm.riscv.vssrl.nxv4i64.nxv4i64.i64(<vscale x 4 x i64>, <vscale x 4 x i64>, <vscale x 4 x i64>, i64 immarg, i64)
-define <vscale x 4 x i64> @intrinsic_vssrl_mask_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vssrl_mask_vv_nxv4i64_nxv4i64_nxv4i64:
+define <vscale x 4 x i64> @test_vssrl_vx_u64m4(<vscale x 4 x i64> %op1, i64 %shift, i64 %vl) {
+; CHECK-LABEL: test_vssrl_vx_u64m4:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu
-; CHECK-NEXT: vssrl.vv v8, v12, v16, v0.t
+; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma
+; CHECK-NEXT: csrwi vxrm, 0
+; CHECK-NEXT: vssrl.vx v8, v8, a0
; CHECK-NEXT: ret
entry:
- %a = call <vscale x 4 x i64> @llvm.riscv.vssrl.mask.nxv4i64.nxv4i64(
- <vscale x 4 x i64> %0,
- <vscale x 4 x i64> %1,
- <vscale x 4 x i64> %2,
- <vscale x 4 x i1> %3,
- i64 %4, i64 1)
-
- ret <vscale x 4 x i64> %a
+ %0 = call <vscale x 4 x i64> @llvm.riscv.vssrl.nxv4i64.i64.i64(<vscale x 4 x i64> poison, <vscale x 4 x i64> %op1, i64 %shift, i64 0, i64 %vl)
+ ret <vscale x 4 x i64> %0
}
-declare <vscale x 8 x i64> @llvm.riscv.vssrl.nxv8i64.nxv8i64(
- <vscale x 8 x i64>,
- <vscale x 8 x i64>,
- <vscale x 8 x i64>,
- i64);
+declare <vscale x 4 x i64> @llvm.riscv.vssrl.nxv4i64.i64.i64(<vscale x 4 x i64>, <vscale x 4 x i64>, i64, i64 immarg, i64)
-define <vscale x 8 x i64> @intrinsic_vssrl_vv_nxv8i64_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vssrl_vv_nxv8i64_nxv8i64_nxv8i64:
+define <vscale x 8 x i64> @test_vssrl_vv_u64m8(<vscale x 8 x i64> %op1, <vscale x 8 x i64> %shift, i64 %vl) {
+; CHECK-LABEL: test_vssrl_vv_u64m8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; CHECK-NEXT: csrwi vxrm, 0
; CHECK-NEXT: vssrl.vv v8, v8, v16
; CHECK-NEXT: ret
entry:
- %a = call <vscale x 8 x i64> @llvm.riscv.vssrl.nxv8i64.nxv8i64(
- <vscale x 8 x i64> undef,
- <vscale x 8 x i64> %0,
- <vscale x 8 x i64> %1,
- i64 %2)
-
- ret <vscale x 8 x i64> %a
+ %0 = call <vscale x 8 x i64> @llvm.riscv.vssrl.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> poison, <vscale x 8 x i64> %op1, <vscale x 8 x i64> %shift, i64 0, i64 %vl)
+ ret <vscale x 8 x i64> %0
}
-declare <vscale x 8 x i64> @llvm.riscv.vssrl.mask.nxv8i64.nxv8i64(
- <vscale x 8 x i64>,
- <vscale x 8 x i64>,
- <vscale x 8 x i64>,
- <vscale x 8 x i1>,
- i64,
- i64);
+declare <vscale x 8 x i64> @llvm.riscv.vssrl.nxv8i64.nxv8i64.i64(<vscale x 8 x i64>, <vscale x 8 x i64>, <vscale x 8 x i64>, i64 immarg, i64)
-define <vscale x 8 x i64> @intrinsic_vssrl_mask_vv_nxv8i64_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vssrl_mask_vv_nxv8i64_nxv8i64_nxv8i64:
+define <vscale x 8 x i64> @test_vssrl_vx_u64m8(<vscale x 8 x i64> %op1, i64 %shift, i64 %vl) {
+; CHECK-LABEL: test_vssrl_vx_u64m8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vl8re64.v v24, (a0)
-; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu
-; CHECK-NEXT: vssrl.vv v8, v16, v24, v0.t
+; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, ma
+; CHECK-NEXT: csrwi vxrm, 0
+; CHECK-NEXT: vssrl.vx v8, v8, a0
; CHECK-NEXT: ret
entry:
- %a = call <vscale x 8 x i64> @llvm.riscv.vssrl.mask.nxv8i64.nxv8i64(
- <vscale x 8 x i64> %0,
- <vscale x 8 x i64> %1,
- <vscale x 8 x i64> %2,
- <vscale x 8 x i1> %3,
- i64 %4, i64 1)
-
- ret <vscale x 8 x i64> %a
+ %0 = call <vscale x 8 x i64> @llvm.riscv.vssrl.nxv8i64.i64.i64(<vscale x 8 x i64> poison, <vscale x 8 x i64> %op1, i64 %shift, i64 0, i64 %vl)
+ ret <vscale x 8 x i64> %0
+}
+
+declare <vscale x 8 x i64> @llvm.riscv.vssrl.nxv8i64.i64.i64(<vscale x 8 x i64>, <vscale x 8 x i64>, i64, i64 immarg, i64)
+define <vscale x 1 x i8> @test_vssrl_vv_u8mf8_m(<vscale x 1 x i1> %mask, <vscale x 1 x i8> %op1, <vscale x 1 x i8> %shift, i64 %vl) {
+; CHECK-LABEL: test_vssrl_vv_u8mf8_m:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
+; CHECK-NEXT: csrwi vxrm, 0
+; CHECK-NEXT: vssrl.vv v8, v8, v9, v0.t
+; CHECK-NEXT: ret
+entry:
+ %0 = call <vscale x 1 x i8> @llvm.riscv.vssrl.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> poison, <vscale x 1 x i8> %op1, <vscale x 1 x i8> %shift, <vscale x 1 x i1> %mask, i64 0, i64 %vl, i64 3)
+ ret <vscale x 1 x i8> %0
}
-declare <vscale x 1 x i8> @llvm.riscv.vssrl.nxv1i8(
- <vscale x 1 x i8>,
- <vscale x 1 x i8>,
- i64,
- i64);
+declare <vscale x 1 x i8> @llvm.riscv.vssrl.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i1>, i64 immarg, i64, i64 immarg)
-define <vscale x 1 x i8> @intrinsic_vssrl_vx_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, i64 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vssrl_vx_nxv1i8_nxv1i8:
+define <vscale x 1 x i8> @test_vssrl_vx_u8mf8_m(<vscale x 1 x i1> %mask, <vscale x 1 x i8> %op1, i64 %shift, i64 %vl) {
+; CHECK-LABEL: test_vssrl_vx_u8mf8_m:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
-; CHECK-NEXT: vssrl.vx v8, v8, a0
+; CHECK-NEXT: csrwi vxrm, 0
+; CHECK-NEXT: vssrl.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
entry:
- %a = call <vscale x 1 x i8> @llvm.riscv.vssrl.nxv1i8(
- <vscale x 1 x i8> undef,
- <vscale x 1 x i8> %0,
- i64 %1,
- i64 %2)
-
- ret <vscale x 1 x i8> %a
+ %0 = call <vscale x 1 x i8> @llvm.riscv.vssrl.mask.nxv1i8.i64.i64(<vscale x 1 x i8> poison, <vscale x 1 x i8> %op1, i64 %shift, <vscale x 1 x i1> %mask, i64 0, i64 %vl, i64 3)
+ ret <vscale x 1 x i8> %0
}
-declare <vscale x 1 x i8> @llvm.riscv.vssrl.mask.nxv1i8(
- <vscale x 1 x i8>,
- <vscale x 1 x i8>,
- i64,
- <vscale x 1 x i1>,
- i64,
- i64);
+declare <vscale x 1 x i8> @llvm.riscv.vssrl.mask.nxv1i8.i64.i64(<vscale x 1 x i8>, <vscale x 1 x i8>, i64, <vscale x 1 x i1>, i64 immarg, i64, i64 immarg)
-define <vscale x 1 x i8> @intrinsic_vssrl_mask_vx_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i64 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vssrl_mask_vx_nxv1i8_nxv1i8:
+define <vscale x 2 x i8> @test_vssrl_vv_u8mf4_m(<vscale x 2 x i1> %mask, <vscale x 2 x i8> %op1, <vscale x 2 x i8> %shift, i64 %vl) {
+; CHECK-LABEL: test_vssrl_vv_u8mf4_m:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
-; CHECK-NEXT: vssrl.vx v8, v9, a0, v0.t
+; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma
+; CHECK-NEXT: csrwi vxrm, 0
+; CHECK-NEXT: vssrl.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
entry:
- %a = call <vscale x 1 x i8> @llvm.riscv.vssrl.mask.nxv1i8(
- <vscale x 1 x i8> %0,
- <vscale x 1 x i8> %1,
- i64 %2,
- <vscale x 1 x i1> %3,
- i64 %4, i64 1)
-
- ret <vscale x 1 x i8> %a
+ %0 = call <vscale x 2 x i8> @llvm.riscv.vssrl.mask.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> poison, <vscale x 2 x i8> %op1, <vscale x 2 x i8> %shift, <vscale x 2 x i1> %mask, i64 0, i64 %vl, i64 3)
+ ret <vscale x 2 x i8> %0
}
-declare <vscale x 2 x i8> @llvm.riscv.vssrl.nxv2i8(
- <vscale x 2 x i8>,
- <vscale x 2 x i8>,
- i64,
- i64);
+declare <vscale x 2 x i8> @llvm.riscv.vssrl.mask.nxv2i8.nxv2i8.i64(<vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i1>, i64 immarg, i64, i64 immarg)
-define <vscale x 2 x i8> @intrinsic_vssrl_vx_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, i64 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vssrl_vx_nxv2i8_nxv2i8:
+define <vscale x 2 x i8> @test_vssrl_vx_u8mf4_m(<vscale x 2 x i1> %mask, <vscale x 2 x i8> %op1, i64 %shift, i64 %vl) {
+; CHECK-LABEL: test_vssrl_vx_u8mf4_m:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
-; CHECK-NEXT: vssrl.vx v8, v8, a0
+; CHECK-NEXT: csrwi vxrm, 0
+; CHECK-NEXT: vssrl.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
entry:
- %a = call <vscale x 2 x i8> @llvm.riscv.vssrl.nxv2i8(
- <vscale x 2 x i8> undef,
- <vscale x 2 x i8> %0,
- i64 %1,
- i64 %2)
-
- ret <vscale x 2 x i8> %a
+ %0 = call <vscale x 2 x i8> @llvm.riscv.vssrl.mask.nxv2i8.i64.i64(<vscale x 2 x i8> poison, <vscale x 2 x i8> %op1, i64 %shift, <vscale x 2 x i1> %mask, i64 0, i64 %vl, i64 3)
+ ret <vscale x 2 x i8> %0
}
-declare <vscale x 2 x i8> @llvm.riscv.vssrl.mask.nxv2i8(
- <vscale x 2 x i8>,
- <vscale x 2 x i8>,
- i64,
- <vscale x 2 x i1>,
- i64,
- i64);
+declare <vscale x 2 x i8> @llvm.riscv.vssrl.mask.nxv2i8.i64.i64(<vscale x 2 x i8>, <vscale x 2 x i8>, i64, <vscale x 2 x i1>, i64 immarg, i64, i64 immarg)
-define <vscale x 2 x i8> @intrinsic_vssrl_mask_vx_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, i64 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vssrl_mask_vx_nxv2i8_nxv2i8:
+define <vscale x 4 x i8> @test_vssrl_vv_u8mf2_m(<vscale x 4 x i1> %mask, <vscale x 4 x i8> %op1, <vscale x 4 x i8> %shift, i64 %vl) {
+; CHECK-LABEL: test_vssrl_vv_u8mf2_m:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
-; CHECK-NEXT: vssrl.vx v8, v9, a0, v0.t
+; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
+; CHECK-NEXT: csrwi vxrm, 0
+; CHECK-NEXT: vssrl.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
entry:
- %a = call <vscale x 2 x i8> @llvm.riscv.vssrl.mask.nxv2i8(
- <vscale x 2 x i8> %0,
- <vscale x 2 x i8> %1,
- i64 %2,
- <vscale x 2 x i1> %3,
- i64 %4, i64 1)
-
- ret <vscale x 2 x i8> %a
+ %0 = call <vscale x 4 x i8> @llvm.riscv.vssrl.mask.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> poison, <vscale x 4 x i8> %op1, <vscale x 4 x i8> %shift, <vscale x 4 x i1> %mask, i64 0, i64 %vl, i64 3)
+ ret <vscale x 4 x i8> %0
}
-declare <vscale x 4 x i8> @llvm.riscv.vssrl.nxv4i8(
- <vscale x 4 x i8>,
- <vscale x 4 x i8>,
- i64,
- i64);
+declare <vscale x 4 x i8> @llvm.riscv.vssrl.mask.nxv4i8.nxv4i8.i64(<vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i1>, i64 immarg, i64, i64 immarg)
-define <vscale x 4 x i8> @intrinsic_vssrl_vx_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, i64 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vssrl_vx_nxv4i8_nxv4i8:
+define <vscale x 4 x i8> @test_vssrl_vx_u8mf2_m(<vscale x 4 x i1> %mask, <vscale x 4 x i8> %op1, i64 %shift, i64 %vl) {
+; CHECK-LABEL: test_vssrl_vx_u8mf2_m:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
-; CHECK-NEXT: vssrl.vx v8, v8, a0
+; CHECK-NEXT: csrwi vxrm, 0
+; CHECK-NEXT: vssrl.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
entry:
- %a = call <vscale x 4 x i8> @llvm.riscv.vssrl.nxv4i8(
- <vscale x 4 x i8> undef,
- <vscale x 4 x i8> %0,
- i64 %1,
- i64 %2)
-
- ret <vscale x 4 x i8> %a
+ %0 = call <vscale x 4 x i8> @llvm.riscv.vssrl.mask.nxv4i8.i64.i64(<vscale x 4 x i8> poison, <vscale x 4 x i8> %op1, i64 %shift, <vscale x 4 x i1> %mask, i64 0, i64 %vl, i64 3)
+ ret <vscale x 4 x i8> %0
}
-declare <vscale x 4 x i8> @llvm.riscv.vssrl.mask.nxv4i8(
- <vscale x 4 x i8>,
- <vscale x 4 x i8>,
- i64,
- <vscale x 4 x i1>,
- i64,
- i64);
+declare <vscale x 4 x i8> @llvm.riscv.vssrl.mask.nxv4i8.i64.i64(<vscale x 4 x i8>, <vscale x 4 x i8>, i64, <vscale x 4 x i1>, i64 immarg, i64, i64 immarg)
-define <vscale x 4 x i8> @intrinsic_vssrl_mask_vx_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, i64 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vssrl_mask_vx_nxv4i8_nxv4i8:
+define <vscale x 8 x i8> @test_vssrl_vv_u8m1_m(<vscale x 8 x i1> %mask, <vscale x 8 x i8> %op1, <vscale x 8 x i8> %shift, i64 %vl) {
+; CHECK-LABEL: test_vssrl_vv_u8m1_m:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
-; CHECK-NEXT: vssrl.vx v8, v9, a0, v0.t
+; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma
+; CHECK-NEXT: csrwi vxrm, 0
+; CHECK-NEXT: vssrl.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
entry:
- %a = call <vscale x 4 x i8> @llvm.riscv.vssrl.mask.nxv4i8(
- <vscale x 4 x i8> %0,
- <vscale x 4 x i8> %1,
- i64 %2,
- <vscale x 4 x i1> %3,
- i64 %4, i64 1)
-
- ret <vscale x 4 x i8> %a
+ %0 = call <vscale x 8 x i8> @llvm.riscv.vssrl.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> poison, <vscale x 8 x i8> %op1, <vscale x 8 x i8> %shift, <vscale x 8 x i1> %mask, i64 0, i64 %vl, i64 3)
+ ret <vscale x 8 x i8> %0
}
-declare <vscale x 8 x i8> @llvm.riscv.vssrl.nxv8i8(
- <vscale x 8 x i8>,
- <vscale x 8 x i8>,
- i64,
- i64);
+declare <vscale x 8 x i8> @llvm.riscv.vssrl.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i1>, i64 immarg, i64, i64 immarg)
-define <vscale x 8 x i8> @intrinsic_vssrl_vx_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, i64 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vssrl_vx_nxv8i8_nxv8i8:
+define <vscale x 8 x i8> @test_vssrl_vx_u8m1_m(<vscale x 8 x i1> %mask, <vscale x 8 x i8> %op1, i64 %shift, i64 %vl) {
+; CHECK-LABEL: test_vssrl_vx_u8m1_m:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
-; CHECK-NEXT: vssrl.vx v8, v8, a0
+; CHECK-NEXT: csrwi vxrm, 0
+; CHECK-NEXT: vssrl.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
entry:
- %a = call <vscale x 8 x i8> @llvm.riscv.vssrl.nxv8i8(
- <vscale x 8 x i8> undef,
- <vscale x 8 x i8> %0,
- i64 %1,
- i64 %2)
-
- ret <vscale x 8 x i8> %a
+ %0 = call <vscale x 8 x i8> @llvm.riscv.vssrl.mask.nxv8i8.i64.i64(<vscale x 8 x i8> poison, <vscale x 8 x i8> %op1, i64 %shift, <vscale x 8 x i1> %mask, i64 0, i64 %vl, i64 3)
+ ret <vscale x 8 x i8> %0
}
-declare <vscale x 8 x i8> @llvm.riscv.vssrl.mask.nxv8i8(
- <vscale x 8 x i8>,
- <vscale x 8 x i8>,
- i64,
- <vscale x 8 x i1>,
- i64,
- i64);
+declare <vscale x 8 x i8> @llvm.riscv.vssrl.mask.nxv8i8.i64.i64(<vscale x 8 x i8>, <vscale x 8 x i8>, i64, <vscale x 8 x i1>, i64 immarg, i64, i64 immarg)
-define <vscale x 8 x i8> @intrinsic_vssrl_mask_vx_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, i64 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vssrl_mask_vx_nxv8i8_nxv8i8:
+define <vscale x 16 x i8> @test_vssrl_vv_u8m2_m(<vscale x 16 x i1> %mask, <vscale x 16 x i8> %op1, <vscale x 16 x i8> %shift, i64 %vl) {
+; CHECK-LABEL: test_vssrl_vv_u8m2_m:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
-; CHECK-NEXT: vssrl.vx v8, v9, a0, v0.t
+; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma
+; CHECK-NEXT: csrwi vxrm, 0
+; CHECK-NEXT: vssrl.vv v8, v8, v10, v0.t
; CHECK-NEXT: ret
entry:
- %a = call <vscale x 8 x i8> @llvm.riscv.vssrl.mask.nxv8i8(
- <vscale x 8 x i8> %0,
- <vscale x 8 x i8> %1,
- i64 %2,
- <vscale x 8 x i1> %3,
- i64 %4, i64 1)
-
- ret <vscale x 8 x i8> %a
+ %0 = call <vscale x 16 x i8> @llvm.riscv.vssrl.mask.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> poison, <vscale x 16 x i8> %op1, <vscale x 16 x i8> %shift, <vscale x 16 x i1> %mask, i64 0, i64 %vl, i64 3)
+ ret <vscale x 16 x i8> %0
}
-declare <vscale x 16 x i8> @llvm.riscv.vssrl.nxv16i8(
- <vscale x 16 x i8>,
- <vscale x 16 x i8>,
- i64,
- i64);
+declare <vscale x 16 x i8> @llvm.riscv.vssrl.mask.nxv16i8.nxv16i8.i64(<vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i1>, i64 immarg, i64, i64 immarg)
-define <vscale x 16 x i8> @intrinsic_vssrl_vx_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, i64 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vssrl_vx_nxv16i8_nxv16i8:
+define <vscale x 16 x i8> @test_vssrl_vx_u8m2_m(<vscale x 16 x i1> %mask, <vscale x 16 x i8> %op1, i64 %shift, i64 %vl) {
+; CHECK-LABEL: test_vssrl_vx_u8m2_m:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma
-; CHECK-NEXT: vssrl.vx v8, v8, a0
+; CHECK-NEXT: csrwi vxrm, 0
+; CHECK-NEXT: vssrl.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
entry:
- %a = call <vscale x 16 x i8> @llvm.riscv.vssrl.nxv16i8(
- <vscale x 16 x i8> undef,
- <vscale x 16 x i8> %0,
- i64 %1,
- i64 %2)
-
- ret <vscale x 16 x i8> %a
+ %0 = call <vscale x 16 x i8> @llvm.riscv.vssrl.mask.nxv16i8.i64.i64(<vscale x 16 x i8> poison, <vscale x 16 x i8> %op1, i64 %shift, <vscale x 16 x i1> %mask, i64 0, i64 %vl, i64 3)
+ ret <vscale x 16 x i8> %0
}
-declare <vscale x 16 x i8> @llvm.riscv.vssrl.mask.nxv16i8(
- <vscale x 16 x i8>,
- <vscale x 16 x i8>,
- i64,
- <vscale x 16 x i1>,
- i64,
- i64);
+declare <vscale x 16 x i8> @llvm.riscv.vssrl.mask.nxv16i8.i64.i64(<vscale x 16 x i8>, <vscale x 16 x i8>, i64, <vscale x 16 x i1>, i64 immarg, i64, i64 immarg)
-define <vscale x 16 x i8> @intrinsic_vssrl_mask_vx_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, i64 %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vssrl_mask_vx_nxv16i8_nxv16i8:
+define <vscale x 32 x i8> @test_vssrl_vv_u8m4_m(<vscale x 32 x i1> %mask, <vscale x 32 x i8> %op1, <vscale x 32 x i8> %shift, i64 %vl) {
+; CHECK-LABEL: test_vssrl_vv_u8m4_m:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu
-; CHECK-NEXT: vssrl.vx v8, v10, a0, v0.t
+; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma
+; CHECK-NEXT: csrwi vxrm, 0
+; CHECK-NEXT: vssrl.vv v8, v8, v12, v0.t
; CHECK-NEXT: ret
entry:
- %a = call <vscale x 16 x i8> @llvm.riscv.vssrl.mask.nxv16i8(
- <vscale x 16 x i8> %0,
- <vscale x 16 x i8> %1,
- i64 %2,
- <vscale x 16 x i1> %3,
- i64 %4, i64 1)
-
- ret <vscale x 16 x i8> %a
+ %0 = call <vscale x 32 x i8> @llvm.riscv.vssrl.mask.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> poison, <vscale x 32 x i8> %op1, <vscale x 32 x i8> %shift, <vscale x 32 x i1> %mask, i64 0, i64 %vl, i64 3)
+ ret <vscale x 32 x i8> %0
}
-declare <vscale x 32 x i8> @llvm.riscv.vssrl.nxv32i8(
- <vscale x 32 x i8>,
- <vscale x 32 x i8>,
- i64,
- i64);
+declare <vscale x 32 x i8> @llvm.riscv.vssrl.mask.nxv32i8.nxv32i8.i64(<vscale x 32 x i8>, <vscale x 32 x i8>, <vscale x 32 x i8>, <vscale x 32 x i1>, i64 immarg, i64, i64 immarg)
-define <vscale x 32 x i8> @intrinsic_vssrl_vx_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, i64 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vssrl_vx_nxv32i8_nxv32i8:
+define <vscale x 32 x i8> @test_vssrl_vx_u8m4_m(<vscale x 32 x i1> %mask, <vscale x 32 x i8> %op1, i64 %shift, i64 %vl) {
+; CHECK-LABEL: test_vssrl_vx_u8m4_m:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma
-; CHECK-NEXT: vssrl.vx v8, v8, a0
+; CHECK-NEXT: csrwi vxrm, 0
+; CHECK-NEXT: vssrl.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
entry:
- %a = call <vscale x 32 x i8> @llvm.riscv.vssrl.nxv32i8(
- <vscale x 32 x i8> undef,
- <vscale x 32 x i8> %0,
- i64 %1,
- i64 %2)
-
- ret <vscale x 32 x i8> %a
+ %0 = call <vscale x 32 x i8> @llvm.riscv.vssrl.mask.nxv32i8.i64.i64(<vscale x 32 x i8> poison, <vscale x 32 x i8> %op1, i64 %shift, <vscale x 32 x i1> %mask, i64 0, i64 %vl, i64 3)
+ ret <vscale x 32 x i8> %0
}
-declare <vscale x 32 x i8> @llvm.riscv.vssrl.mask.nxv32i8(
- <vscale x 32 x i8>,
- <vscale x 32 x i8>,
- i64,
- <vscale x 32 x i1>,
- i64,
- i64);
+declare <vscale x 32 x i8> @llvm.riscv.vssrl.mask.nxv32i8.i64.i64(<vscale x 32 x i8>, <vscale x 32 x i8>, i64, <vscale x 32 x i1>, i64 immarg, i64, i64 immarg)
-define <vscale x 32 x i8> @intrinsic_vssrl_mask_vx_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, i64 %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vssrl_mask_vx_nxv32i8_nxv32i8:
+define <vscale x 64 x i8> @test_vssrl_vv_u8m8_m(<vscale x 64 x i1> %mask, <vscale x 64 x i8> %op1, <vscale x 64 x i8> %shift, i64 %vl) {
+; CHECK-LABEL: test_vssrl_vv_u8m8_m:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu
-; CHECK-NEXT: vssrl.vx v8, v12, a0, v0.t
+; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma
+; CHECK-NEXT: csrwi vxrm, 0
+; CHECK-NEXT: vssrl.vv v8, v8, v16, v0.t
; CHECK-NEXT: ret
entry:
- %a = call <vscale x 32 x i8> @llvm.riscv.vssrl.mask.nxv32i8(
- <vscale x 32 x i8> %0,
- <vscale x 32 x i8> %1,
- i64 %2,
- <vscale x 32 x i1> %3,
- i64 %4, i64 1)
-
- ret <vscale x 32 x i8> %a
+ %0 = call <vscale x 64 x i8> @llvm.riscv.vssrl.mask.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> poison, <vscale x 64 x i8> %op1, <vscale x 64 x i8> %shift, <vscale x 64 x i1> %mask, i64 0, i64 %vl, i64 3)
+ ret <vscale x 64 x i8> %0
}
-declare <vscale x 64 x i8> @llvm.riscv.vssrl.nxv64i8(
- <vscale x 64 x i8>,
- <vscale x 64 x i8>,
- i64,
- i64);
+declare <vscale x 64 x i8> @llvm.riscv.vssrl.mask.nxv64i8.nxv64i8.i64(<vscale x 64 x i8>, <vscale x 64 x i8>, <vscale x 64 x i8>, <vscale x 64 x i1>, i64 immarg, i64, i64 immarg)
-define <vscale x 64 x i8> @intrinsic_vssrl_vx_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, i64 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vssrl_vx_nxv64i8_nxv64i8:
+define <vscale x 64 x i8> @test_vssrl_vx_u8m8_m(<vscale x 64 x i1> %mask, <vscale x 64 x i8> %op1, i64 %shift, i64 %vl) {
+; CHECK-LABEL: test_vssrl_vx_u8m8_m:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma
-; CHECK-NEXT: vssrl.vx v8, v8, a0
+; CHECK-NEXT: csrwi vxrm, 0
+; CHECK-NEXT: vssrl.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
entry:
- %a = call <vscale x 64 x i8> @llvm.riscv.vssrl.nxv64i8(
- <vscale x 64 x i8> undef,
- <vscale x 64 x i8> %0,
- i64 %1,
- i64 %2)
-
- ret <vscale x 64 x i8> %a
+ %0 = call <vscale x 64 x i8> @llvm.riscv.vssrl.mask.nxv64i8.i64.i64(<vscale x 64 x i8> poison, <vscale x 64 x i8> %op1, i64 %shift, <vscale x 64 x i1> %mask, i64 0, i64 %vl, i64 3)
+ ret <vscale x 64 x i8> %0
}
-declare <vscale x 64 x i8> @llvm.riscv.vssrl.mask.nxv64i8(
- <vscale x 64 x i8>,
- <vscale x 64 x i8>,
- i64,
- <vscale x 64 x i1>,
- i64,
- i64);
+declare <vscale x 64 x i8> @llvm.riscv.vssrl.mask.nxv64i8.i64.i64(<vscale x 64 x i8>, <vscale x 64 x i8>, i64, <vscale x 64 x i1>, i64 immarg, i64, i64 immarg)
-define <vscale x 64 x i8> @intrinsic_vssrl_mask_vx_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, i64 %2, <vscale x 64 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vssrl_mask_vx_nxv64i8_nxv64i8:
+define <vscale x 1 x i16> @test_vssrl_vv_u16mf4_m(<vscale x 1 x i1> %mask, <vscale x 1 x i16> %op1, <vscale x 1 x i16> %shift, i64 %vl) {
+; CHECK-LABEL: test_vssrl_vv_u16mf4_m:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu
-; CHECK-NEXT: vssrl.vx v8, v16, a0, v0.t
+; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
+; CHECK-NEXT: csrwi vxrm, 0
+; CHECK-NEXT: vssrl.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
entry:
- %a = call <vscale x 64 x i8> @llvm.riscv.vssrl.mask.nxv64i8(
- <vscale x 64 x i8> %0,
- <vscale x 64 x i8> %1,
- i64 %2,
- <vscale x 64 x i1> %3,
- i64 %4, i64 1)
-
- ret <vscale x 64 x i8> %a
+ %0 = call <vscale x 1 x i16> @llvm.riscv.vssrl.mask.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> poison, <vscale x 1 x i16> %op1, <vscale x 1 x i16> %shift, <vscale x 1 x i1> %mask, i64 0, i64 %vl, i64 3)
+ ret <vscale x 1 x i16> %0
}
-declare <vscale x 1 x i16> @llvm.riscv.vssrl.nxv1i16(
- <vscale x 1 x i16>,
- <vscale x 1 x i16>,
- i64,
- i64);
+declare <vscale x 1 x i16> @llvm.riscv.vssrl.mask.nxv1i16.nxv1i16.i64(<vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i1>, i64 immarg, i64, i64 immarg)
-define <vscale x 1 x i16> @intrinsic_vssrl_vx_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, i64 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vssrl_vx_nxv1i16_nxv1i16:
+define <vscale x 1 x i16> @test_vssrl_vx_u16mf4_m(<vscale x 1 x i1> %mask, <vscale x 1 x i16> %op1, i64 %shift, i64 %vl) {
+; CHECK-LABEL: test_vssrl_vx_u16mf4_m:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
-; CHECK-NEXT: vssrl.vx v8, v8, a0
+; CHECK-NEXT: csrwi vxrm, 0
+; CHECK-NEXT: vssrl.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
entry:
- %a = call <vscale x 1 x i16> @llvm.riscv.vssrl.nxv1i16(
- <vscale x 1 x i16> undef,
- <vscale x 1 x i16> %0,
- i64 %1,
- i64 %2)
-
- ret <vscale x 1 x i16> %a
+ %0 = call <vscale x 1 x i16> @llvm.riscv.vssrl.mask.nxv1i16.i64.i64(<vscale x 1 x i16> poison, <vscale x 1 x i16> %op1, i64 %shift, <vscale x 1 x i1> %mask, i64 0, i64 %vl, i64 3)
+ ret <vscale x 1 x i16> %0
}
-declare <vscale x 1 x i16> @llvm.riscv.vssrl.mask.nxv1i16(
- <vscale x 1 x i16>,
- <vscale x 1 x i16>,
- i64,
- <vscale x 1 x i1>,
- i64,
- i64);
+declare <vscale x 1 x i16> @llvm.riscv.vssrl.mask.nxv1i16.i64.i64(<vscale x 1 x i16>, <vscale x 1 x i16>, i64, <vscale x 1 x i1>, i64 immarg, i64, i64 immarg)
-define <vscale x 1 x i16> @intrinsic_vssrl_mask_vx_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, i64 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vssrl_mask_vx_nxv1i16_nxv1i16:
+define <vscale x 2 x i16> @test_vssrl_vv_u16mf2_m(<vscale x 2 x i1> %mask, <vscale x 2 x i16> %op1, <vscale x 2 x i16> %shift, i64 %vl) {
+; CHECK-LABEL: test_vssrl_vv_u16mf2_m:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
-; CHECK-NEXT: vssrl.vx v8, v9, a0, v0.t
+; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
+; CHECK-NEXT: csrwi vxrm, 0
+; CHECK-NEXT: vssrl.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
entry:
- %a = call <vscale x 1 x i16> @llvm.riscv.vssrl.mask.nxv1i16(
- <vscale x 1 x i16> %0,
- <vscale x 1 x i16> %1,
- i64 %2,
- <vscale x 1 x i1> %3,
- i64 %4, i64 1)
-
- ret <vscale x 1 x i16> %a
+ %0 = call <vscale x 2 x i16> @llvm.riscv.vssrl.mask.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> poison, <vscale x 2 x i16> %op1, <vscale x 2 x i16> %shift, <vscale x 2 x i1> %mask, i64 0, i64 %vl, i64 3)
+ ret <vscale x 2 x i16> %0
}
-declare <vscale x 2 x i16> @llvm.riscv.vssrl.nxv2i16(
- <vscale x 2 x i16>,
- <vscale x 2 x i16>,
- i64,
- i64);
+declare <vscale x 2 x i16> @llvm.riscv.vssrl.mask.nxv2i16.nxv2i16.i64(<vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i1>, i64 immarg, i64, i64 immarg)
-define <vscale x 2 x i16> @intrinsic_vssrl_vx_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, i64 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vssrl_vx_nxv2i16_nxv2i16:
+define <vscale x 2 x i16> @test_vssrl_vx_u16mf2_m(<vscale x 2 x i1> %mask, <vscale x 2 x i16> %op1, i64 %shift, i64 %vl) {
+; CHECK-LABEL: test_vssrl_vx_u16mf2_m:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
-; CHECK-NEXT: vssrl.vx v8, v8, a0
+; CHECK-NEXT: csrwi vxrm, 0
+; CHECK-NEXT: vssrl.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
entry:
- %a = call <vscale x 2 x i16> @llvm.riscv.vssrl.nxv2i16(
- <vscale x 2 x i16> undef,
- <vscale x 2 x i16> %0,
- i64 %1,
- i64 %2)
-
- ret <vscale x 2 x i16> %a
+ %0 = call <vscale x 2 x i16> @llvm.riscv.vssrl.mask.nxv2i16.i64.i64(<vscale x 2 x i16> poison, <vscale x 2 x i16> %op1, i64 %shift, <vscale x 2 x i1> %mask, i64 0, i64 %vl, i64 3)
+ ret <vscale x 2 x i16> %0
}
-declare <vscale x 2 x i16> @llvm.riscv.vssrl.mask.nxv2i16(
- <vscale x 2 x i16>,
- <vscale x 2 x i16>,
- i64,
- <vscale x 2 x i1>,
- i64,
- i64);
+declare <vscale x 2 x i16> @llvm.riscv.vssrl.mask.nxv2i16.i64.i64(<vscale x 2 x i16>, <vscale x 2 x i16>, i64, <vscale x 2 x i1>, i64 immarg, i64, i64 immarg)
-define <vscale x 2 x i16> @intrinsic_vssrl_mask_vx_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, i64 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vssrl_mask_vx_nxv2i16_nxv2i16:
+define <vscale x 4 x i16> @test_vssrl_vv_u16m1_m(<vscale x 4 x i1> %mask, <vscale x 4 x i16> %op1, <vscale x 4 x i16> %shift, i64 %vl) {
+; CHECK-LABEL: test_vssrl_vv_u16m1_m:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT: vssrl.vx v8, v9, a0, v0.t
+; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
+; CHECK-NEXT: csrwi vxrm, 0
+; CHECK-NEXT: vssrl.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
entry:
- %a = call <vscale x 2 x i16> @llvm.riscv.vssrl.mask.nxv2i16(
- <vscale x 2 x i16> %0,
- <vscale x 2 x i16> %1,
- i64 %2,
- <vscale x 2 x i1> %3,
- i64 %4, i64 1)
-
- ret <vscale x 2 x i16> %a
+ %0 = call <vscale x 4 x i16> @llvm.riscv.vssrl.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> poison, <vscale x 4 x i16> %op1, <vscale x 4 x i16> %shift, <vscale x 4 x i1> %mask, i64 0, i64 %vl, i64 3)
+ ret <vscale x 4 x i16> %0
}
-declare <vscale x 4 x i16> @llvm.riscv.vssrl.nxv4i16(
- <vscale x 4 x i16>,
- <vscale x 4 x i16>,
- i64,
- i64);
+declare <vscale x 4 x i16> @llvm.riscv.vssrl.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i1>, i64 immarg, i64, i64 immarg)
-define <vscale x 4 x i16> @intrinsic_vssrl_vx_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, i64 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vssrl_vx_nxv4i16_nxv4i16:
+define <vscale x 4 x i16> @test_vssrl_vx_u16m1_m(<vscale x 4 x i1> %mask, <vscale x 4 x i16> %op1, i64 %shift, i64 %vl) {
+; CHECK-LABEL: test_vssrl_vx_u16m1_m:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
-; CHECK-NEXT: vssrl.vx v8, v8, a0
+; CHECK-NEXT: csrwi vxrm, 0
+; CHECK-NEXT: vssrl.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
entry:
- %a = call <vscale x 4 x i16> @llvm.riscv.vssrl.nxv4i16(
- <vscale x 4 x i16> undef,
- <vscale x 4 x i16> %0,
- i64 %1,
- i64 %2)
-
- ret <vscale x 4 x i16> %a
+ %0 = call <vscale x 4 x i16> @llvm.riscv.vssrl.mask.nxv4i16.i64.i64(<vscale x 4 x i16> poison, <vscale x 4 x i16> %op1, i64 %shift, <vscale x 4 x i1> %mask, i64 0, i64 %vl, i64 3)
+ ret <vscale x 4 x i16> %0
}
-declare <vscale x 4 x i16> @llvm.riscv.vssrl.mask.nxv4i16(
- <vscale x 4 x i16>,
- <vscale x 4 x i16>,
- i64,
- <vscale x 4 x i1>,
- i64,
- i64);
+declare <vscale x 4 x i16> @llvm.riscv.vssrl.mask.nxv4i16.i64.i64(<vscale x 4 x i16>, <vscale x 4 x i16>, i64, <vscale x 4 x i1>, i64 immarg, i64, i64 immarg)
-define <vscale x 4 x i16> @intrinsic_vssrl_mask_vx_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, i64 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vssrl_mask_vx_nxv4i16_nxv4i16:
+define <vscale x 8 x i16> @test_vssrl_vv_u16m2_m(<vscale x 8 x i1> %mask, <vscale x 8 x i16> %op1, <vscale x 8 x i16> %shift, i64 %vl) {
+; CHECK-LABEL: test_vssrl_vv_u16m2_m:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT: vssrl.vx v8, v9, a0, v0.t
+; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
+; CHECK-NEXT: csrwi vxrm, 0
+; CHECK-NEXT: vssrl.vv v8, v8, v10, v0.t
; CHECK-NEXT: ret
entry:
- %a = call <vscale x 4 x i16> @llvm.riscv.vssrl.mask.nxv4i16(
- <vscale x 4 x i16> %0,
- <vscale x 4 x i16> %1,
- i64 %2,
- <vscale x 4 x i1> %3,
- i64 %4, i64 1)
-
- ret <vscale x 4 x i16> %a
+ %0 = call <vscale x 8 x i16> @llvm.riscv.vssrl.mask.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> poison, <vscale x 8 x i16> %op1, <vscale x 8 x i16> %shift, <vscale x 8 x i1> %mask, i64 0, i64 %vl, i64 3)
+ ret <vscale x 8 x i16> %0
}
-declare <vscale x 8 x i16> @llvm.riscv.vssrl.nxv8i16(
- <vscale x 8 x i16>,
- <vscale x 8 x i16>,
- i64,
- i64);
+declare <vscale x 8 x i16> @llvm.riscv.vssrl.mask.nxv8i16.nxv8i16.i64(<vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i1>, i64 immarg, i64, i64 immarg)
-define <vscale x 8 x i16> @intrinsic_vssrl_vx_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, i64 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vssrl_vx_nxv8i16_nxv8i16:
+define <vscale x 8 x i16> @test_vssrl_vx_u16m2_m(<vscale x 8 x i1> %mask, <vscale x 8 x i16> %op1, i64 %shift, i64 %vl) {
+; CHECK-LABEL: test_vssrl_vx_u16m2_m:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
-; CHECK-NEXT: vssrl.vx v8, v8, a0
+; CHECK-NEXT: csrwi vxrm, 0
+; CHECK-NEXT: vssrl.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
entry:
- %a = call <vscale x 8 x i16> @llvm.riscv.vssrl.nxv8i16(
- <vscale x 8 x i16> undef,
- <vscale x 8 x i16> %0,
- i64 %1,
- i64 %2)
-
- ret <vscale x 8 x i16> %a
+ %0 = call <vscale x 8 x i16> @llvm.riscv.vssrl.mask.nxv8i16.i64.i64(<vscale x 8 x i16> poison, <vscale x 8 x i16> %op1, i64 %shift, <vscale x 8 x i1> %mask, i64 0, i64 %vl, i64 3)
+ ret <vscale x 8 x i16> %0
}
-declare <vscale x 8 x i16> @llvm.riscv.vssrl.mask.nxv8i16(
- <vscale x 8 x i16>,
- <vscale x 8 x i16>,
- i64,
- <vscale x 8 x i1>,
- i64,
- i64);
+declare <vscale x 8 x i16> @llvm.riscv.vssrl.mask.nxv8i16.i64.i64(<vscale x 8 x i16>, <vscale x 8 x i16>, i64, <vscale x 8 x i1>, i64 immarg, i64, i64 immarg)
-define <vscale x 8 x i16> @intrinsic_vssrl_mask_vx_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, i64 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vssrl_mask_vx_nxv8i16_nxv8i16:
+define <vscale x 16 x i16> @test_vssrl_vv_u16m4_m(<vscale x 16 x i1> %mask, <vscale x 16 x i16> %op1, <vscale x 16 x i16> %shift, i64 %vl) {
+; CHECK-LABEL: test_vssrl_vv_u16m4_m:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
-; CHECK-NEXT: vssrl.vx v8, v10, a0, v0.t
+; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma
+; CHECK-NEXT: csrwi vxrm, 0
+; CHECK-NEXT: vssrl.vv v8, v8, v12, v0.t
; CHECK-NEXT: ret
entry:
- %a = call <vscale x 8 x i16> @llvm.riscv.vssrl.mask.nxv8i16(
- <vscale x 8 x i16> %0,
- <vscale x 8 x i16> %1,
- i64 %2,
- <vscale x 8 x i1> %3,
- i64 %4, i64 1)
-
- ret <vscale x 8 x i16> %a
+ %0 = call <vscale x 16 x i16> @llvm.riscv.vssrl.mask.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> poison, <vscale x 16 x i16> %op1, <vscale x 16 x i16> %shift, <vscale x 16 x i1> %mask, i64 0, i64 %vl, i64 3)
+ ret <vscale x 16 x i16> %0
}
-declare <vscale x 16 x i16> @llvm.riscv.vssrl.nxv16i16(
- <vscale x 16 x i16>,
- <vscale x 16 x i16>,
- i64,
- i64);
+declare <vscale x 16 x i16> @llvm.riscv.vssrl.mask.nxv16i16.nxv16i16.i64(<vscale x 16 x i16>, <vscale x 16 x i16>, <vscale x 16 x i16>, <vscale x 16 x i1>, i64 immarg, i64, i64 immarg)
-define <vscale x 16 x i16> @intrinsic_vssrl_vx_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, i64 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vssrl_vx_nxv16i16_nxv16i16:
+define <vscale x 16 x i16> @test_vssrl_vx_u16m4_m(<vscale x 16 x i1> %mask, <vscale x 16 x i16> %op1, i64 %shift, i64 %vl) {
+; CHECK-LABEL: test_vssrl_vx_u16m4_m:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma
-; CHECK-NEXT: vssrl.vx v8, v8, a0
+; CHECK-NEXT: csrwi vxrm, 0
+; CHECK-NEXT: vssrl.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
entry:
- %a = call <vscale x 16 x i16> @llvm.riscv.vssrl.nxv16i16(
- <vscale x 16 x i16> undef,
- <vscale x 16 x i16> %0,
- i64 %1,
- i64 %2)
-
- ret <vscale x 16 x i16> %a
+ %0 = call <vscale x 16 x i16> @llvm.riscv.vssrl.mask.nxv16i16.i64.i64(<vscale x 16 x i16> poison, <vscale x 16 x i16> %op1, i64 %shift, <vscale x 16 x i1> %mask, i64 0, i64 %vl, i64 3)
+ ret <vscale x 16 x i16> %0
}
-declare <vscale x 16 x i16> @llvm.riscv.vssrl.mask.nxv16i16(
- <vscale x 16 x i16>,
- <vscale x 16 x i16>,
- i64,
- <vscale x 16 x i1>,
- i64,
- i64);
+declare <vscale x 16 x i16> @llvm.riscv.vssrl.mask.nxv16i16.i64.i64(<vscale x 16 x i16>, <vscale x 16 x i16>, i64, <vscale x 16 x i1>, i64 immarg, i64, i64 immarg)
-define <vscale x 16 x i16> @intrinsic_vssrl_mask_vx_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, i64 %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vssrl_mask_vx_nxv16i16_nxv16i16:
+define <vscale x 32 x i16> @test_vssrl_vv_u16m8_m(<vscale x 32 x i1> %mask, <vscale x 32 x i16> %op1, <vscale x 32 x i16> %shift, i64 %vl) {
+; CHECK-LABEL: test_vssrl_vv_u16m8_m:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu
-; CHECK-NEXT: vssrl.vx v8, v12, a0, v0.t
+; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma
+; CHECK-NEXT: csrwi vxrm, 0
+; CHECK-NEXT: vssrl.vv v8, v8, v16, v0.t
; CHECK-NEXT: ret
entry:
- %a = call <vscale x 16 x i16> @llvm.riscv.vssrl.mask.nxv16i16(
- <vscale x 16 x i16> %0,
- <vscale x 16 x i16> %1,
- i64 %2,
- <vscale x 16 x i1> %3,
- i64 %4, i64 1)
-
- ret <vscale x 16 x i16> %a
+ %0 = call <vscale x 32 x i16> @llvm.riscv.vssrl.mask.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> poison, <vscale x 32 x i16> %op1, <vscale x 32 x i16> %shift, <vscale x 32 x i1> %mask, i64 0, i64 %vl, i64 3)
+ ret <vscale x 32 x i16> %0
}
-declare <vscale x 32 x i16> @llvm.riscv.vssrl.nxv32i16(
- <vscale x 32 x i16>,
- <vscale x 32 x i16>,
- i64,
- i64);
+declare <vscale x 32 x i16> @llvm.riscv.vssrl.mask.nxv32i16.nxv32i16.i64(<vscale x 32 x i16>, <vscale x 32 x i16>, <vscale x 32 x i16>, <vscale x 32 x i1>, i64 immarg, i64, i64 immarg)
-define <vscale x 32 x i16> @intrinsic_vssrl_vx_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, i64 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vssrl_vx_nxv32i16_nxv32i16:
+define <vscale x 32 x i16> @test_vssrl_vx_u16m8_m(<vscale x 32 x i1> %mask, <vscale x 32 x i16> %op1, i64 %shift, i64 %vl) {
+; CHECK-LABEL: test_vssrl_vx_u16m8_m:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma
-; CHECK-NEXT: vssrl.vx v8, v8, a0
+; CHECK-NEXT: csrwi vxrm, 0
+; CHECK-NEXT: vssrl.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
entry:
- %a = call <vscale x 32 x i16> @llvm.riscv.vssrl.nxv32i16(
- <vscale x 32 x i16> undef,
- <vscale x 32 x i16> %0,
- i64 %1,
- i64 %2)
-
- ret <vscale x 32 x i16> %a
+ %0 = call <vscale x 32 x i16> @llvm.riscv.vssrl.mask.nxv32i16.i64.i64(<vscale x 32 x i16> poison, <vscale x 32 x i16> %op1, i64 %shift, <vscale x 32 x i1> %mask, i64 0, i64 %vl, i64 3)
+ ret <vscale x 32 x i16> %0
}
-declare <vscale x 32 x i16> @llvm.riscv.vssrl.mask.nxv32i16(
- <vscale x 32 x i16>,
- <vscale x 32 x i16>,
- i64,
- <vscale x 32 x i1>,
- i64,
- i64);
+declare <vscale x 32 x i16> @llvm.riscv.vssrl.mask.nxv32i16.i64.i64(<vscale x 32 x i16>, <vscale x 32 x i16>, i64, <vscale x 32 x i1>, i64 immarg, i64, i64 immarg)
-define <vscale x 32 x i16> @intrinsic_vssrl_mask_vx_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, i64 %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vssrl_mask_vx_nxv32i16_nxv32i16:
+define <vscale x 1 x i32> @test_vssrl_vv_u32mf2_m(<vscale x 1 x i1> %mask, <vscale x 1 x i32> %op1, <vscale x 1 x i32> %shift, i64 %vl) {
+; CHECK-LABEL: test_vssrl_vv_u32mf2_m:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu
-; CHECK-NEXT: vssrl.vx v8, v16, a0, v0.t
+; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
+; CHECK-NEXT: csrwi vxrm, 0
+; CHECK-NEXT: vssrl.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
entry:
- %a = call <vscale x 32 x i16> @llvm.riscv.vssrl.mask.nxv32i16(
- <vscale x 32 x i16> %0,
- <vscale x 32 x i16> %1,
- i64 %2,
- <vscale x 32 x i1> %3,
- i64 %4, i64 1)
-
- ret <vscale x 32 x i16> %a
+ %0 = call <vscale x 1 x i32> @llvm.riscv.vssrl.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> %op1, <vscale x 1 x i32> %shift, <vscale x 1 x i1> %mask, i64 0, i64 %vl, i64 3)
+ ret <vscale x 1 x i32> %0
}
-declare <vscale x 1 x i32> @llvm.riscv.vssrl.nxv1i32(
- <vscale x 1 x i32>,
- <vscale x 1 x i32>,
- i64,
- i64);
+declare <vscale x 1 x i32> @llvm.riscv.vssrl.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i1>, i64 immarg, i64, i64 immarg)
-define <vscale x 1 x i32> @intrinsic_vssrl_vx_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, i64 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vssrl_vx_nxv1i32_nxv1i32:
+define <vscale x 1 x i32> @test_vssrl_vx_u32mf2_m(<vscale x 1 x i1> %mask, <vscale x 1 x i32> %op1, i64 %shift, i64 %vl) {
+; CHECK-LABEL: test_vssrl_vx_u32mf2_m:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
-; CHECK-NEXT: vssrl.vx v8, v8, a0
+; CHECK-NEXT: csrwi vxrm, 0
+; CHECK-NEXT: vssrl.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
entry:
- %a = call <vscale x 1 x i32> @llvm.riscv.vssrl.nxv1i32(
- <vscale x 1 x i32> undef,
- <vscale x 1 x i32> %0,
- i64 %1,
- i64 %2)
-
- ret <vscale x 1 x i32> %a
+ %0 = call <vscale x 1 x i32> @llvm.riscv.vssrl.mask.nxv1i32.i64.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> %op1, i64 %shift, <vscale x 1 x i1> %mask, i64 0, i64 %vl, i64 3)
+ ret <vscale x 1 x i32> %0
}
-declare <vscale x 1 x i32> @llvm.riscv.vssrl.mask.nxv1i32(
- <vscale x 1 x i32>,
- <vscale x 1 x i32>,
- i64,
- <vscale x 1 x i1>,
- i64,
- i64);
+declare <vscale x 1 x i32> @llvm.riscv.vssrl.mask.nxv1i32.i64.i64(<vscale x 1 x i32>, <vscale x 1 x i32>, i64, <vscale x 1 x i1>, i64 immarg, i64, i64 immarg)
-define <vscale x 1 x i32> @intrinsic_vssrl_mask_vx_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, i64 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vssrl_mask_vx_nxv1i32_nxv1i32:
+define <vscale x 2 x i32> @test_vssrl_vv_u32m1_m(<vscale x 2 x i1> %mask, <vscale x 2 x i32> %op1, <vscale x 2 x i32> %shift, i64 %vl) {
+; CHECK-LABEL: test_vssrl_vv_u32m1_m:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT: vssrl.vx v8, v9, a0, v0.t
+; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
+; CHECK-NEXT: csrwi vxrm, 0
+; CHECK-NEXT: vssrl.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
entry:
- %a = call <vscale x 1 x i32> @llvm.riscv.vssrl.mask.nxv1i32(
- <vscale x 1 x i32> %0,
- <vscale x 1 x i32> %1,
- i64 %2,
- <vscale x 1 x i1> %3,
- i64 %4, i64 1)
-
- ret <vscale x 1 x i32> %a
+ %0 = call <vscale x 2 x i32> @llvm.riscv.vssrl.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> poison, <vscale x 2 x i32> %op1, <vscale x 2 x i32> %shift, <vscale x 2 x i1> %mask, i64 0, i64 %vl, i64 3)
+ ret <vscale x 2 x i32> %0
}
-declare <vscale x 2 x i32> @llvm.riscv.vssrl.nxv2i32(
- <vscale x 2 x i32>,
- <vscale x 2 x i32>,
- i64,
- i64);
+declare <vscale x 2 x i32> @llvm.riscv.vssrl.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i1>, i64 immarg, i64, i64 immarg)
-define <vscale x 2 x i32> @intrinsic_vssrl_vx_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, i64 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vssrl_vx_nxv2i32_nxv2i32:
+define <vscale x 2 x i32> @test_vssrl_vx_u32m1_m(<vscale x 2 x i1> %mask, <vscale x 2 x i32> %op1, i64 %shift, i64 %vl) {
+; CHECK-LABEL: test_vssrl_vx_u32m1_m:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
-; CHECK-NEXT: vssrl.vx v8, v8, a0
+; CHECK-NEXT: csrwi vxrm, 0
+; CHECK-NEXT: vssrl.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
entry:
- %a = call <vscale x 2 x i32> @llvm.riscv.vssrl.nxv2i32(
- <vscale x 2 x i32> undef,
- <vscale x 2 x i32> %0,
- i64 %1,
- i64 %2)
-
- ret <vscale x 2 x i32> %a
+ %0 = call <vscale x 2 x i32> @llvm.riscv.vssrl.mask.nxv2i32.i64.i64(<vscale x 2 x i32> poison, <vscale x 2 x i32> %op1, i64 %shift, <vscale x 2 x i1> %mask, i64 0, i64 %vl, i64 3)
+ ret <vscale x 2 x i32> %0
}
-declare <vscale x 2 x i32> @llvm.riscv.vssrl.mask.nxv2i32(
- <vscale x 2 x i32>,
- <vscale x 2 x i32>,
- i64,
- <vscale x 2 x i1>,
- i64,
- i64);
+declare <vscale x 2 x i32> @llvm.riscv.vssrl.mask.nxv2i32.i64.i64(<vscale x 2 x i32>, <vscale x 2 x i32>, i64, <vscale x 2 x i1>, i64 immarg, i64, i64 immarg)
-define <vscale x 2 x i32> @intrinsic_vssrl_mask_vx_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, i64 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vssrl_mask_vx_nxv2i32_nxv2i32:
+define <vscale x 4 x i32> @test_vssrl_vv_u32m2_m(<vscale x 4 x i1> %mask, <vscale x 4 x i32> %op1, <vscale x 4 x i32> %shift, i64 %vl) {
+; CHECK-LABEL: test_vssrl_vv_u32m2_m:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
-; CHECK-NEXT: vssrl.vx v8, v9, a0, v0.t
+; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
+; CHECK-NEXT: csrwi vxrm, 0
+; CHECK-NEXT: vssrl.vv v8, v8, v10, v0.t
; CHECK-NEXT: ret
entry:
- %a = call <vscale x 2 x i32> @llvm.riscv.vssrl.mask.nxv2i32(
- <vscale x 2 x i32> %0,
- <vscale x 2 x i32> %1,
- i64 %2,
- <vscale x 2 x i1> %3,
- i64 %4, i64 1)
-
- ret <vscale x 2 x i32> %a
+ %0 = call <vscale x 4 x i32> @llvm.riscv.vssrl.mask.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> poison, <vscale x 4 x i32> %op1, <vscale x 4 x i32> %shift, <vscale x 4 x i1> %mask, i64 0, i64 %vl, i64 3)
+ ret <vscale x 4 x i32> %0
}
-declare <vscale x 4 x i32> @llvm.riscv.vssrl.nxv4i32(
- <vscale x 4 x i32>,
- <vscale x 4 x i32>,
- i64,
- i64);
+declare <vscale x 4 x i32> @llvm.riscv.vssrl.mask.nxv4i32.nxv4i32.i64(<vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i1>, i64 immarg, i64, i64 immarg)
-define <vscale x 4 x i32> @intrinsic_vssrl_vx_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, i64 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vssrl_vx_nxv4i32_nxv4i32:
+define <vscale x 4 x i32> @test_vssrl_vx_u32m2_m(<vscale x 4 x i1> %mask, <vscale x 4 x i32> %op1, i64 %shift, i64 %vl) {
+; CHECK-LABEL: test_vssrl_vx_u32m2_m:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
-; CHECK-NEXT: vssrl.vx v8, v8, a0
+; CHECK-NEXT: csrwi vxrm, 0
+; CHECK-NEXT: vssrl.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
entry:
- %a = call <vscale x 4 x i32> @llvm.riscv.vssrl.nxv4i32(
- <vscale x 4 x i32> undef,
- <vscale x 4 x i32> %0,
- i64 %1,
- i64 %2)
-
- ret <vscale x 4 x i32> %a
+ %0 = call <vscale x 4 x i32> @llvm.riscv.vssrl.mask.nxv4i32.i64.i64(<vscale x 4 x i32> poison, <vscale x 4 x i32> %op1, i64 %shift, <vscale x 4 x i1> %mask, i64 0, i64 %vl, i64 3)
+ ret <vscale x 4 x i32> %0
}
-declare <vscale x 4 x i32> @llvm.riscv.vssrl.mask.nxv4i32(
- <vscale x 4 x i32>,
- <vscale x 4 x i32>,
- i64,
- <vscale x 4 x i1>,
- i64,
- i64);
+declare <vscale x 4 x i32> @llvm.riscv.vssrl.mask.nxv4i32.i64.i64(<vscale x 4 x i32>, <vscale x 4 x i32>, i64, <vscale x 4 x i1>, i64 immarg, i64, i64 immarg)
-define <vscale x 4 x i32> @intrinsic_vssrl_mask_vx_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, i64 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vssrl_mask_vx_nxv4i32_nxv4i32:
+define <vscale x 8 x i32> @test_vssrl_vv_u32m4_m(<vscale x 8 x i1> %mask, <vscale x 8 x i32> %op1, <vscale x 8 x i32> %shift, i64 %vl) {
+; CHECK-LABEL: test_vssrl_vv_u32m4_m:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
-; CHECK-NEXT: vssrl.vx v8, v10, a0, v0.t
+; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
+; CHECK-NEXT: csrwi vxrm, 0
+; CHECK-NEXT: vssrl.vv v8, v8, v12, v0.t
; CHECK-NEXT: ret
entry:
- %a = call <vscale x 4 x i32> @llvm.riscv.vssrl.mask.nxv4i32(
- <vscale x 4 x i32> %0,
- <vscale x 4 x i32> %1,
- i64 %2,
- <vscale x 4 x i1> %3,
- i64 %4, i64 1)
-
- ret <vscale x 4 x i32> %a
+ %0 = call <vscale x 8 x i32> @llvm.riscv.vssrl.mask.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> poison, <vscale x 8 x i32> %op1, <vscale x 8 x i32> %shift, <vscale x 8 x i1> %mask, i64 0, i64 %vl, i64 3)
+ ret <vscale x 8 x i32> %0
}
-declare <vscale x 8 x i32> @llvm.riscv.vssrl.nxv8i32(
- <vscale x 8 x i32>,
- <vscale x 8 x i32>,
- i64,
- i64);
+declare <vscale x 8 x i32> @llvm.riscv.vssrl.mask.nxv8i32.nxv8i32.i64(<vscale x 8 x i32>, <vscale x 8 x i32>, <vscale x 8 x i32>, <vscale x 8 x i1>, i64 immarg, i64, i64 immarg)
-define <vscale x 8 x i32> @intrinsic_vssrl_vx_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, i64 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vssrl_vx_nxv8i32_nxv8i32:
+define <vscale x 8 x i32> @test_vssrl_vx_u32m4_m(<vscale x 8 x i1> %mask, <vscale x 8 x i32> %op1, i64 %shift, i64 %vl) {
+; CHECK-LABEL: test_vssrl_vx_u32m4_m:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma
-; CHECK-NEXT: vssrl.vx v8, v8, a0
+; CHECK-NEXT: csrwi vxrm, 0
+; CHECK-NEXT: vssrl.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
entry:
- %a = call <vscale x 8 x i32> @llvm.riscv.vssrl.nxv8i32(
- <vscale x 8 x i32> undef,
- <vscale x 8 x i32> %0,
- i64 %1,
- i64 %2)
-
- ret <vscale x 8 x i32> %a
+ %0 = call <vscale x 8 x i32> @llvm.riscv.vssrl.mask.nxv8i32.i64.i64(<vscale x 8 x i32> poison, <vscale x 8 x i32> %op1, i64 %shift, <vscale x 8 x i1> %mask, i64 0, i64 %vl, i64 3)
+ ret <vscale x 8 x i32> %0
}
-declare <vscale x 8 x i32> @llvm.riscv.vssrl.mask.nxv8i32(
- <vscale x 8 x i32>,
- <vscale x 8 x i32>,
- i64,
- <vscale x 8 x i1>,
- i64,
- i64);
+declare <vscale x 8 x i32> @llvm.riscv.vssrl.mask.nxv8i32.i64.i64(<vscale x 8 x i32>, <vscale x 8 x i32>, i64, <vscale x 8 x i1>, i64 immarg, i64, i64 immarg)
-define <vscale x 8 x i32> @intrinsic_vssrl_mask_vx_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, i64 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vssrl_mask_vx_nxv8i32_nxv8i32:
+define <vscale x 16 x i32> @test_vssrl_vv_u32m8_m(<vscale x 16 x i1> %mask, <vscale x 16 x i32> %op1, <vscale x 16 x i32> %shift, i64 %vl) {
+; CHECK-LABEL: test_vssrl_vv_u32m8_m:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu
-; CHECK-NEXT: vssrl.vx v8, v12, a0, v0.t
+; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma
+; CHECK-NEXT: csrwi vxrm, 0
+; CHECK-NEXT: vssrl.vv v8, v8, v16, v0.t
; CHECK-NEXT: ret
entry:
- %a = call <vscale x 8 x i32> @llvm.riscv.vssrl.mask.nxv8i32(
- <vscale x 8 x i32> %0,
- <vscale x 8 x i32> %1,
- i64 %2,
- <vscale x 8 x i1> %3,
- i64 %4, i64 1)
-
- ret <vscale x 8 x i32> %a
+ %0 = call <vscale x 16 x i32> @llvm.riscv.vssrl.mask.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> poison, <vscale x 16 x i32> %op1, <vscale x 16 x i32> %shift, <vscale x 16 x i1> %mask, i64 0, i64 %vl, i64 3)
+ ret <vscale x 16 x i32> %0
}
-declare <vscale x 16 x i32> @llvm.riscv.vssrl.nxv16i32(
- <vscale x 16 x i32>,
- <vscale x 16 x i32>,
- i64,
- i64);
+declare <vscale x 16 x i32> @llvm.riscv.vssrl.mask.nxv16i32.nxv16i32.i64(<vscale x 16 x i32>, <vscale x 16 x i32>, <vscale x 16 x i32>, <vscale x 16 x i1>, i64 immarg, i64, i64 immarg)
-define <vscale x 16 x i32> @intrinsic_vssrl_vx_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, i64 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vssrl_vx_nxv16i32_nxv16i32:
+define <vscale x 16 x i32> @test_vssrl_vx_u32m8_m(<vscale x 16 x i1> %mask, <vscale x 16 x i32> %op1, i64 %shift, i64 %vl) {
+; CHECK-LABEL: test_vssrl_vx_u32m8_m:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma
-; CHECK-NEXT: vssrl.vx v8, v8, a0
+; CHECK-NEXT: csrwi vxrm, 0
+; CHECK-NEXT: vssrl.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
entry:
- %a = call <vscale x 16 x i32> @llvm.riscv.vssrl.nxv16i32(
- <vscale x 16 x i32> undef,
- <vscale x 16 x i32> %0,
- i64 %1,
- i64 %2)
-
- ret <vscale x 16 x i32> %a
+ %0 = call <vscale x 16 x i32> @llvm.riscv.vssrl.mask.nxv16i32.i64.i64(<vscale x 16 x i32> poison, <vscale x 16 x i32> %op1, i64 %shift, <vscale x 16 x i1> %mask, i64 0, i64 %vl, i64 3)
+ ret <vscale x 16 x i32> %0
}
-declare <vscale x 16 x i32> @llvm.riscv.vssrl.mask.nxv16i32(
- <vscale x 16 x i32>,
- <vscale x 16 x i32>,
- i64,
- <vscale x 16 x i1>,
- i64,
- i64);
+declare <vscale x 16 x i32> @llvm.riscv.vssrl.mask.nxv16i32.i64.i64(<vscale x 16 x i32>, <vscale x 16 x i32>, i64, <vscale x 16 x i1>, i64 immarg, i64, i64 immarg)
-define <vscale x 16 x i32> @intrinsic_vssrl_mask_vx_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, i64 %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vssrl_mask_vx_nxv16i32_nxv16i32:
+define <vscale x 1 x i64> @test_vssrl_vv_u64m1_m(<vscale x 1 x i1> %mask, <vscale x 1 x i64> %op1, <vscale x 1 x i64> %shift, i64 %vl) {
+; CHECK-LABEL: test_vssrl_vv_u64m1_m:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu
-; CHECK-NEXT: vssrl.vx v8, v16, a0, v0.t
+; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
+; CHECK-NEXT: csrwi vxrm, 0
+; CHECK-NEXT: vssrl.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
entry:
- %a = call <vscale x 16 x i32> @llvm.riscv.vssrl.mask.nxv16i32(
- <vscale x 16 x i32> %0,
- <vscale x 16 x i32> %1,
- i64 %2,
- <vscale x 16 x i1> %3,
- i64 %4, i64 1)
-
- ret <vscale x 16 x i32> %a
+ %0 = call <vscale x 1 x i64> @llvm.riscv.vssrl.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> poison, <vscale x 1 x i64> %op1, <vscale x 1 x i64> %shift, <vscale x 1 x i1> %mask, i64 0, i64 %vl, i64 3)
+ ret <vscale x 1 x i64> %0
}
-declare <vscale x 1 x i64> @llvm.riscv.vssrl.nxv1i64(
- <vscale x 1 x i64>,
- <vscale x 1 x i64>,
- i64,
- i64);
+declare <vscale x 1 x i64> @llvm.riscv.vssrl.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i1>, i64 immarg, i64, i64 immarg)
-define <vscale x 1 x i64> @intrinsic_vssrl_vx_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, i64 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vssrl_vx_nxv1i64_nxv1i64:
+define <vscale x 1 x i64> @test_vssrl_vx_u64m1_m(<vscale x 1 x i1> %mask, <vscale x 1 x i64> %op1, i64 %shift, i64 %vl) {
+; CHECK-LABEL: test_vssrl_vx_u64m1_m:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
-; CHECK-NEXT: vssrl.vx v8, v8, a0
+; CHECK-NEXT: csrwi vxrm, 0
+; CHECK-NEXT: vssrl.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
entry:
- %a = call <vscale x 1 x i64> @llvm.riscv.vssrl.nxv1i64(
- <vscale x 1 x i64> undef,
- <vscale x 1 x i64> %0,
- i64 %1,
- i64 %2)
-
- ret <vscale x 1 x i64> %a
+ %0 = call <vscale x 1 x i64> @llvm.riscv.vssrl.mask.nxv1i64.i64.i64(<vscale x 1 x i64> poison, <vscale x 1 x i64> %op1, i64 %shift, <vscale x 1 x i1> %mask, i64 0, i64 %vl, i64 3)
+ ret <vscale x 1 x i64> %0
}
-declare <vscale x 1 x i64> @llvm.riscv.vssrl.mask.nxv1i64(
- <vscale x 1 x i64>,
- <vscale x 1 x i64>,
- i64,
- <vscale x 1 x i1>,
- i64,
- i64);
+declare <vscale x 1 x i64> @llvm.riscv.vssrl.mask.nxv1i64.i64.i64(<vscale x 1 x i64>, <vscale x 1 x i64>, i64, <vscale x 1 x i1>, i64 immarg, i64, i64 immarg)
-define <vscale x 1 x i64> @intrinsic_vssrl_mask_vx_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i64 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vssrl_mask_vx_nxv1i64_nxv1i64:
+define <vscale x 2 x i64> @test_vssrl_vv_u64m2_m(<vscale x 2 x i1> %mask, <vscale x 2 x i64> %op1, <vscale x 2 x i64> %shift, i64 %vl) {
+; CHECK-LABEL: test_vssrl_vv_u64m2_m:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
-; CHECK-NEXT: vssrl.vx v8, v9, a0, v0.t
+; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
+; CHECK-NEXT: csrwi vxrm, 0
+; CHECK-NEXT: vssrl.vv v8, v8, v10, v0.t
; CHECK-NEXT: ret
entry:
- %a = call <vscale x 1 x i64> @llvm.riscv.vssrl.mask.nxv1i64(
- <vscale x 1 x i64> %0,
- <vscale x 1 x i64> %1,
- i64 %2,
- <vscale x 1 x i1> %3,
- i64 %4, i64 1)
-
- ret <vscale x 1 x i64> %a
+ %0 = call <vscale x 2 x i64> @llvm.riscv.vssrl.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> poison, <vscale x 2 x i64> %op1, <vscale x 2 x i64> %shift, <vscale x 2 x i1> %mask, i64 0, i64 %vl, i64 3)
+ ret <vscale x 2 x i64> %0
}
-declare <vscale x 2 x i64> @llvm.riscv.vssrl.nxv2i64(
- <vscale x 2 x i64>,
- <vscale x 2 x i64>,
- i64,
- i64);
+declare <vscale x 2 x i64> @llvm.riscv.vssrl.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i1>, i64 immarg, i64, i64 immarg)
-define <vscale x 2 x i64> @intrinsic_vssrl_vx_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, i64 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vssrl_vx_nxv2i64_nxv2i64:
+define <vscale x 2 x i64> @test_vssrl_vx_u64m2_m(<vscale x 2 x i1> %mask, <vscale x 2 x i64> %op1, i64 %shift, i64 %vl) {
+; CHECK-LABEL: test_vssrl_vx_u64m2_m:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma
-; CHECK-NEXT: vssrl.vx v8, v8, a0
+; CHECK-NEXT: csrwi vxrm, 0
+; CHECK-NEXT: vssrl.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
entry:
- %a = call <vscale x 2 x i64> @llvm.riscv.vssrl.nxv2i64(
- <vscale x 2 x i64> undef,
- <vscale x 2 x i64> %0,
- i64 %1,
- i64 %2)
-
- ret <vscale x 2 x i64> %a
+ %0 = call <vscale x 2 x i64> @llvm.riscv.vssrl.mask.nxv2i64.i64.i64(<vscale x 2 x i64> poison, <vscale x 2 x i64> %op1, i64 %shift, <vscale x 2 x i1> %mask, i64 0, i64 %vl, i64 3)
+ ret <vscale x 2 x i64> %0
}
-declare <vscale x 2 x i64> @llvm.riscv.vssrl.mask.nxv2i64(
- <vscale x 2 x i64>,
- <vscale x 2 x i64>,
- i64,
- <vscale x 2 x i1>,
- i64,
- i64);
+declare <vscale x 2 x i64> @llvm.riscv.vssrl.mask.nxv2i64.i64.i64(<vscale x 2 x i64>, <vscale x 2 x i64>, i64, <vscale x 2 x i1>, i64 immarg, i64, i64 immarg)
-define <vscale x 2 x i64> @intrinsic_vssrl_mask_vx_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, i64 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vssrl_mask_vx_nxv2i64_nxv2i64:
+define <vscale x 4 x i64> @test_vssrl_vv_u64m4_m(<vscale x 4 x i1> %mask, <vscale x 4 x i64> %op1, <vscale x 4 x i64> %shift, i64 %vl) {
+; CHECK-LABEL: test_vssrl_vv_u64m4_m:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu
-; CHECK-NEXT: vssrl.vx v8, v10, a0, v0.t
+; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
+; CHECK-NEXT: csrwi vxrm, 0
+; CHECK-NEXT: vssrl.vv v8, v8, v12, v0.t
; CHECK-NEXT: ret
entry:
- %a = call <vscale x 2 x i64> @llvm.riscv.vssrl.mask.nxv2i64(
- <vscale x 2 x i64> %0,
- <vscale x 2 x i64> %1,
- i64 %2,
- <vscale x 2 x i1> %3,
- i64 %4, i64 1)
-
- ret <vscale x 2 x i64> %a
+ %0 = call <vscale x 4 x i64> @llvm.riscv.vssrl.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> poison, <vscale x 4 x i64> %op1, <vscale x 4 x i64> %shift, <vscale x 4 x i1> %mask, i64 0, i64 %vl, i64 3)
+ ret <vscale x 4 x i64> %0
}
-declare <vscale x 4 x i64> @llvm.riscv.vssrl.nxv4i64(
- <vscale x 4 x i64>,
- <vscale x 4 x i64>,
- i64,
- i64);
+declare <vscale x 4 x i64> @llvm.riscv.vssrl.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i64>, <vscale x 4 x i64>, <vscale x 4 x i64>, <vscale x 4 x i1>, i64 immarg, i64, i64 immarg)
-define <vscale x 4 x i64> @intrinsic_vssrl_vx_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, i64 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vssrl_vx_nxv4i64_nxv4i64:
+define <vscale x 4 x i64> @test_vssrl_vx_u64m4_m(<vscale x 4 x i1> %mask, <vscale x 4 x i64> %op1, i64 %shift, i64 %vl) {
+; CHECK-LABEL: test_vssrl_vx_u64m4_m:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma
-; CHECK-NEXT: vssrl.vx v8, v8, a0
+; CHECK-NEXT: csrwi vxrm, 0
+; CHECK-NEXT: vssrl.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
entry:
- %a = call <vscale x 4 x i64> @llvm.riscv.vssrl.nxv4i64(
- <vscale x 4 x i64> undef,
- <vscale x 4 x i64> %0,
- i64 %1,
- i64 %2)
-
- ret <vscale x 4 x i64> %a
+ %0 = call <vscale x 4 x i64> @llvm.riscv.vssrl.mask.nxv4i64.i64.i64(<vscale x 4 x i64> poison, <vscale x 4 x i64> %op1, i64 %shift, <vscale x 4 x i1> %mask, i64 0, i64 %vl, i64 3)
+ ret <vscale x 4 x i64> %0
}
-declare <vscale x 4 x i64> @llvm.riscv.vssrl.mask.nxv4i64(
- <vscale x 4 x i64>,
- <vscale x 4 x i64>,
- i64,
- <vscale x 4 x i1>,
- i64,
- i64);
+declare <vscale x 4 x i64> @llvm.riscv.vssrl.mask.nxv4i64.i64.i64(<vscale x 4 x i64>, <vscale x 4 x i64>, i64, <vscale x 4 x i1>, i64 immarg, i64, i64 immarg)
-define <vscale x 4 x i64> @intrinsic_vssrl_mask_vx_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, i64 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vssrl_mask_vx_nxv4i64_nxv4i64:
+define <vscale x 8 x i64> @test_vssrl_vv_u64m8_m(<vscale x 8 x i1> %mask, <vscale x 8 x i64> %op1, <vscale x 8 x i64> %shift, i64 %vl) {
+; CHECK-LABEL: test_vssrl_vv_u64m8_m:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu
-; CHECK-NEXT: vssrl.vx v8, v12, a0, v0.t
+; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; CHECK-NEXT: csrwi vxrm, 0
+; CHECK-NEXT: vssrl.vv v8, v8, v16, v0.t
; CHECK-NEXT: ret
entry:
- %a = call <vscale x 4 x i64> @llvm.riscv.vssrl.mask.nxv4i64(
- <vscale x 4 x i64> %0,
- <vscale x 4 x i64> %1,
- i64 %2,
- <vscale x 4 x i1> %3,
- i64 %4, i64 1)
-
- ret <vscale x 4 x i64> %a
+ %0 = call <vscale x 8 x i64> @llvm.riscv.vssrl.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> poison, <vscale x 8 x i64> %op1, <vscale x 8 x i64> %shift, <vscale x 8 x i1> %mask, i64 0, i64 %vl, i64 3)
+ ret <vscale x 8 x i64> %0
}
-declare <vscale x 8 x i64> @llvm.riscv.vssrl.nxv8i64(
- <vscale x 8 x i64>,
- <vscale x 8 x i64>,
- i64,
- i64);
+declare <vscale x 8 x i64> @llvm.riscv.vssrl.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64>, <vscale x 8 x i64>, <vscale x 8 x i64>, <vscale x 8 x i1>, i64 immarg, i64, i64 immarg)
-define <vscale x 8 x i64> @intrinsic_vssrl_vx_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, i64 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vssrl_vx_nxv8i64_nxv8i64:
+define <vscale x 8 x i64> @test_vssrl_vx_u64m8_m(<vscale x 8 x i1> %mask, <vscale x 8 x i64> %op1, i64 %shift, i64 %vl) {
+; CHECK-LABEL: test_vssrl_vx_u64m8_m:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, ma
-; CHECK-NEXT: vssrl.vx v8, v8, a0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i64> @llvm.riscv.vssrl.nxv8i64(
- <vscale x 8 x i64> undef,
- <vscale x 8 x i64> %0,
- i64 %1,
- i64 %2)
-
- ret <vscale x 8 x i64> %a
-}
-
-declare <vscale x 8 x i64> @llvm.riscv.vssrl.mask.nxv8i64(
- <vscale x 8 x i64>,
- <vscale x 8 x i64>,
- i64,
- <vscale x 8 x i1>,
- i64,
- i64);
-
-define <vscale x 8 x i64> @intrinsic_vssrl_mask_vx_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, i64 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vssrl_mask_vx_nxv8i64_nxv8i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu
-; CHECK-NEXT: vssrl.vx v8, v16, a0, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i64> @llvm.riscv.vssrl.mask.nxv8i64(
- <vscale x 8 x i64> %0,
- <vscale x 8 x i64> %1,
- i64 %2,
- <vscale x 8 x i1> %3,
- i64 %4, i64 1)
-
- ret <vscale x 8 x i64> %a
-}
-
-define <vscale x 1 x i8> @intrinsic_vssrl_vi_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vssrl_vi_nxv1i8_nxv1i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
-; CHECK-NEXT: vssrl.vi v8, v8, 9
+; CHECK-NEXT: csrwi vxrm, 0
+; CHECK-NEXT: vssrl.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
entry:
- %a = call <vscale x 1 x i8> @llvm.riscv.vssrl.nxv1i8(
- <vscale x 1 x i8> undef,
- <vscale x 1 x i8> %0,
- i64 9,
- i64 %1)
-
- ret <vscale x 1 x i8> %a
-}
-
-define <vscale x 1 x i8> @intrinsic_vssrl_mask_vi_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vssrl_mask_vi_nxv1i8_nxv1i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu
-; CHECK-NEXT: vssrl.vi v8, v9, 9, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i8> @llvm.riscv.vssrl.mask.nxv1i8(
- <vscale x 1 x i8> %0,
- <vscale x 1 x i8> %1,
- i64 9,
- <vscale x 1 x i1> %2,
- i64 %3, i64 1)
-
- ret <vscale x 1 x i8> %a
+ %0 = call <vscale x 8 x i64> @llvm.riscv.vssrl.mask.nxv8i64.i64.i64(<vscale x 8 x i64> poison, <vscale x 8 x i64> %op1, i64 %shift, <vscale x 8 x i1> %mask, i64 0, i64 %vl, i64 3)
+ ret <vscale x 8 x i64> %0
}
-define <vscale x 2 x i8> @intrinsic_vssrl_vi_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vssrl_vi_nxv2i8_nxv2i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma
-; CHECK-NEXT: vssrl.vi v8, v8, 9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i8> @llvm.riscv.vssrl.nxv2i8(
- <vscale x 2 x i8> undef,
- <vscale x 2 x i8> %0,
- i64 9,
- i64 %1)
-
- ret <vscale x 2 x i8> %a
-}
-
-define <vscale x 2 x i8> @intrinsic_vssrl_mask_vi_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vssrl_mask_vi_nxv2i8_nxv2i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu
-; CHECK-NEXT: vssrl.vi v8, v9, 9, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i8> @llvm.riscv.vssrl.mask.nxv2i8(
- <vscale x 2 x i8> %0,
- <vscale x 2 x i8> %1,
- i64 9,
- <vscale x 2 x i1> %2,
- i64 %3, i64 1)
-
- ret <vscale x 2 x i8> %a
-}
-
-define <vscale x 4 x i8> @intrinsic_vssrl_vi_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vssrl_vi_nxv4i8_nxv4i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
-; CHECK-NEXT: vssrl.vi v8, v8, 9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i8> @llvm.riscv.vssrl.nxv4i8(
- <vscale x 4 x i8> undef,
- <vscale x 4 x i8> %0,
- i64 9,
- i64 %1)
-
- ret <vscale x 4 x i8> %a
-}
-
-define <vscale x 4 x i8> @intrinsic_vssrl_mask_vi_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vssrl_mask_vi_nxv4i8_nxv4i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu
-; CHECK-NEXT: vssrl.vi v8, v9, 9, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i8> @llvm.riscv.vssrl.mask.nxv4i8(
- <vscale x 4 x i8> %0,
- <vscale x 4 x i8> %1,
- i64 9,
- <vscale x 4 x i1> %2,
- i64 %3, i64 1)
-
- ret <vscale x 4 x i8> %a
-}
-
-define <vscale x 8 x i8> @intrinsic_vssrl_vi_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vssrl_vi_nxv8i8_nxv8i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma
-; CHECK-NEXT: vssrl.vi v8, v8, 9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i8> @llvm.riscv.vssrl.nxv8i8(
- <vscale x 8 x i8> undef,
- <vscale x 8 x i8> %0,
- i64 9,
- i64 %1)
-
- ret <vscale x 8 x i8> %a
-}
-
-define <vscale x 8 x i8> @intrinsic_vssrl_mask_vi_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vssrl_mask_vi_nxv8i8_nxv8i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu
-; CHECK-NEXT: vssrl.vi v8, v9, 9, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i8> @llvm.riscv.vssrl.mask.nxv8i8(
- <vscale x 8 x i8> %0,
- <vscale x 8 x i8> %1,
- i64 9,
- <vscale x 8 x i1> %2,
- i64 %3, i64 1)
-
- ret <vscale x 8 x i8> %a
-}
-
-define <vscale x 16 x i8> @intrinsic_vssrl_vi_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vssrl_vi_nxv16i8_nxv16i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma
-; CHECK-NEXT: vssrl.vi v8, v8, 9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 16 x i8> @llvm.riscv.vssrl.nxv16i8(
- <vscale x 16 x i8> undef,
- <vscale x 16 x i8> %0,
- i64 9,
- i64 %1)
-
- ret <vscale x 16 x i8> %a
-}
-
-define <vscale x 16 x i8> @intrinsic_vssrl_mask_vi_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, <vscale x 16 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vssrl_mask_vi_nxv16i8_nxv16i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu
-; CHECK-NEXT: vssrl.vi v8, v10, 9, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 16 x i8> @llvm.riscv.vssrl.mask.nxv16i8(
- <vscale x 16 x i8> %0,
- <vscale x 16 x i8> %1,
- i64 9,
- <vscale x 16 x i1> %2,
- i64 %3, i64 1)
-
- ret <vscale x 16 x i8> %a
-}
-
-define <vscale x 32 x i8> @intrinsic_vssrl_vi_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vssrl_vi_nxv32i8_nxv32i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma
-; CHECK-NEXT: vssrl.vi v8, v8, 9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 32 x i8> @llvm.riscv.vssrl.nxv32i8(
- <vscale x 32 x i8> undef,
- <vscale x 32 x i8> %0,
- i64 9,
- i64 %1)
-
- ret <vscale x 32 x i8> %a
-}
-
-define <vscale x 32 x i8> @intrinsic_vssrl_mask_vi_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, <vscale x 32 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vssrl_mask_vi_nxv32i8_nxv32i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu
-; CHECK-NEXT: vssrl.vi v8, v12, 9, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 32 x i8> @llvm.riscv.vssrl.mask.nxv32i8(
- <vscale x 32 x i8> %0,
- <vscale x 32 x i8> %1,
- i64 9,
- <vscale x 32 x i1> %2,
- i64 %3, i64 1)
-
- ret <vscale x 32 x i8> %a
-}
-
-define <vscale x 64 x i8> @intrinsic_vssrl_vi_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vssrl_vi_nxv64i8_nxv64i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma
-; CHECK-NEXT: vssrl.vi v8, v8, 9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 64 x i8> @llvm.riscv.vssrl.nxv64i8(
- <vscale x 64 x i8> undef,
- <vscale x 64 x i8> %0,
- i64 9,
- i64 %1)
-
- ret <vscale x 64 x i8> %a
-}
-
-define <vscale x 64 x i8> @intrinsic_vssrl_mask_vi_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vssrl_mask_vi_nxv64i8_nxv64i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, mu
-; CHECK-NEXT: vssrl.vi v8, v16, 9, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 64 x i8> @llvm.riscv.vssrl.mask.nxv64i8(
- <vscale x 64 x i8> %0,
- <vscale x 64 x i8> %1,
- i64 9,
- <vscale x 64 x i1> %2,
- i64 %3, i64 1)
-
- ret <vscale x 64 x i8> %a
-}
-
-define <vscale x 1 x i16> @intrinsic_vssrl_vi_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vssrl_vi_nxv1i16_nxv1i16_i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
-; CHECK-NEXT: vssrl.vi v8, v8, 9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i16> @llvm.riscv.vssrl.nxv1i16(
- <vscale x 1 x i16> undef,
- <vscale x 1 x i16> %0,
- i64 9,
- i64 %1)
-
- ret <vscale x 1 x i16> %a
-}
-
-define <vscale x 1 x i16> @intrinsic_vssrl_mask_vi_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vssrl_mask_vi_nxv1i16_nxv1i16_i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
-; CHECK-NEXT: vssrl.vi v8, v9, 9, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i16> @llvm.riscv.vssrl.mask.nxv1i16(
- <vscale x 1 x i16> %0,
- <vscale x 1 x i16> %1,
- i64 9,
- <vscale x 1 x i1> %2,
- i64 %3, i64 1)
-
- ret <vscale x 1 x i16> %a
-}
-
-define <vscale x 2 x i16> @intrinsic_vssrl_vi_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vssrl_vi_nxv2i16_nxv2i16_i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
-; CHECK-NEXT: vssrl.vi v8, v8, 9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i16> @llvm.riscv.vssrl.nxv2i16(
- <vscale x 2 x i16> undef,
- <vscale x 2 x i16> %0,
- i64 9,
- i64 %1)
-
- ret <vscale x 2 x i16> %a
-}
-
-define <vscale x 2 x i16> @intrinsic_vssrl_mask_vi_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vssrl_mask_vi_nxv2i16_nxv2i16_i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu
-; CHECK-NEXT: vssrl.vi v8, v9, 9, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i16> @llvm.riscv.vssrl.mask.nxv2i16(
- <vscale x 2 x i16> %0,
- <vscale x 2 x i16> %1,
- i64 9,
- <vscale x 2 x i1> %2,
- i64 %3, i64 1)
-
- ret <vscale x 2 x i16> %a
-}
-
-define <vscale x 4 x i16> @intrinsic_vssrl_vi_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vssrl_vi_nxv4i16_nxv4i16_i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
-; CHECK-NEXT: vssrl.vi v8, v8, 9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i16> @llvm.riscv.vssrl.nxv4i16(
- <vscale x 4 x i16> undef,
- <vscale x 4 x i16> %0,
- i64 9,
- i64 %1)
-
- ret <vscale x 4 x i16> %a
-}
-
-define <vscale x 4 x i16> @intrinsic_vssrl_mask_vi_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vssrl_mask_vi_nxv4i16_nxv4i16_i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu
-; CHECK-NEXT: vssrl.vi v8, v9, 9, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i16> @llvm.riscv.vssrl.mask.nxv4i16(
- <vscale x 4 x i16> %0,
- <vscale x 4 x i16> %1,
- i64 9,
- <vscale x 4 x i1> %2,
- i64 %3, i64 1)
-
- ret <vscale x 4 x i16> %a
-}
-
-define <vscale x 8 x i16> @intrinsic_vssrl_vi_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vssrl_vi_nxv8i16_nxv8i16_i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
-; CHECK-NEXT: vssrl.vi v8, v8, 9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i16> @llvm.riscv.vssrl.nxv8i16(
- <vscale x 8 x i16> undef,
- <vscale x 8 x i16> %0,
- i64 9,
- i64 %1)
-
- ret <vscale x 8 x i16> %a
-}
-
-define <vscale x 8 x i16> @intrinsic_vssrl_mask_vi_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vssrl_mask_vi_nxv8i16_nxv8i16_i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu
-; CHECK-NEXT: vssrl.vi v8, v10, 9, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i16> @llvm.riscv.vssrl.mask.nxv8i16(
- <vscale x 8 x i16> %0,
- <vscale x 8 x i16> %1,
- i64 9,
- <vscale x 8 x i1> %2,
- i64 %3, i64 1)
-
- ret <vscale x 8 x i16> %a
-}
-
-define <vscale x 16 x i16> @intrinsic_vssrl_vi_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vssrl_vi_nxv16i16_nxv16i16_i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma
-; CHECK-NEXT: vssrl.vi v8, v8, 9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 16 x i16> @llvm.riscv.vssrl.nxv16i16(
- <vscale x 16 x i16> undef,
- <vscale x 16 x i16> %0,
- i64 9,
- i64 %1)
-
- ret <vscale x 16 x i16> %a
-}
-
-define <vscale x 16 x i16> @intrinsic_vssrl_mask_vi_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, <vscale x 16 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vssrl_mask_vi_nxv16i16_nxv16i16_i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu
-; CHECK-NEXT: vssrl.vi v8, v12, 9, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 16 x i16> @llvm.riscv.vssrl.mask.nxv16i16(
- <vscale x 16 x i16> %0,
- <vscale x 16 x i16> %1,
- i64 9,
- <vscale x 16 x i1> %2,
- i64 %3, i64 1)
-
- ret <vscale x 16 x i16> %a
-}
-
-define <vscale x 32 x i16> @intrinsic_vssrl_vi_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vssrl_vi_nxv32i16_nxv32i16_i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma
-; CHECK-NEXT: vssrl.vi v8, v8, 9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 32 x i16> @llvm.riscv.vssrl.nxv32i16(
- <vscale x 32 x i16> undef,
- <vscale x 32 x i16> %0,
- i64 9,
- i64 %1)
-
- ret <vscale x 32 x i16> %a
-}
-
-define <vscale x 32 x i16> @intrinsic_vssrl_mask_vi_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, <vscale x 32 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vssrl_mask_vi_nxv32i16_nxv32i16_i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu
-; CHECK-NEXT: vssrl.vi v8, v16, 9, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 32 x i16> @llvm.riscv.vssrl.mask.nxv32i16(
- <vscale x 32 x i16> %0,
- <vscale x 32 x i16> %1,
- i64 9,
- <vscale x 32 x i1> %2,
- i64 %3, i64 1)
-
- ret <vscale x 32 x i16> %a
-}
-
-define <vscale x 1 x i32> @intrinsic_vssrl_vi_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vssrl_vi_nxv1i32_nxv1i32_i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
-; CHECK-NEXT: vssrl.vi v8, v8, 9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i32> @llvm.riscv.vssrl.nxv1i32(
- <vscale x 1 x i32> undef,
- <vscale x 1 x i32> %0,
- i64 9,
- i64 %1)
-
- ret <vscale x 1 x i32> %a
-}
-
-define <vscale x 1 x i32> @intrinsic_vssrl_mask_vi_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vssrl_mask_vi_nxv1i32_nxv1i32_i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu
-; CHECK-NEXT: vssrl.vi v8, v9, 9, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i32> @llvm.riscv.vssrl.mask.nxv1i32(
- <vscale x 1 x i32> %0,
- <vscale x 1 x i32> %1,
- i64 9,
- <vscale x 1 x i1> %2,
- i64 %3, i64 1)
-
- ret <vscale x 1 x i32> %a
-}
-
-define <vscale x 2 x i32> @intrinsic_vssrl_vi_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vssrl_vi_nxv2i32_nxv2i32_i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
-; CHECK-NEXT: vssrl.vi v8, v8, 9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i32> @llvm.riscv.vssrl.nxv2i32(
- <vscale x 2 x i32> undef,
- <vscale x 2 x i32> %0,
- i64 9,
- i64 %1)
-
- ret <vscale x 2 x i32> %a
-}
-
-define <vscale x 2 x i32> @intrinsic_vssrl_mask_vi_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vssrl_mask_vi_nxv2i32_nxv2i32_i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
-; CHECK-NEXT: vssrl.vi v8, v9, 9, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i32> @llvm.riscv.vssrl.mask.nxv2i32(
- <vscale x 2 x i32> %0,
- <vscale x 2 x i32> %1,
- i64 9,
- <vscale x 2 x i1> %2,
- i64 %3, i64 1)
-
- ret <vscale x 2 x i32> %a
-}
-
-define <vscale x 4 x i32> @intrinsic_vssrl_vi_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vssrl_vi_nxv4i32_nxv4i32_i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
-; CHECK-NEXT: vssrl.vi v8, v8, 9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i32> @llvm.riscv.vssrl.nxv4i32(
- <vscale x 4 x i32> undef,
- <vscale x 4 x i32> %0,
- i64 9,
- i64 %1)
-
- ret <vscale x 4 x i32> %a
-}
-
-define <vscale x 4 x i32> @intrinsic_vssrl_mask_vi_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vssrl_mask_vi_nxv4i32_nxv4i32_i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu
-; CHECK-NEXT: vssrl.vi v8, v10, 9, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i32> @llvm.riscv.vssrl.mask.nxv4i32(
- <vscale x 4 x i32> %0,
- <vscale x 4 x i32> %1,
- i64 9,
- <vscale x 4 x i1> %2,
- i64 %3, i64 1)
-
- ret <vscale x 4 x i32> %a
-}
-
-define <vscale x 8 x i32> @intrinsic_vssrl_vi_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vssrl_vi_nxv8i32_nxv8i32_i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
-; CHECK-NEXT: vssrl.vi v8, v8, 9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i32> @llvm.riscv.vssrl.nxv8i32(
- <vscale x 8 x i32> undef,
- <vscale x 8 x i32> %0,
- i64 9,
- i64 %1)
-
- ret <vscale x 8 x i32> %a
-}
-
-define <vscale x 8 x i32> @intrinsic_vssrl_mask_vi_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vssrl_mask_vi_nxv8i32_nxv8i32_i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu
-; CHECK-NEXT: vssrl.vi v8, v12, 9, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i32> @llvm.riscv.vssrl.mask.nxv8i32(
- <vscale x 8 x i32> %0,
- <vscale x 8 x i32> %1,
- i64 9,
- <vscale x 8 x i1> %2,
- i64 %3, i64 1)
-
- ret <vscale x 8 x i32> %a
-}
-
-define <vscale x 16 x i32> @intrinsic_vssrl_vi_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vssrl_vi_nxv16i32_nxv16i32_i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma
-; CHECK-NEXT: vssrl.vi v8, v8, 9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 16 x i32> @llvm.riscv.vssrl.nxv16i32(
- <vscale x 16 x i32> undef,
- <vscale x 16 x i32> %0,
- i64 9,
- i64 %1)
-
- ret <vscale x 16 x i32> %a
-}
-
-define <vscale x 16 x i32> @intrinsic_vssrl_mask_vi_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, <vscale x 16 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vssrl_mask_vi_nxv16i32_nxv16i32_i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu
-; CHECK-NEXT: vssrl.vi v8, v16, 9, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 16 x i32> @llvm.riscv.vssrl.mask.nxv16i32(
- <vscale x 16 x i32> %0,
- <vscale x 16 x i32> %1,
- i64 9,
- <vscale x 16 x i1> %2,
- i64 %3, i64 1)
-
- ret <vscale x 16 x i32> %a
-}
-
-define <vscale x 1 x i64> @intrinsic_vssrl_vi_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vssrl_vi_nxv1i64_nxv1i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
-; CHECK-NEXT: vssrl.vi v8, v8, 9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i64> @llvm.riscv.vssrl.nxv1i64(
- <vscale x 1 x i64> undef,
- <vscale x 1 x i64> %0,
- i64 9,
- i64 %1)
-
- ret <vscale x 1 x i64> %a
-}
-
-define <vscale x 1 x i64> @intrinsic_vssrl_mask_vi_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vssrl_mask_vi_nxv1i64_nxv1i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu
-; CHECK-NEXT: vssrl.vi v8, v9, 9, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i64> @llvm.riscv.vssrl.mask.nxv1i64(
- <vscale x 1 x i64> %0,
- <vscale x 1 x i64> %1,
- i64 9,
- <vscale x 1 x i1> %2,
- i64 %3, i64 1)
-
- ret <vscale x 1 x i64> %a
-}
-
-define <vscale x 2 x i64> @intrinsic_vssrl_vi_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vssrl_vi_nxv2i64_nxv2i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
-; CHECK-NEXT: vssrl.vi v8, v8, 9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i64> @llvm.riscv.vssrl.nxv2i64(
- <vscale x 2 x i64> undef,
- <vscale x 2 x i64> %0,
- i64 9,
- i64 %1)
-
- ret <vscale x 2 x i64> %a
-}
-
-define <vscale x 2 x i64> @intrinsic_vssrl_mask_vi_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vssrl_mask_vi_nxv2i64_nxv2i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu
-; CHECK-NEXT: vssrl.vi v8, v10, 9, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i64> @llvm.riscv.vssrl.mask.nxv2i64(
- <vscale x 2 x i64> %0,
- <vscale x 2 x i64> %1,
- i64 9,
- <vscale x 2 x i1> %2,
- i64 %3, i64 1)
-
- ret <vscale x 2 x i64> %a
-}
-
-define <vscale x 4 x i64> @intrinsic_vssrl_vi_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vssrl_vi_nxv4i64_nxv4i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
-; CHECK-NEXT: vssrl.vi v8, v8, 9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i64> @llvm.riscv.vssrl.nxv4i64(
- <vscale x 4 x i64> undef,
- <vscale x 4 x i64> %0,
- i64 9,
- i64 %1)
-
- ret <vscale x 4 x i64> %a
-}
-
-define <vscale x 4 x i64> @intrinsic_vssrl_mask_vi_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vssrl_mask_vi_nxv4i64_nxv4i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu
-; CHECK-NEXT: vssrl.vi v8, v12, 9, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i64> @llvm.riscv.vssrl.mask.nxv4i64(
- <vscale x 4 x i64> %0,
- <vscale x 4 x i64> %1,
- i64 9,
- <vscale x 4 x i1> %2,
- i64 %3, i64 1)
-
- ret <vscale x 4 x i64> %a
-}
-
-define <vscale x 8 x i64> @intrinsic_vssrl_vi_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vssrl_vi_nxv8i64_nxv8i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
-; CHECK-NEXT: vssrl.vi v8, v8, 9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i64> @llvm.riscv.vssrl.nxv8i64(
- <vscale x 8 x i64> undef,
- <vscale x 8 x i64> %0,
- i64 9,
- i64 %1)
-
- ret <vscale x 8 x i64> %a
-}
-
-define <vscale x 8 x i64> @intrinsic_vssrl_mask_vi_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vssrl_mask_vi_nxv8i64_nxv8i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu
-; CHECK-NEXT: vssrl.vi v8, v16, 9, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i64> @llvm.riscv.vssrl.mask.nxv8i64(
- <vscale x 8 x i64> %0,
- <vscale x 8 x i64> %1,
- i64 9,
- <vscale x 8 x i1> %2,
- i64 %3, i64 1)
-
- ret <vscale x 8 x i64> %a
-}
+declare <vscale x 8 x i64> @llvm.riscv.vssrl.mask.nxv8i64.i64.i64(<vscale x 8 x i64>, <vscale x 8 x i64>, i64, <vscale x 8 x i1>, i64 immarg, i64, i64 immarg)
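For context, a minimal sketch (not part of the patch) of how the masked declaration added above would be called now that the intrinsic carries the extra rounding-mode operand; the function name, the vxrm value of 0, and the policy value of 1 are illustrative assumptions only:

; Declaration copied from the line added above so the sketch is a self-contained module.
declare <vscale x 8 x i64> @llvm.riscv.vssrl.mask.nxv8i64.i64.i64(<vscale x 8 x i64>, <vscale x 8 x i64>, i64, <vscale x 8 x i1>, i64 immarg, i64, i64 immarg)

define <vscale x 8 x i64> @example_vssrl_mask_vx(<vscale x 8 x i64> %maskedoff, <vscale x 8 x i64> %op, i64 %shift, <vscale x 8 x i1> %mask, i64 %vl) {
entry:
  ; The two immarg parameters must be constants at the call site.
  %r = call <vscale x 8 x i64> @llvm.riscv.vssrl.mask.nxv8i64.i64.i64(
      <vscale x 8 x i64> %maskedoff,
      <vscale x 8 x i64> %op,
      i64 %shift,
      <vscale x 8 x i1> %mask,
      i64 0,      ; rounding mode (vxrm), assumed value for illustration
      i64 %vl,
      i64 1)      ; policy, assumed value for illustration
  ret <vscale x 8 x i64> %r
}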