This adds intrinsics for vmv.x.s and vmv.s.x.
I've used stricter type constraints on these intrinsics than we've been using on the arithmetic intrinsics so far. This avoids having to pass the scalar type to the Intrinsic::getDeclaration call when creating these intrinsics.
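Because the scalar result/operand type is derived from the vector type by LLVMVectorElementType<0>, only the vector type has to be supplied as an overloaded type. As a rough sketch of what a builder-side call can look like (the helper name and surrounding context are made up for illustration and are not part of this patch):

  #include "llvm/IR/IRBuilder.h"
  #include "llvm/IR/IntrinsicsRISCV.h"
  using namespace llvm;

  // The scalar type is implied by the vector type, so it is not passed to
  // Intrinsic::getDeclaration.
  static Value *emitVMVXS(IRBuilder<> &Builder, Module &M, Value *Vec) {
    Function *Decl = Intrinsic::getDeclaration(
        &M, Intrinsic::riscv_vmv_x_s, {Vec->getType()});
    return Builder.CreateCall(Decl, {Vec});
  }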
A custom ISD node is used for vmv.x.s so that computeNumSignBitsForTargetNode can be implemented for it, which allows sign extends of the result to be removed.
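The payoff is in generic DAG combines: ComputeNumSignBits consults the target hook, so a sign_extend_inreg of the extracted value can be proven redundant when the element is narrower than XLEN. A simplified illustration of that kind of check (not code from this patch; the helper name is invented):

  #include "llvm/CodeGen/SelectionDAG.h"
  using namespace llvm;

  // If Ext is (sign_extend_inreg (VMV_X_S vec), vt), ComputeNumSignBits
  // reaches computeNumSignBitsForTargetNode for VMV_X_S and can prove the
  // value is already sign extended, so the extend folds away.
  static SDValue dropRedundantSExtInReg(SelectionDAG &DAG, SDValue Ext) {
    SDValue Src = Ext.getOperand(0);
    unsigned ExtBits =
        cast<VTSDNode>(Ext.getOperand(1))->getVT().getScalarSizeInBits();
    unsigned VTBits = Ext.getScalarValueSizeInBits();
    if (DAG.ComputeNumSignBits(Src) >= VTBits - ExtBits + 1)
      return Src; // the sign_extend_inreg is a no-op
    return SDValue();
  }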
I also modified the MC layer description of these instructions to show the tied source/dest operand. This differs from what we do for masked instructions, where we drop the tied source operand when converting to MC, but it is a more accurate description of the instruction. We can't do the same for masked instructions since we use the same MC instruction for both masked and unmasked forms. Tools like llvm-mca operate at the MC layer and rely on ins/outs and Uses/Defs for analysis, so I don't know whether we'll be able to maintain the current behavior for masked instructions. I went with the accurate description here since it was easy.
Reviewed By: frasercrmck
Differential Revision: https://reviews.llvm.org/D93365
def int_riscv_vmv_v_v : RISCVUnary;
def int_riscv_vmv_v_x : RISCVUnary;
+
+ def int_riscv_vmv_x_s : Intrinsic<[LLVMVectorElementType<0>],
+ [llvm_anyint_ty],
+ [IntrNoMem]>, RISCVVIntrinsic;
+ def int_riscv_vmv_s_x : Intrinsic<[llvm_anyint_ty],
+ [LLVMMatchType<0>, LLVMVectorElementType<0>,
+ llvm_anyint_ty],
+ [IntrNoMem]>, RISCVVIntrinsic {
+ let ExtendOperand = 2;
+ }
} // TargetPrefix = "riscv"
setBooleanVectorContents(ZeroOrOneBooleanContent);
// RVV intrinsics may have illegal operands.
+ // We also need to custom legalize vmv.x.s.
setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::i8, Custom);
setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::i16, Custom);
setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::i8, Custom);
setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::i16, Custom);
+ setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::i32, Custom);
+ setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::i32, Custom);
if (Subtarget.is64Bit()) {
- setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::i32, Custom);
- setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::i32, Custom);
+ setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::i64, Custom);
+ setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::i64, Custom);
}
}
assert(II->ExtendedOperand < Op.getNumOperands());
SmallVector<SDValue, 8> Operands(Op->op_begin(), Op->op_end());
SDValue &ScalarOp = Operands[II->ExtendedOperand];
- if (ScalarOp.getValueType() == MVT::i8 ||
- ScalarOp.getValueType() == MVT::i16 ||
- ScalarOp.getValueType() == MVT::i32) {
+ EVT OpVT = ScalarOp.getValueType();
+ if (OpVT == MVT::i8 || OpVT == MVT::i16 ||
+ (OpVT == MVT::i32 && Subtarget.is64Bit())) {
ScalarOp =
DAG.getNode(ISD::ANY_EXTEND, DL, Subtarget.getXLenVT(), ScalarOp);
return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, DL, Op.getValueType(),
EVT PtrVT = getPointerTy(DAG.getDataLayout());
return DAG.getRegister(RISCV::X4, PtrVT);
}
+ case Intrinsic::riscv_vmv_x_s:
+ assert(Op.getValueType() == Subtarget.getXLenVT() && "Unexpected VT!");
+ return DAG.getNode(RISCVISD::VMV_X_S, DL, Op.getValueType(),
+ Op.getOperand(1));
}
}
assert(ExtendOp < Op.getNumOperands());
SmallVector<SDValue, 8> Operands(Op->op_begin(), Op->op_end());
SDValue &ScalarOp = Operands[ExtendOp];
- if (ScalarOp.getValueType() == MVT::i32 ||
- ScalarOp.getValueType() == MVT::i16 ||
- ScalarOp.getValueType() == MVT::i8) {
+ EVT OpVT = ScalarOp.getValueType();
+ if (OpVT == MVT::i8 || OpVT == MVT::i16 ||
+ (OpVT == MVT::i32 && Subtarget.is64Bit())) {
ScalarOp =
DAG.getNode(ISD::ANY_EXTEND, DL, Subtarget.getXLenVT(), ScalarOp);
return DAG.getNode(ISD::INTRINSIC_W_CHAIN, DL, Op->getVTList(), Operands);
Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, NewOp));
break;
}
+ case ISD::INTRINSIC_WO_CHAIN: {
+ unsigned IntNo = cast<ConstantSDNode>(N->getOperand(0))->getZExtValue();
+ switch (IntNo) {
+ default:
+ llvm_unreachable(
+ "Don't know how to custom type legalize this intrinsic!");
+ case Intrinsic::riscv_vmv_x_s: {
+ EVT VT = N->getValueType(0);
+ assert((VT == MVT::i8 || VT == MVT::i16 ||
+ (Subtarget.is64Bit() && VT == MVT::i32)) &&
+ "Unexpected custom legalisation!");
+ SDValue Extract = DAG.getNode(RISCVISD::VMV_X_S, DL,
+ Subtarget.getXLenVT(), N->getOperand(1));
+ Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, VT, Extract));
+ break;
+ }
+ }
+ break;
+ }
}
}
// more precise answer could be calculated for SRAW depending on known
// bits in the shift amount.
return 33;
+ case RISCVISD::VMV_X_S:
+ // The number of sign bits of the scalar result is computed by obtaining the
+  // element type of the input vector operand, subtracting its width from
+  // XLEN, and then adding one (sign bit within the element type).
+ return Subtarget.getXLen() - Op.getOperand(0).getScalarValueSizeInBits() + 1;
}
return 1;
NODE_NAME_CASE(GREVIW)
NODE_NAME_CASE(GORCI)
NODE_NAME_CASE(GORCIW)
+ NODE_NAME_CASE(VMV_X_S)
}
// clang-format on
return nullptr;
GREVIW,
GORCI,
GORCIW,
+ // Vector Extension
+ // VMV_X_S matches the semantics of vmv.x.s. The result is always XLenVT
+ // sign extended from the vector element size. NOTE: The result size will
+ // never be less than the vector element size.
+ VMV_X_S,
};
} // namespace RISCVISD
let vm = 1 in {
def VMV_X_S : RVInstV<0b010000, 0b00000, OPMVV, (outs GPR:$vd),
(ins VR:$vs2), "vmv.x.s", "$vd, $vs2">;
-def VMV_S_X : RVInstV2<0b010000, 0b00000, OPMVX, (outs VR:$vd),
- (ins GPR:$rs1), "vmv.s.x", "$vd, $rs1">;
+let Constraints = "$vd = $vd_wb" in
+def VMV_S_X : RVInstV2<0b010000, 0b00000, OPMVX, (outs VR:$vd_wb),
+ (ins VR:$vd, GPR:$rs1), "vmv.s.x", "$vd, $rs1">;
}
} // hasSideEffects = 0, mayLoad = 0, mayStore = 0
///
//===----------------------------------------------------------------------===//
+def riscv_vmv_x_s : SDNode<"RISCVISD::VMV_X_S",
+ SDTypeProfile<1, 1, [SDTCisInt<0>, SDTCisVec<1>,
+ SDTCisInt<1>]>>;
+
// X0 has special meaning for vsetvl/vsetvli.
// rd | rs1 | AVL value | Effect on vl
//--------------------------------------------------------------
} // Predicates = [HasStdExtV, HasStdExtF]
//===----------------------------------------------------------------------===//
+// 17.1. Integer Scalar Move Instructions
+//===----------------------------------------------------------------------===//
+
+let Predicates = [HasStdExtV] in {
+let mayLoad = 0, mayStore = 0, hasSideEffects = 0, usesCustomInserter = 1,
+ Uses = [VL, VTYPE] in {
+ foreach m = MxList.m in {
+ let VLMul = m.value in {
+ let SEWIndex = 2, BaseInstr = VMV_X_S in
+ def PseudoVMV_X_S # "_" # m.MX: Pseudo<(outs GPR:$rd),
+ (ins m.vrclass:$rs2, ixlenimm:$sew),
+ []>, RISCVVPseudo;
+ let VLIndex = 3, SEWIndex = 4, BaseInstr = VMV_S_X,
+ Constraints = "$rd = $rs1" in
+ def PseudoVMV_S_X # "_" # m.MX: Pseudo<(outs m.vrclass:$rd),
+ (ins m.vrclass:$rs1, GPR:$rs2,
+ GPR:$vl, ixlenimm:$sew),
+ []>, RISCVVPseudo;
+ }
+ }
+}
+}
+
+//===----------------------------------------------------------------------===//
// Patterns.
//===----------------------------------------------------------------------===//
let Predicates = [HasStdExtV] in {
defm "" : VPatBinaryV_VX<"int_riscv_vfrsub", "PseudoVFRSUB", AllFloatVectors>;
} // Predicates = [HasStdExtV, HasStdExtF]
+
+//===----------------------------------------------------------------------===//
+// 17.1. Integer Scalar Move Instructions
+//===----------------------------------------------------------------------===//
+
+let Predicates = [HasStdExtV] in {
+foreach vti = AllIntegerVectors in {
+ def : Pat<(riscv_vmv_x_s (vti.Vector vti.RegClass:$rs2)),
+ (!cast<Instruction>("PseudoVMV_X_S_" # vti.LMul.MX) $rs2, vti.SEW)>;
+ def : Pat<(vti.Vector (int_riscv_vmv_s_x (vti.Vector vti.RegClass:$rs1),
+ GPR:$rs2, GPR:$vl)),
+ (!cast<Instruction>("PseudoVMV_S_X_" # vti.LMul.MX)
+ (vti.Vector $rs1), $rs2, (NoX0 GPR:$vl), vti.SEW)>;
+}
+} // Predicates = [HasStdExtV]
--- /dev/null
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=riscv32 -mattr=+experimental-v -verify-machineinstrs < %s | FileCheck %s
+
+declare <vscale x 1 x i8> @llvm.riscv.vmv.s.x.nxv1i8(<vscale x 1 x i8>, i8, i32)
+
+define <vscale x 1 x i8> @intrinsic_vmv.s.x_x_nxv1i8(<vscale x 1 x i8> %0, i8 %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vmv.s.x_x_nxv1i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu
+; CHECK-NEXT: vmv.s.x v16, a0
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x i8> @llvm.riscv.vmv.s.x.nxv1i8(<vscale x 1 x i8> %0, i8 %1, i32 %2)
+ ret <vscale x 1 x i8> %a
+}
+
+declare <vscale x 2 x i8> @llvm.riscv.vmv.s.x.nxv2i8(<vscale x 2 x i8>, i8, i32)
+
+define <vscale x 2 x i8> @intrinsic_vmv.s.x_x_nxv2i8(<vscale x 2 x i8> %0, i8 %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vmv.s.x_x_nxv2i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu
+; CHECK-NEXT: vmv.s.x v16, a0
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 2 x i8> @llvm.riscv.vmv.s.x.nxv2i8(<vscale x 2 x i8> %0, i8 %1, i32 %2)
+ ret <vscale x 2 x i8> %a
+}
+
+declare <vscale x 4 x i8> @llvm.riscv.vmv.s.x.nxv4i8(<vscale x 4 x i8>, i8, i32)
+
+define <vscale x 4 x i8> @intrinsic_vmv.s.x_x_nxv4i8(<vscale x 4 x i8> %0, i8 %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vmv.s.x_x_nxv4i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu
+; CHECK-NEXT: vmv.s.x v16, a0
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 4 x i8> @llvm.riscv.vmv.s.x.nxv4i8(<vscale x 4 x i8> %0, i8 %1, i32 %2)
+ ret <vscale x 4 x i8> %a
+}
+
+declare <vscale x 8 x i8> @llvm.riscv.vmv.s.x.nxv8i8(<vscale x 8 x i8>, i8, i32)
+
+define <vscale x 8 x i8> @intrinsic_vmv.s.x_x_nxv8i8(<vscale x 8 x i8> %0, i8 %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vmv.s.x_x_nxv8i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu
+; CHECK-NEXT: vmv.s.x v16, a0
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 8 x i8> @llvm.riscv.vmv.s.x.nxv8i8(<vscale x 8 x i8> %0, i8 %1, i32 %2)
+ ret <vscale x 8 x i8> %a
+}
+
+declare <vscale x 16 x i8> @llvm.riscv.vmv.s.x.nxv16i8(<vscale x 16 x i8>, i8, i32)
+
+define <vscale x 16 x i8> @intrinsic_vmv.s.x_x_nxv16i8(<vscale x 16 x i8> %0, i8 %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vmv.s.x_x_nxv16i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e8,m2,ta,mu
+; CHECK-NEXT: vmv.s.x v16, a0
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 16 x i8> @llvm.riscv.vmv.s.x.nxv16i8(<vscale x 16 x i8> %0, i8 %1, i32 %2)
+ ret <vscale x 16 x i8> %a
+}
+
+declare <vscale x 32 x i8> @llvm.riscv.vmv.s.x.nxv32i8(<vscale x 32 x i8>, i8, i32)
+
+define <vscale x 32 x i8> @intrinsic_vmv.s.x_x_nxv32i8(<vscale x 32 x i8> %0, i8 %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vmv.s.x_x_nxv32i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e8,m4,ta,mu
+; CHECK-NEXT: vmv.s.x v16, a0
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 32 x i8> @llvm.riscv.vmv.s.x.nxv32i8(<vscale x 32 x i8> %0, i8 %1, i32 %2)
+ ret <vscale x 32 x i8> %a
+}
+
+declare <vscale x 64 x i8> @llvm.riscv.vmv.s.x.nxv64i8(<vscale x 64 x i8>, i8, i32)
+
+define <vscale x 64 x i8> @intrinsic_vmv.s.x_x_nxv64i8(<vscale x 64 x i8> %0, i8 %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vmv.s.x_x_nxv64i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e8,m8,ta,mu
+; CHECK-NEXT: vmv.s.x v16, a0
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 64 x i8> @llvm.riscv.vmv.s.x.nxv64i8(<vscale x 64 x i8> %0, i8 %1, i32 %2)
+ ret <vscale x 64 x i8> %a
+}
+
+declare <vscale x 1 x i16> @llvm.riscv.vmv.s.x.nxv1i16(<vscale x 1 x i16>, i16, i32)
+
+define <vscale x 1 x i16> @intrinsic_vmv.s.x_x_nxv1i16(<vscale x 1 x i16> %0, i16 %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vmv.s.x_x_nxv1i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu
+; CHECK-NEXT: vmv.s.x v16, a0
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x i16> @llvm.riscv.vmv.s.x.nxv1i16(<vscale x 1 x i16> %0, i16 %1, i32 %2)
+ ret <vscale x 1 x i16> %a
+}
+
+declare <vscale x 2 x i16> @llvm.riscv.vmv.s.x.nxv2i16(<vscale x 2 x i16>, i16, i32)
+
+define <vscale x 2 x i16> @intrinsic_vmv.s.x_x_nxv2i16(<vscale x 2 x i16> %0, i16 %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vmv.s.x_x_nxv2i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu
+; CHECK-NEXT: vmv.s.x v16, a0
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 2 x i16> @llvm.riscv.vmv.s.x.nxv2i16(<vscale x 2 x i16> %0, i16 %1, i32 %2)
+ ret <vscale x 2 x i16> %a
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.vmv.s.x.nxv4i16(<vscale x 4 x i16>, i16, i32)
+
+define <vscale x 4 x i16> @intrinsic_vmv.s.x_x_nxv4i16(<vscale x 4 x i16> %0, i16 %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vmv.s.x_x_nxv4i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu
+; CHECK-NEXT: vmv.s.x v16, a0
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 4 x i16> @llvm.riscv.vmv.s.x.nxv4i16(<vscale x 4 x i16> %0, i16 %1, i32 %2)
+ ret <vscale x 4 x i16> %a
+}
+
+declare <vscale x 8 x i16> @llvm.riscv.vmv.s.x.nxv8i16(<vscale x 8 x i16>, i16, i32)
+
+define <vscale x 8 x i16> @intrinsic_vmv.s.x_x_nxv8i16(<vscale x 8 x i16> %0, i16 %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vmv.s.x_x_nxv8i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu
+; CHECK-NEXT: vmv.s.x v16, a0
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 8 x i16> @llvm.riscv.vmv.s.x.nxv8i16(<vscale x 8 x i16> %0, i16 %1, i32 %2)
+ ret <vscale x 8 x i16> %a
+}
+
+declare <vscale x 16 x i16> @llvm.riscv.vmv.s.x.nxv16i16(<vscale x 16 x i16>, i16, i32)
+
+define <vscale x 16 x i16> @intrinsic_vmv.s.x_x_nxv16i16(<vscale x 16 x i16> %0, i16 %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vmv.s.x_x_nxv16i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,m4,ta,mu
+; CHECK-NEXT: vmv.s.x v16, a0
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 16 x i16> @llvm.riscv.vmv.s.x.nxv16i16(<vscale x 16 x i16> %0, i16 %1, i32 %2)
+ ret <vscale x 16 x i16> %a
+}
+
+declare <vscale x 32 x i16> @llvm.riscv.vmv.s.x.nxv32i16(<vscale x 32 x i16>, i16, i32)
+
+define <vscale x 32 x i16> @intrinsic_vmv.s.x_x_nxv32i16(<vscale x 32 x i16> %0, i16 %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vmv.s.x_x_nxv32i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,m8,ta,mu
+; CHECK-NEXT: vmv.s.x v16, a0
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 32 x i16> @llvm.riscv.vmv.s.x.nxv32i16(<vscale x 32 x i16> %0, i16 %1, i32 %2)
+ ret <vscale x 32 x i16> %a
+}
+
+declare <vscale x 1 x i32> @llvm.riscv.vmv.s.x.nxv1i32(<vscale x 1 x i32>, i32, i32)
+
+define <vscale x 1 x i32> @intrinsic_vmv.s.x_x_nxv1i32(<vscale x 1 x i32> %0, i32 %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vmv.s.x_x_nxv1i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu
+; CHECK-NEXT: vmv.s.x v16, a0
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x i32> @llvm.riscv.vmv.s.x.nxv1i32(<vscale x 1 x i32> %0, i32 %1, i32 %2)
+ ret <vscale x 1 x i32> %a
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.vmv.s.x.nxv2i32(<vscale x 2 x i32>, i32, i32)
+
+define <vscale x 2 x i32> @intrinsic_vmv.s.x_x_nxv2i32(<vscale x 2 x i32> %0, i32 %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vmv.s.x_x_nxv2i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu
+; CHECK-NEXT: vmv.s.x v16, a0
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 2 x i32> @llvm.riscv.vmv.s.x.nxv2i32(<vscale x 2 x i32> %0, i32 %1, i32 %2)
+ ret <vscale x 2 x i32> %a
+}
+
+declare <vscale x 4 x i32> @llvm.riscv.vmv.s.x.nxv4i32(<vscale x 4 x i32>, i32, i32)
+
+define <vscale x 4 x i32> @intrinsic_vmv.s.x_x_nxv4i32(<vscale x 4 x i32> %0, i32 %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vmv.s.x_x_nxv4i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu
+; CHECK-NEXT: vmv.s.x v16, a0
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 4 x i32> @llvm.riscv.vmv.s.x.nxv4i32(<vscale x 4 x i32> %0, i32 %1, i32 %2)
+ ret <vscale x 4 x i32> %a
+}
+
+declare <vscale x 8 x i32> @llvm.riscv.vmv.s.x.nxv8i32(<vscale x 8 x i32>, i32, i32)
+
+define <vscale x 8 x i32> @intrinsic_vmv.s.x_x_nxv8i32(<vscale x 8 x i32> %0, i32 %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vmv.s.x_x_nxv8i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,m4,ta,mu
+; CHECK-NEXT: vmv.s.x v16, a0
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 8 x i32> @llvm.riscv.vmv.s.x.nxv8i32(<vscale x 8 x i32> %0, i32 %1, i32 %2)
+ ret <vscale x 8 x i32> %a
+}
+
+declare <vscale x 16 x i32> @llvm.riscv.vmv.s.x.nxv16i32(<vscale x 16 x i32>, i32, i32)
+
+define <vscale x 16 x i32> @intrinsic_vmv.s.x_x_nxv16i32(<vscale x 16 x i32> %0, i32 %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vmv.s.x_x_nxv16i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,m8,ta,mu
+; CHECK-NEXT: vmv.s.x v16, a0
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 16 x i32> @llvm.riscv.vmv.s.x.nxv16i32(<vscale x 16 x i32> %0, i32 %1, i32 %2)
+ ret <vscale x 16 x i32> %a
+}
--- /dev/null
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=riscv64 -mattr=+experimental-v -verify-machineinstrs < %s | FileCheck %s
+
+declare <vscale x 1 x i8> @llvm.riscv.vmv.s.x.nxv1i8(<vscale x 1 x i8>, i8, i64);
+
+define <vscale x 1 x i8> @intrinsic_vmv.s.x_x_nxv1i8(<vscale x 1 x i8> %0, i8 %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vmv.s.x_x_nxv1i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu
+; CHECK-NEXT: vmv.s.x v16, a0
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x i8> @llvm.riscv.vmv.s.x.nxv1i8(<vscale x 1 x i8> %0, i8 %1, i64 %2)
+ ret <vscale x 1 x i8> %a
+}
+
+declare <vscale x 2 x i8> @llvm.riscv.vmv.s.x.nxv2i8(<vscale x 2 x i8>, i8, i64);
+
+define <vscale x 2 x i8> @intrinsic_vmv.s.x_x_nxv2i8(<vscale x 2 x i8> %0, i8 %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vmv.s.x_x_nxv2i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu
+; CHECK-NEXT: vmv.s.x v16, a0
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 2 x i8> @llvm.riscv.vmv.s.x.nxv2i8(<vscale x 2 x i8> %0, i8 %1, i64 %2)
+ ret <vscale x 2 x i8> %a
+}
+
+declare <vscale x 4 x i8> @llvm.riscv.vmv.s.x.nxv4i8(<vscale x 4 x i8>, i8, i64);
+
+define <vscale x 4 x i8> @intrinsic_vmv.s.x_x_nxv4i8(<vscale x 4 x i8> %0, i8 %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vmv.s.x_x_nxv4i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu
+; CHECK-NEXT: vmv.s.x v16, a0
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 4 x i8> @llvm.riscv.vmv.s.x.nxv4i8(<vscale x 4 x i8> %0, i8 %1, i64 %2)
+ ret <vscale x 4 x i8> %a
+}
+
+declare <vscale x 8 x i8> @llvm.riscv.vmv.s.x.nxv8i8(<vscale x 8 x i8>, i8, i64);
+
+define <vscale x 8 x i8> @intrinsic_vmv.s.x_x_nxv8i8(<vscale x 8 x i8> %0, i8 %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vmv.s.x_x_nxv8i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu
+; CHECK-NEXT: vmv.s.x v16, a0
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 8 x i8> @llvm.riscv.vmv.s.x.nxv8i8(<vscale x 8 x i8> %0, i8 %1, i64 %2)
+ ret <vscale x 8 x i8> %a
+}
+
+declare <vscale x 16 x i8> @llvm.riscv.vmv.s.x.nxv16i8(<vscale x 16 x i8>, i8, i64);
+
+define <vscale x 16 x i8> @intrinsic_vmv.s.x_x_nxv16i8(<vscale x 16 x i8> %0, i8 %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vmv.s.x_x_nxv16i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e8,m2,ta,mu
+; CHECK-NEXT: vmv.s.x v16, a0
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 16 x i8> @llvm.riscv.vmv.s.x.nxv16i8(<vscale x 16 x i8> %0, i8 %1, i64 %2)
+ ret <vscale x 16 x i8> %a
+}
+
+declare <vscale x 32 x i8> @llvm.riscv.vmv.s.x.nxv32i8(<vscale x 32 x i8>, i8, i64);
+
+define <vscale x 32 x i8> @intrinsic_vmv.s.x_x_nxv32i8(<vscale x 32 x i8> %0, i8 %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vmv.s.x_x_nxv32i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e8,m4,ta,mu
+; CHECK-NEXT: vmv.s.x v16, a0
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 32 x i8> @llvm.riscv.vmv.s.x.nxv32i8(<vscale x 32 x i8> %0, i8 %1, i64 %2)
+ ret <vscale x 32 x i8> %a
+}
+
+declare <vscale x 64 x i8> @llvm.riscv.vmv.s.x.nxv64i8(<vscale x 64 x i8>, i8, i64);
+
+define <vscale x 64 x i8> @intrinsic_vmv.s.x_x_nxv64i8(<vscale x 64 x i8> %0, i8 %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vmv.s.x_x_nxv64i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e8,m8,ta,mu
+; CHECK-NEXT: vmv.s.x v16, a0
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 64 x i8> @llvm.riscv.vmv.s.x.nxv64i8(<vscale x 64 x i8> %0, i8 %1, i64 %2)
+ ret <vscale x 64 x i8> %a
+}
+
+declare <vscale x 1 x i16> @llvm.riscv.vmv.s.x.nxv1i16(<vscale x 1 x i16>, i16, i64);
+
+define <vscale x 1 x i16> @intrinsic_vmv.s.x_x_nxv1i16(<vscale x 1 x i16> %0, i16 %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vmv.s.x_x_nxv1i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu
+; CHECK-NEXT: vmv.s.x v16, a0
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x i16> @llvm.riscv.vmv.s.x.nxv1i16(<vscale x 1 x i16> %0, i16 %1, i64 %2)
+ ret <vscale x 1 x i16> %a
+}
+
+declare <vscale x 2 x i16> @llvm.riscv.vmv.s.x.nxv2i16(<vscale x 2 x i16>, i16, i64);
+
+define <vscale x 2 x i16> @intrinsic_vmv.s.x_x_nxv2i16(<vscale x 2 x i16> %0, i16 %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vmv.s.x_x_nxv2i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu
+; CHECK-NEXT: vmv.s.x v16, a0
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 2 x i16> @llvm.riscv.vmv.s.x.nxv2i16(<vscale x 2 x i16> %0, i16 %1, i64 %2)
+ ret <vscale x 2 x i16> %a
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.vmv.s.x.nxv4i16(<vscale x 4 x i16>, i16, i64);
+
+define <vscale x 4 x i16> @intrinsic_vmv.s.x_x_nxv4i16(<vscale x 4 x i16> %0, i16 %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vmv.s.x_x_nxv4i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu
+; CHECK-NEXT: vmv.s.x v16, a0
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 4 x i16> @llvm.riscv.vmv.s.x.nxv4i16(<vscale x 4 x i16> %0, i16 %1, i64 %2)
+ ret <vscale x 4 x i16> %a
+}
+
+declare <vscale x 8 x i16> @llvm.riscv.vmv.s.x.nxv8i16(<vscale x 8 x i16>, i16, i64);
+
+define <vscale x 8 x i16> @intrinsic_vmv.s.x_x_nxv8i16(<vscale x 8 x i16> %0, i16 %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vmv.s.x_x_nxv8i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu
+; CHECK-NEXT: vmv.s.x v16, a0
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 8 x i16> @llvm.riscv.vmv.s.x.nxv8i16(<vscale x 8 x i16> %0, i16 %1, i64 %2)
+ ret <vscale x 8 x i16> %a
+}
+
+declare <vscale x 16 x i16> @llvm.riscv.vmv.s.x.nxv16i16(<vscale x 16 x i16>, i16, i64);
+
+define <vscale x 16 x i16> @intrinsic_vmv.s.x_x_nxv16i16(<vscale x 16 x i16> %0, i16 %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vmv.s.x_x_nxv16i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,m4,ta,mu
+; CHECK-NEXT: vmv.s.x v16, a0
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 16 x i16> @llvm.riscv.vmv.s.x.nxv16i16(<vscale x 16 x i16> %0, i16 %1, i64 %2)
+ ret <vscale x 16 x i16> %a
+}
+
+declare <vscale x 32 x i16> @llvm.riscv.vmv.s.x.nxv32i16(<vscale x 32 x i16>, i16, i64);
+
+define <vscale x 32 x i16> @intrinsic_vmv.s.x_x_nxv32i16(<vscale x 32 x i16> %0, i16 %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vmv.s.x_x_nxv32i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,m8,ta,mu
+; CHECK-NEXT: vmv.s.x v16, a0
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 32 x i16> @llvm.riscv.vmv.s.x.nxv32i16(<vscale x 32 x i16> %0, i16 %1, i64 %2)
+ ret <vscale x 32 x i16> %a
+}
+
+declare <vscale x 1 x i32> @llvm.riscv.vmv.s.x.nxv1i32(<vscale x 1 x i32>, i32, i64);
+
+define <vscale x 1 x i32> @intrinsic_vmv.s.x_x_nxv1i32(<vscale x 1 x i32> %0, i32 %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vmv.s.x_x_nxv1i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu
+; CHECK-NEXT: vmv.s.x v16, a0
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x i32> @llvm.riscv.vmv.s.x.nxv1i32(<vscale x 1 x i32> %0, i32 %1, i64 %2)
+ ret <vscale x 1 x i32> %a
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.vmv.s.x.nxv2i32(<vscale x 2 x i32>, i32, i64);
+
+define <vscale x 2 x i32> @intrinsic_vmv.s.x_x_nxv2i32(<vscale x 2 x i32> %0, i32 %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vmv.s.x_x_nxv2i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu
+; CHECK-NEXT: vmv.s.x v16, a0
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 2 x i32> @llvm.riscv.vmv.s.x.nxv2i32(<vscale x 2 x i32> %0, i32 %1, i64 %2)
+ ret <vscale x 2 x i32> %a
+}
+
+declare <vscale x 4 x i32> @llvm.riscv.vmv.s.x.nxv4i32(<vscale x 4 x i32>, i32, i64);
+
+define <vscale x 4 x i32> @intrinsic_vmv.s.x_x_nxv4i32(<vscale x 4 x i32> %0, i32 %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vmv.s.x_x_nxv4i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu
+; CHECK-NEXT: vmv.s.x v16, a0
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 4 x i32> @llvm.riscv.vmv.s.x.nxv4i32(<vscale x 4 x i32> %0, i32 %1, i64 %2)
+ ret <vscale x 4 x i32> %a
+}
+
+declare <vscale x 8 x i32> @llvm.riscv.vmv.s.x.nxv8i32(<vscale x 8 x i32>, i32, i64);
+
+define <vscale x 8 x i32> @intrinsic_vmv.s.x_x_nxv8i32(<vscale x 8 x i32> %0, i32 %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vmv.s.x_x_nxv8i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,m4,ta,mu
+; CHECK-NEXT: vmv.s.x v16, a0
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 8 x i32> @llvm.riscv.vmv.s.x.nxv8i32(<vscale x 8 x i32> %0, i32 %1, i64 %2)
+ ret <vscale x 8 x i32> %a
+}
+
+declare <vscale x 16 x i32> @llvm.riscv.vmv.s.x.nxv16i32(<vscale x 16 x i32>, i32, i64);
+
+define <vscale x 16 x i32> @intrinsic_vmv.s.x_x_nxv16i32(<vscale x 16 x i32> %0, i32 %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vmv.s.x_x_nxv16i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,m8,ta,mu
+; CHECK-NEXT: vmv.s.x v16, a0
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 16 x i32> @llvm.riscv.vmv.s.x.nxv16i32(<vscale x 16 x i32> %0, i32 %1, i64 %2)
+ ret <vscale x 16 x i32> %a
+}
+
+declare <vscale x 1 x i64> @llvm.riscv.vmv.s.x.nxv1i64(<vscale x 1 x i64>, i64, i64);
+
+define <vscale x 1 x i64> @intrinsic_vmv.s.x_x_nxv1i64(<vscale x 1 x i64> %0, i64 %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vmv.s.x_x_nxv1i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu
+; CHECK-NEXT: vmv.s.x v16, a0
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x i64> @llvm.riscv.vmv.s.x.nxv1i64(<vscale x 1 x i64> %0, i64 %1, i64 %2)
+ ret <vscale x 1 x i64> %a
+}
+
+declare <vscale x 2 x i64> @llvm.riscv.vmv.s.x.nxv2i64(<vscale x 2 x i64>, i64, i64);
+
+define <vscale x 2 x i64> @intrinsic_vmv.s.x_x_nxv2i64(<vscale x 2 x i64> %0, i64 %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vmv.s.x_x_nxv2i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu
+; CHECK-NEXT: vmv.s.x v16, a0
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 2 x i64> @llvm.riscv.vmv.s.x.nxv2i64(<vscale x 2 x i64> %0, i64 %1, i64 %2)
+ ret <vscale x 2 x i64> %a
+}
+
+declare <vscale x 4 x i64> @llvm.riscv.vmv.s.x.nxv4i64(<vscale x 4 x i64>, i64, i64);
+
+define <vscale x 4 x i64> @intrinsic_vmv.s.x_x_nxv4i64(<vscale x 4 x i64> %0, i64 %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vmv.s.x_x_nxv4i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m4,ta,mu
+; CHECK-NEXT: vmv.s.x v16, a0
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 4 x i64> @llvm.riscv.vmv.s.x.nxv4i64(<vscale x 4 x i64> %0, i64 %1, i64 %2)
+ ret <vscale x 4 x i64> %a
+}
+
+declare <vscale x 8 x i64> @llvm.riscv.vmv.s.x.nxv8i64(<vscale x 8 x i64>, i64, i64);
+
+define <vscale x 8 x i64> @intrinsic_vmv.s.x_x_nxv8i64(<vscale x 8 x i64> %0, i64 %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vmv.s.x_x_nxv8i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m8,ta,mu
+; CHECK-NEXT: vmv.s.x v16, a0
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 8 x i64> @llvm.riscv.vmv.s.x.nxv8i64(<vscale x 8 x i64> %0, i64 %1, i64 %2)
+ ret <vscale x 8 x i64> %a
+}
--- /dev/null
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=riscv32 -mattr=+experimental-v -verify-machineinstrs < %s | FileCheck %s
+
+declare i8 @llvm.riscv.vmv.x.s.nxv1i8(<vscale x 1 x i8>)
+
+define signext i8 @intrinsic_vmv.x.s_s_nxv1i8(<vscale x 1 x i8> %0) nounwind {
+; CHECK-LABEL: intrinsic_vmv.x.s_s_nxv1i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e8,mf8,ta,mu
+; CHECK-NEXT: vmv.x.s a0, v16
+; CHECK-NEXT: ret
+entry:
+ %a = call i8 @llvm.riscv.vmv.x.s.nxv1i8(<vscale x 1 x i8> %0)
+ ret i8 %a
+}
+
+declare i8 @llvm.riscv.vmv.x.s.nxv2i8(<vscale x 2 x i8>)
+
+define signext i8 @intrinsic_vmv.x.s_s_nxv2i8(<vscale x 2 x i8> %0) nounwind {
+; CHECK-LABEL: intrinsic_vmv.x.s_s_nxv2i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e8,mf4,ta,mu
+; CHECK-NEXT: vmv.x.s a0, v16
+; CHECK-NEXT: ret
+entry:
+ %a = call i8 @llvm.riscv.vmv.x.s.nxv2i8(<vscale x 2 x i8> %0)
+ ret i8 %a
+}
+
+declare i8 @llvm.riscv.vmv.x.s.nxv4i8(<vscale x 4 x i8>)
+
+define signext i8 @intrinsic_vmv.x.s_s_nxv4i8(<vscale x 4 x i8> %0) nounwind {
+; CHECK-LABEL: intrinsic_vmv.x.s_s_nxv4i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e8,mf2,ta,mu
+; CHECK-NEXT: vmv.x.s a0, v16
+; CHECK-NEXT: ret
+entry:
+ %a = call i8 @llvm.riscv.vmv.x.s.nxv4i8(<vscale x 4 x i8> %0)
+ ret i8 %a
+}
+
+declare i8 @llvm.riscv.vmv.x.s.nxv8i8(<vscale x 8 x i8>)
+
+define signext i8 @intrinsic_vmv.x.s_s_nxv8i8(<vscale x 8 x i8> %0) nounwind {
+; CHECK-LABEL: intrinsic_vmv.x.s_s_nxv8i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e8,m1,ta,mu
+; CHECK-NEXT: vmv.x.s a0, v16
+; CHECK-NEXT: ret
+entry:
+ %a = call i8 @llvm.riscv.vmv.x.s.nxv8i8(<vscale x 8 x i8> %0)
+ ret i8 %a
+}
+
+declare i8 @llvm.riscv.vmv.x.s.nxv16i8(<vscale x 16 x i8>)
+
+define signext i8 @intrinsic_vmv.x.s_s_nxv16i8(<vscale x 16 x i8> %0) nounwind {
+; CHECK-LABEL: intrinsic_vmv.x.s_s_nxv16i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e8,m2,ta,mu
+; CHECK-NEXT: vmv.x.s a0, v16
+; CHECK-NEXT: ret
+entry:
+ %a = call i8 @llvm.riscv.vmv.x.s.nxv16i8(<vscale x 16 x i8> %0)
+ ret i8 %a
+}
+
+declare i8 @llvm.riscv.vmv.x.s.nxv32i8(<vscale x 32 x i8>)
+
+define signext i8 @intrinsic_vmv.x.s_s_nxv32i8(<vscale x 32 x i8> %0) nounwind {
+; CHECK-LABEL: intrinsic_vmv.x.s_s_nxv32i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e8,m4,ta,mu
+; CHECK-NEXT: vmv.x.s a0, v16
+; CHECK-NEXT: ret
+entry:
+ %a = call i8 @llvm.riscv.vmv.x.s.nxv32i8(<vscale x 32 x i8> %0)
+ ret i8 %a
+}
+
+declare i8 @llvm.riscv.vmv.x.s.nxv64i8(<vscale x 64 x i8>)
+
+define signext i8 @intrinsic_vmv.x.s_s_nxv64i8(<vscale x 64 x i8> %0) nounwind {
+; CHECK-LABEL: intrinsic_vmv.x.s_s_nxv64i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e8,m8,ta,mu
+; CHECK-NEXT: vmv.x.s a0, v16
+; CHECK-NEXT: ret
+entry:
+ %a = call i8 @llvm.riscv.vmv.x.s.nxv64i8(<vscale x 64 x i8> %0)
+ ret i8 %a
+}
+
+declare i16 @llvm.riscv.vmv.x.s.nxv1i16(<vscale x 1 x i16>)
+
+define signext i16 @intrinsic_vmv.x.s_s_nxv1i16(<vscale x 1 x i16> %0) nounwind {
+; CHECK-LABEL: intrinsic_vmv.x.s_s_nxv1i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e16,mf4,ta,mu
+; CHECK-NEXT: vmv.x.s a0, v16
+; CHECK-NEXT: ret
+entry:
+ %a = call i16 @llvm.riscv.vmv.x.s.nxv1i16(<vscale x 1 x i16> %0)
+ ret i16 %a
+}
+
+declare i16 @llvm.riscv.vmv.x.s.nxv2i16(<vscale x 2 x i16>)
+
+define signext i16 @intrinsic_vmv.x.s_s_nxv2i16(<vscale x 2 x i16> %0) nounwind {
+; CHECK-LABEL: intrinsic_vmv.x.s_s_nxv2i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e16,mf2,ta,mu
+; CHECK-NEXT: vmv.x.s a0, v16
+; CHECK-NEXT: ret
+entry:
+ %a = call i16 @llvm.riscv.vmv.x.s.nxv2i16(<vscale x 2 x i16> %0)
+ ret i16 %a
+}
+
+declare i16 @llvm.riscv.vmv.x.s.nxv4i16(<vscale x 4 x i16>)
+
+define signext i16 @intrinsic_vmv.x.s_s_nxv4i16(<vscale x 4 x i16> %0) nounwind {
+; CHECK-LABEL: intrinsic_vmv.x.s_s_nxv4i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e16,m1,ta,mu
+; CHECK-NEXT: vmv.x.s a0, v16
+; CHECK-NEXT: ret
+entry:
+ %a = call i16 @llvm.riscv.vmv.x.s.nxv4i16(<vscale x 4 x i16> %0)
+ ret i16 %a
+}
+
+declare i16 @llvm.riscv.vmv.x.s.nxv8i16(<vscale x 8 x i16>)
+
+define signext i16 @intrinsic_vmv.x.s_s_nxv8i16(<vscale x 8 x i16> %0) nounwind {
+; CHECK-LABEL: intrinsic_vmv.x.s_s_nxv8i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e16,m2,ta,mu
+; CHECK-NEXT: vmv.x.s a0, v16
+; CHECK-NEXT: ret
+entry:
+ %a = call i16 @llvm.riscv.vmv.x.s.nxv8i16(<vscale x 8 x i16> %0)
+ ret i16 %a
+}
+
+declare i16 @llvm.riscv.vmv.x.s.nxv16i16(<vscale x 16 x i16>)
+
+define signext i16 @intrinsic_vmv.x.s_s_nxv16i16(<vscale x 16 x i16> %0) nounwind {
+; CHECK-LABEL: intrinsic_vmv.x.s_s_nxv16i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e16,m4,ta,mu
+; CHECK-NEXT: vmv.x.s a0, v16
+; CHECK-NEXT: ret
+entry:
+ %a = call i16 @llvm.riscv.vmv.x.s.nxv16i16( <vscale x 16 x i16> %0)
+ ret i16 %a
+}
+
+declare i16 @llvm.riscv.vmv.x.s.nxv32i16( <vscale x 32 x i16>)
+
+define signext i16 @intrinsic_vmv.x.s_s_nxv32i16(<vscale x 32 x i16> %0) nounwind {
+; CHECK-LABEL: intrinsic_vmv.x.s_s_nxv32i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e16,m8,ta,mu
+; CHECK-NEXT: vmv.x.s a0, v16
+; CHECK-NEXT: ret
+entry:
+ %a = call i16 @llvm.riscv.vmv.x.s.nxv32i16( <vscale x 32 x i16> %0)
+ ret i16 %a
+}
+
+declare i32 @llvm.riscv.vmv.x.s.nxv1i32( <vscale x 1 x i32>)
+
+define i32 @intrinsic_vmv.x.s_s_nxv1i32(<vscale x 1 x i32> %0) nounwind {
+; CHECK-LABEL: intrinsic_vmv.x.s_s_nxv1i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32,mf2,ta,mu
+; CHECK-NEXT: vmv.x.s a0, v16
+; CHECK-NEXT: ret
+entry:
+ %a = call i32 @llvm.riscv.vmv.x.s.nxv1i32( <vscale x 1 x i32> %0)
+ ret i32 %a
+}
+
+declare i32 @llvm.riscv.vmv.x.s.nxv2i32( <vscale x 2 x i32>)
+
+define i32 @intrinsic_vmv.x.s_s_nxv2i32(<vscale x 2 x i32> %0) nounwind {
+; CHECK-LABEL: intrinsic_vmv.x.s_s_nxv2i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32,m1,ta,mu
+; CHECK-NEXT: vmv.x.s a0, v16
+; CHECK-NEXT: ret
+entry:
+ %a = call i32 @llvm.riscv.vmv.x.s.nxv2i32( <vscale x 2 x i32> %0)
+ ret i32 %a
+}
+
+declare i32 @llvm.riscv.vmv.x.s.nxv4i32( <vscale x 4 x i32>)
+
+define i32 @intrinsic_vmv.x.s_s_nxv4i32(<vscale x 4 x i32> %0) nounwind {
+; CHECK-LABEL: intrinsic_vmv.x.s_s_nxv4i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32,m2,ta,mu
+; CHECK-NEXT: vmv.x.s a0, v16
+; CHECK-NEXT: ret
+entry:
+ %a = call i32 @llvm.riscv.vmv.x.s.nxv4i32( <vscale x 4 x i32> %0)
+ ret i32 %a
+}
+
+declare i32 @llvm.riscv.vmv.x.s.nxv8i32( <vscale x 8 x i32>)
+
+define i32 @intrinsic_vmv.x.s_s_nxv8i32(<vscale x 8 x i32> %0) nounwind {
+; CHECK-LABEL: intrinsic_vmv.x.s_s_nxv8i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32,m4,ta,mu
+; CHECK-NEXT: vmv.x.s a0, v16
+; CHECK-NEXT: ret
+entry:
+ %a = call i32 @llvm.riscv.vmv.x.s.nxv8i32( <vscale x 8 x i32> %0)
+ ret i32 %a
+}
+
+declare i32 @llvm.riscv.vmv.x.s.nxv16i32( <vscale x 16 x i32>)
+
+define i32 @intrinsic_vmv.x.s_s_nxv16i32(<vscale x 16 x i32> %0) nounwind {
+; CHECK-LABEL: intrinsic_vmv.x.s_s_nxv16i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32,m8,ta,mu
+; CHECK-NEXT: vmv.x.s a0, v16
+; CHECK-NEXT: ret
+entry:
+ %a = call i32 @llvm.riscv.vmv.x.s.nxv16i32( <vscale x 16 x i32> %0)
+ ret i32 %a
+}
--- /dev/null
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=riscv64 -mattr=+experimental-v -verify-machineinstrs < %s | FileCheck %s
+
+declare i8 @llvm.riscv.vmv.x.s.nxv1i8(<vscale x 1 x i8>)
+
+define signext i8 @intrinsic_vmv.x.s_s_nxv1i8(<vscale x 1 x i8> %0) nounwind {
+; CHECK-LABEL: intrinsic_vmv.x.s_s_nxv1i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e8,mf8,ta,mu
+; CHECK-NEXT: vmv.x.s a0, v16
+; CHECK-NEXT: ret
+entry:
+ %a = call i8 @llvm.riscv.vmv.x.s.nxv1i8(<vscale x 1 x i8> %0)
+ ret i8 %a
+}
+
+declare i8 @llvm.riscv.vmv.x.s.nxv2i8(<vscale x 2 x i8>)
+
+define signext i8 @intrinsic_vmv.x.s_s_nxv2i8(<vscale x 2 x i8> %0) nounwind {
+; CHECK-LABEL: intrinsic_vmv.x.s_s_nxv2i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e8,mf4,ta,mu
+; CHECK-NEXT: vmv.x.s a0, v16
+; CHECK-NEXT: ret
+entry:
+ %a = call i8 @llvm.riscv.vmv.x.s.nxv2i8(<vscale x 2 x i8> %0)
+ ret i8 %a
+}
+
+declare i8 @llvm.riscv.vmv.x.s.nxv4i8(<vscale x 4 x i8>)
+
+define signext i8 @intrinsic_vmv.x.s_s_nxv4i8(<vscale x 4 x i8> %0) nounwind {
+; CHECK-LABEL: intrinsic_vmv.x.s_s_nxv4i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e8,mf2,ta,mu
+; CHECK-NEXT: vmv.x.s a0, v16
+; CHECK-NEXT: ret
+entry:
+ %a = call i8 @llvm.riscv.vmv.x.s.nxv4i8(<vscale x 4 x i8> %0)
+ ret i8 %a
+}
+
+declare i8 @llvm.riscv.vmv.x.s.nxv8i8(<vscale x 8 x i8>)
+
+define signext i8 @intrinsic_vmv.x.s_s_nxv8i8(<vscale x 8 x i8> %0) nounwind {
+; CHECK-LABEL: intrinsic_vmv.x.s_s_nxv8i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e8,m1,ta,mu
+; CHECK-NEXT: vmv.x.s a0, v16
+; CHECK-NEXT: ret
+entry:
+ %a = call i8 @llvm.riscv.vmv.x.s.nxv8i8(<vscale x 8 x i8> %0)
+ ret i8 %a
+}
+
+declare i8 @llvm.riscv.vmv.x.s.nxv16i8(<vscale x 16 x i8>)
+
+define signext i8 @intrinsic_vmv.x.s_s_nxv16i8(<vscale x 16 x i8> %0) nounwind {
+; CHECK-LABEL: intrinsic_vmv.x.s_s_nxv16i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e8,m2,ta,mu
+; CHECK-NEXT: vmv.x.s a0, v16
+; CHECK-NEXT: ret
+entry:
+ %a = call i8 @llvm.riscv.vmv.x.s.nxv16i8(<vscale x 16 x i8> %0)
+ ret i8 %a
+}
+
+declare i8 @llvm.riscv.vmv.x.s.nxv32i8(<vscale x 32 x i8>)
+
+define signext i8 @intrinsic_vmv.x.s_s_nxv32i8(<vscale x 32 x i8> %0) nounwind {
+; CHECK-LABEL: intrinsic_vmv.x.s_s_nxv32i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e8,m4,ta,mu
+; CHECK-NEXT: vmv.x.s a0, v16
+; CHECK-NEXT: ret
+entry:
+ %a = call i8 @llvm.riscv.vmv.x.s.nxv32i8(<vscale x 32 x i8> %0)
+ ret i8 %a
+}
+
+declare i8 @llvm.riscv.vmv.x.s.nxv64i8(<vscale x 64 x i8>)
+
+define signext i8 @intrinsic_vmv.x.s_s_nxv64i8(<vscale x 64 x i8> %0) nounwind {
+; CHECK-LABEL: intrinsic_vmv.x.s_s_nxv64i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e8,m8,ta,mu
+; CHECK-NEXT: vmv.x.s a0, v16
+; CHECK-NEXT: ret
+entry:
+ %a = call i8 @llvm.riscv.vmv.x.s.nxv64i8(<vscale x 64 x i8> %0)
+ ret i8 %a
+}
+
+declare i16 @llvm.riscv.vmv.x.s.nxv1i16(<vscale x 1 x i16>)
+
+define signext i16 @intrinsic_vmv.x.s_s_nxv1i16(<vscale x 1 x i16> %0) nounwind {
+; CHECK-LABEL: intrinsic_vmv.x.s_s_nxv1i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e16,mf4,ta,mu
+; CHECK-NEXT: vmv.x.s a0, v16
+; CHECK-NEXT: ret
+entry:
+ %a = call i16 @llvm.riscv.vmv.x.s.nxv1i16(<vscale x 1 x i16> %0)
+ ret i16 %a
+}
+
+declare i16 @llvm.riscv.vmv.x.s.nxv2i16(<vscale x 2 x i16>)
+
+define signext i16 @intrinsic_vmv.x.s_s_nxv2i16(<vscale x 2 x i16> %0) nounwind {
+; CHECK-LABEL: intrinsic_vmv.x.s_s_nxv2i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e16,mf2,ta,mu
+; CHECK-NEXT: vmv.x.s a0, v16
+; CHECK-NEXT: ret
+entry:
+ %a = call i16 @llvm.riscv.vmv.x.s.nxv2i16(<vscale x 2 x i16> %0)
+ ret i16 %a
+}
+
+declare i16 @llvm.riscv.vmv.x.s.nxv4i16(<vscale x 4 x i16>)
+
+define signext i16 @intrinsic_vmv.x.s_s_nxv4i16(<vscale x 4 x i16> %0) nounwind {
+; CHECK-LABEL: intrinsic_vmv.x.s_s_nxv4i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e16,m1,ta,mu
+; CHECK-NEXT: vmv.x.s a0, v16
+; CHECK-NEXT: ret
+entry:
+ %a = call i16 @llvm.riscv.vmv.x.s.nxv4i16(<vscale x 4 x i16> %0)
+ ret i16 %a
+}
+
+declare i16 @llvm.riscv.vmv.x.s.nxv8i16(<vscale x 8 x i16>)
+
+define signext i16 @intrinsic_vmv.x.s_s_nxv8i16(<vscale x 8 x i16> %0) nounwind {
+; CHECK-LABEL: intrinsic_vmv.x.s_s_nxv8i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e16,m2,ta,mu
+; CHECK-NEXT: vmv.x.s a0, v16
+; CHECK-NEXT: ret
+entry:
+ %a = call i16 @llvm.riscv.vmv.x.s.nxv8i16(<vscale x 8 x i16> %0)
+ ret i16 %a
+}
+
+declare i16 @llvm.riscv.vmv.x.s.nxv16i16(<vscale x 16 x i16>)
+
+define signext i16 @intrinsic_vmv.x.s_s_nxv16i16(<vscale x 16 x i16> %0) nounwind {
+; CHECK-LABEL: intrinsic_vmv.x.s_s_nxv16i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e16,m4,ta,mu
+; CHECK-NEXT: vmv.x.s a0, v16
+; CHECK-NEXT: ret
+entry:
+ %a = call i16 @llvm.riscv.vmv.x.s.nxv16i16( <vscale x 16 x i16> %0)
+ ret i16 %a
+}
+
+declare i16 @llvm.riscv.vmv.x.s.nxv32i16( <vscale x 32 x i16>)
+
+define signext i16 @intrinsic_vmv.x.s_s_nxv32i16(<vscale x 32 x i16> %0) nounwind {
+; CHECK-LABEL: intrinsic_vmv.x.s_s_nxv32i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e16,m8,ta,mu
+; CHECK-NEXT: vmv.x.s a0, v16
+; CHECK-NEXT: ret
+entry:
+ %a = call i16 @llvm.riscv.vmv.x.s.nxv32i16( <vscale x 32 x i16> %0)
+ ret i16 %a
+}
+
+declare i32 @llvm.riscv.vmv.x.s.nxv1i32( <vscale x 1 x i32>)
+
+define signext i32 @intrinsic_vmv.x.s_s_nxv1i32(<vscale x 1 x i32> %0) nounwind {
+; CHECK-LABEL: intrinsic_vmv.x.s_s_nxv1i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32,mf2,ta,mu
+; CHECK-NEXT: vmv.x.s a0, v16
+; CHECK-NEXT: ret
+entry:
+ %a = call i32 @llvm.riscv.vmv.x.s.nxv1i32( <vscale x 1 x i32> %0)
+ ret i32 %a
+}
+
+declare i32 @llvm.riscv.vmv.x.s.nxv2i32( <vscale x 2 x i32>)
+
+define signext i32 @intrinsic_vmv.x.s_s_nxv2i32(<vscale x 2 x i32> %0) nounwind {
+; CHECK-LABEL: intrinsic_vmv.x.s_s_nxv2i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32,m1,ta,mu
+; CHECK-NEXT: vmv.x.s a0, v16
+; CHECK-NEXT: ret
+entry:
+ %a = call i32 @llvm.riscv.vmv.x.s.nxv2i32( <vscale x 2 x i32> %0)
+ ret i32 %a
+}
+
+declare i32 @llvm.riscv.vmv.x.s.nxv4i32( <vscale x 4 x i32>)
+
+define signext i32 @intrinsic_vmv.x.s_s_nxv4i32(<vscale x 4 x i32> %0) nounwind {
+; CHECK-LABEL: intrinsic_vmv.x.s_s_nxv4i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32,m2,ta,mu
+; CHECK-NEXT: vmv.x.s a0, v16
+; CHECK-NEXT: ret
+entry:
+ %a = call i32 @llvm.riscv.vmv.x.s.nxv4i32( <vscale x 4 x i32> %0)
+ ret i32 %a
+}
+
+declare i32 @llvm.riscv.vmv.x.s.nxv8i32( <vscale x 8 x i32>)
+
+define signext i32 @intrinsic_vmv.x.s_s_nxv8i32(<vscale x 8 x i32> %0) nounwind {
+; CHECK-LABEL: intrinsic_vmv.x.s_s_nxv8i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32,m4,ta,mu
+; CHECK-NEXT: vmv.x.s a0, v16
+; CHECK-NEXT: ret
+entry:
+ %a = call i32 @llvm.riscv.vmv.x.s.nxv8i32( <vscale x 8 x i32> %0)
+ ret i32 %a
+}
+
+declare i32 @llvm.riscv.vmv.x.s.nxv16i32( <vscale x 16 x i32>)
+
+define signext i32 @intrinsic_vmv.x.s_s_nxv16i32(<vscale x 16 x i32> %0) nounwind {
+; CHECK-LABEL: intrinsic_vmv.x.s_s_nxv16i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e32,m8,ta,mu
+; CHECK-NEXT: vmv.x.s a0, v16
+; CHECK-NEXT: ret
+entry:
+ %a = call i32 @llvm.riscv.vmv.x.s.nxv16i32( <vscale x 16 x i32> %0)
+ ret i32 %a
+}
+
+declare i64 @llvm.riscv.vmv.x.s.nxv1i64( <vscale x 1 x i64>)
+
+define i64 @intrinsic_vmv.x.s_s_nxv1i64(<vscale x 1 x i64> %0) nounwind {
+; CHECK-LABEL: intrinsic_vmv.x.s_s_nxv1i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e64,m1,ta,mu
+; CHECK-NEXT: vmv.x.s a0, v16
+; CHECK-NEXT: ret
+entry:
+ %a = call i64 @llvm.riscv.vmv.x.s.nxv1i64( <vscale x 1 x i64> %0)
+ ret i64 %a
+}
+
+declare i64 @llvm.riscv.vmv.x.s.nxv2i64( <vscale x 2 x i64>)
+
+define i64 @intrinsic_vmv.x.s_s_nxv2i64(<vscale x 2 x i64> %0) nounwind {
+; CHECK-LABEL: intrinsic_vmv.x.s_s_nxv2i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e64,m2,ta,mu
+; CHECK-NEXT: vmv.x.s a0, v16
+; CHECK-NEXT: ret
+entry:
+ %a = call i64 @llvm.riscv.vmv.x.s.nxv2i64( <vscale x 2 x i64> %0)
+ ret i64 %a
+}
+
+declare i64 @llvm.riscv.vmv.x.s.nxv4i64( <vscale x 4 x i64>)
+
+define i64 @intrinsic_vmv.x.s_s_nxv4i64(<vscale x 4 x i64> %0) nounwind {
+; CHECK-LABEL: intrinsic_vmv.x.s_s_nxv4i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e64,m4,ta,mu
+; CHECK-NEXT: vmv.x.s a0, v16
+; CHECK-NEXT: ret
+entry:
+ %a = call i64 @llvm.riscv.vmv.x.s.nxv4i64( <vscale x 4 x i64> %0)
+ ret i64 %a
+}
+
+declare i64 @llvm.riscv.vmv.x.s.nxv8i64(<vscale x 8 x i64>)
+
+define i64 @intrinsic_vmv.x.s_s_nxv8i64(<vscale x 8 x i64> %0) nounwind {
+; CHECK-LABEL: intrinsic_vmv.x.s_s_nxv8i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, zero, e64,m8,ta,mu
+; CHECK-NEXT: vmv.x.s a0, v16
+; CHECK-NEXT: ret
+entry:
+ %a = call i64 @llvm.riscv.vmv.x.s.nxv8i64(<vscale x 8 x i64> %0)
+ ret i64 %a
+}