@@ ... @@ RISCVTargetLowering::getConstraintType
default:
break;
case 'f':
+ case 'v':
return C_RegisterClass;
case 'I':
case 'J':
@@ ... @@ RISCVTargetLowering::getRegForInlineAsmConstraint
if (Subtarget.hasStdExtD() && VT == MVT::f64)
return std::make_pair(0U, &RISCV::FPR64RegClass);
break;
+ case 'v':
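+ // Pick the first register class that can legally hold VT, trying the
+ // mask class first, then VR, then the LMUL-grouped classes VRM2/VRM4/VRM8.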
+ for (const auto *RC :
+ {&RISCV::VMRegClass, &RISCV::VRRegClass, &RISCV::VRM2RegClass,
+ &RISCV::VRM4RegClass, &RISCV::VRM8RegClass}) {
+ if (TRI->isTypeLegalForClass(*RC, VT.SimpleTy))
+ return std::make_pair(0U, RC);
+ }
+ break;
default:
break;
}
}
}
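+ // Match explicit vector register names of the form {v0}..{v31} when the
+ // V extension is enabled.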
+ if (Subtarget.hasStdExtV()) {
+ Register VReg = StringSwitch<Register>(Constraint.lower())
+ .Case("{v0}", RISCV::V0)
+ .Case("{v1}", RISCV::V1)
+ .Case("{v2}", RISCV::V2)
+ .Case("{v3}", RISCV::V3)
+ .Case("{v4}", RISCV::V4)
+ .Case("{v5}", RISCV::V5)
+ .Case("{v6}", RISCV::V6)
+ .Case("{v7}", RISCV::V7)
+ .Case("{v8}", RISCV::V8)
+ .Case("{v9}", RISCV::V9)
+ .Case("{v10}", RISCV::V10)
+ .Case("{v11}", RISCV::V11)
+ .Case("{v12}", RISCV::V12)
+ .Case("{v13}", RISCV::V13)
+ .Case("{v14}", RISCV::V14)
+ .Case("{v15}", RISCV::V15)
+ .Case("{v16}", RISCV::V16)
+ .Case("{v17}", RISCV::V17)
+ .Case("{v18}", RISCV::V18)
+ .Case("{v19}", RISCV::V19)
+ .Case("{v20}", RISCV::V20)
+ .Case("{v21}", RISCV::V21)
+ .Case("{v22}", RISCV::V22)
+ .Case("{v23}", RISCV::V23)
+ .Case("{v24}", RISCV::V24)
+ .Case("{v25}", RISCV::V25)
+ .Case("{v26}", RISCV::V26)
+ .Case("{v27}", RISCV::V27)
+ .Case("{v28}", RISCV::V28)
+ .Case("{v29}", RISCV::V29)
+ .Case("{v30}", RISCV::V30)
+ .Case("{v31}", RISCV::V31)
+ .Default(RISCV::NoRegister);
+ if (VReg != RISCV::NoRegister) {
+ if (TRI->isTypeLegalForClass(RISCV::VMRegClass, VT.SimpleTy))
+ return std::make_pair(VReg, &RISCV::VMRegClass);
+ if (TRI->isTypeLegalForClass(RISCV::VRRegClass, VT.SimpleTy))
+ return std::make_pair(VReg, &RISCV::VRRegClass);
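+ // For types that need a register group (LMUL > 1), translate the single
+ // register into the super-register covering its group, e.g. {v2} with an
+ // LMUL=2 type becomes V2M2.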
+ for (const auto *RC :
+ {&RISCV::VRM2RegClass, &RISCV::VRM4RegClass, &RISCV::VRM8RegClass}) {
+ if (TRI->isTypeLegalForClass(*RC, VT.SimpleTy)) {
+ VReg = TRI->getMatchingSuperReg(VReg, RISCV::sub_vrm1_0, RC);
+ return std::make_pair(VReg, RC);
+ }
+ }
+ }
+ }
+
return TargetLowering::getRegForInlineAsmConstraint(TRI, Constraint, VT);
}
@@ ... @@
return false;
}
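+// Handle scalable vectors whose value type differs from the register part
+// type: when PartVT is a wider scalable vector, bitcast to PartVT's element
+// type if needed and insert the value at index 0 of an undef PartVT vector.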
+bool RISCVTargetLowering::splitValueIntoRegisterParts(
+ SelectionDAG &DAG, const SDLoc &DL, SDValue Val, SDValue *Parts,
+ unsigned NumParts, MVT PartVT, Optional<CallingConv::ID> CC) const {
+ EVT ValueVT = Val.getValueType();
+ if (ValueVT.isScalableVector() && PartVT.isScalableVector()) {
+ LLVMContext &Context = *DAG.getContext();
+ EVT ValueEltVT = ValueVT.getVectorElementType();
+ EVT PartEltVT = PartVT.getVectorElementType();
+ unsigned ValueVTBitSize = ValueVT.getSizeInBits().getKnownMinSize();
+ unsigned PartVTBitSize = PartVT.getSizeInBits().getKnownMinSize();
+ if (PartVTBitSize % ValueVTBitSize == 0) {
+ // If the element types differ, first bitcast Val to a vector with the
+ // same element type as PartVT.
+ if (ValueEltVT != PartEltVT) {
+ unsigned Count = ValueVTBitSize / PartEltVT.getSizeInBits();
+ assert(Count != 0 && "The number of elements should not be zero.");
+ EVT SameEltTypeVT =
+ EVT::getVectorVT(Context, PartEltVT, Count, /*IsScalable=*/true);
+ Val = DAG.getNode(ISD::BITCAST, DL, SameEltTypeVT, Val);
+ }
+ Val = DAG.getNode(ISD::INSERT_SUBVECTOR, DL, PartVT, DAG.getUNDEF(PartVT),
+ Val, DAG.getConstant(0, DL, Subtarget.getXLenVT()));
+ Parts[0] = Val;
+ return true;
+ }
+ }
+ return false;
+}
+
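+// Inverse of splitValueIntoRegisterParts: extract the original scalable
+// vector from element 0 of the wider part, bitcasting back if the element
+// types differ.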
+SDValue RISCVTargetLowering::joinRegisterPartsIntoValue(
+ SelectionDAG &DAG, const SDLoc &DL, const SDValue *Parts, unsigned NumParts,
+ MVT PartVT, EVT ValueVT, Optional<CallingConv::ID> CC) const {
+ if (ValueVT.isScalableVector() && PartVT.isScalableVector()) {
+ LLVMContext &Context = *DAG.getContext();
+ SDValue Val = Parts[0];
+ EVT ValueEltVT = ValueVT.getVectorElementType();
+ EVT PartEltVT = PartVT.getVectorElementType();
+ unsigned ValueVTBitSize = ValueVT.getSizeInBits().getKnownMinSize();
+ unsigned PartVTBitSize = PartVT.getSizeInBits().getKnownMinSize();
+ if (PartVTBitSize % ValueVTBitSize == 0) {
+ EVT SameEltTypeVT = ValueVT;
+ // If the element types differ, extract using an intermediate VT with the
+ // same element type as PartVT, then bitcast back below.
+ if (ValueEltVT != PartEltVT) {
+ unsigned Count = ValueVTBitSize / PartEltVT.getSizeInBits();
+ assert(Count != 0 && "The number of elements should not be zero.");
+ SameEltTypeVT =
+ EVT::getVectorVT(Context, PartEltVT, Count, /*IsScalable=*/true);
+ }
+ Val = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, SameEltTypeVT, Val,
+ DAG.getConstant(0, DL, Subtarget.getXLenVT()));
+ if (ValueEltVT != PartEltVT)
+ Val = DAG.getNode(ISD::BITCAST, DL, ValueVT, Val);
+ return Val;
+ }
+ }
+ return SDValue();
+}
+
#define GET_REGISTER_MATCHER
#include "RISCVGenAsmMatcher.inc"
--- /dev/null
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=riscv64 -mattr=+experimental-v < %s \
+; RUN: --verify-machineinstrs | FileCheck %s
+
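+; Check that the 'v' constraint selects vector register classes for scalable
+; vector and mask operands, and that explicit {vN} register names are honored.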
+define <vscale x 1 x i1> @test_1xi1(<vscale x 1 x i1> %in, <vscale x 1 x i1> %in2) nounwind {
+; CHECK-LABEL: test_1xi1:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: #APP
+; CHECK-NEXT: vmand.mm v0, v0, v8
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <vscale x 1 x i1> asm "vmand.mm $0, $1, $2", "=v,v,v"(<vscale x 1 x i1> %in, <vscale x 1 x i1> %in2)
+ ret <vscale x 1 x i1> %0
+}
+
+define <vscale x 2 x i1> @test_2xi1(<vscale x 2 x i1> %in, <vscale x 2 x i1> %in2) nounwind {
+; CHECK-LABEL: test_2xi1:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: #APP
+; CHECK-NEXT: vmand.mm v0, v0, v8
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <vscale x 2 x i1> asm "vmand.mm $0, $1, $2", "=v,v,v"(<vscale x 2 x i1> %in, <vscale x 2 x i1> %in2)
+ ret <vscale x 2 x i1> %0
+}
+
+define <vscale x 4 x i1> @test_4xi1(<vscale x 4 x i1> %in, <vscale x 4 x i1> %in2) nounwind {
+; CHECK-LABEL: test_4xi1:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: #APP
+; CHECK-NEXT: vmand.mm v0, v0, v8
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <vscale x 4 x i1> asm "vmand.mm $0, $1, $2", "=v,v,v"(<vscale x 4 x i1> %in, <vscale x 4 x i1> %in2)
+ ret <vscale x 4 x i1> %0
+}
+
+define <vscale x 8 x i1> @test_8xi1(<vscale x 8 x i1> %in, <vscale x 8 x i1> %in2) nounwind {
+; CHECK-LABEL: test_8xi1:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: #APP
+; CHECK-NEXT: vmand.mm v0, v0, v8
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <vscale x 8 x i1> asm "vmand.mm $0, $1, $2", "=v,v,v"(<vscale x 8 x i1> %in, <vscale x 8 x i1> %in2)
+ ret <vscale x 8 x i1> %0
+}
+
+define <vscale x 16 x i1> @test_16xi1(<vscale x 16 x i1> %in, <vscale x 16 x i1> %in2) nounwind {
+; CHECK-LABEL: test_16xi1:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: #APP
+; CHECK-NEXT: vmand.mm v0, v0, v8
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <vscale x 16 x i1> asm "vmand.mm $0, $1, $2", "=v,v,v"(<vscale x 16 x i1> %in, <vscale x 16 x i1> %in2)
+ ret <vscale x 16 x i1> %0
+}
+
+define <vscale x 32 x i1> @test_32xi1(<vscale x 32 x i1> %in, <vscale x 32 x i1> %in2) nounwind {
+; CHECK-LABEL: test_32xi1:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: #APP
+; CHECK-NEXT: vmand.mm v0, v0, v8
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <vscale x 32 x i1> asm "vmand.mm $0, $1, $2", "=v,v,v"(<vscale x 32 x i1> %in, <vscale x 32 x i1> %in2)
+ ret <vscale x 32 x i1> %0
+}
+
+define <vscale x 64 x i1> @test_64xi1(<vscale x 64 x i1> %in, <vscale x 64 x i1> %in2) nounwind {
+; CHECK-LABEL: test_64xi1:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: #APP
+; CHECK-NEXT: vmand.mm v0, v0, v8
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <vscale x 64 x i1> asm "vmand.mm $0, $1, $2", "=v,v,v"(<vscale x 64 x i1> %in, <vscale x 64 x i1> %in2)
+ ret <vscale x 64 x i1> %0
+}
+
+define <vscale x 1 x i64> @test_1xi64(<vscale x 1 x i64> %in, <vscale x 1 x i64> %in2) nounwind {
+; CHECK-LABEL: test_1xi64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: #APP
+; CHECK-NEXT: vadd.vv v8, v8, v9
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <vscale x 1 x i64> asm "vadd.vv $0, $1, $2", "=v,v,v"(<vscale x 1 x i64> %in, <vscale x 1 x i64> %in2)
+ ret <vscale x 1 x i64> %0
+}
+
+define <vscale x 2 x i64> @test_2xi64(<vscale x 2 x i64> %in, <vscale x 2 x i64> %in2) nounwind {
+; CHECK-LABEL: test_2xi64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: #APP
+; CHECK-NEXT: vadd.vv v8, v8, v10
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <vscale x 2 x i64> asm "vadd.vv $0, $1, $2", "=v,v,v"(<vscale x 2 x i64> %in, <vscale x 2 x i64> %in2)
+ ret <vscale x 2 x i64> %0
+}
+
+define <vscale x 4 x i64> @test_4xi64(<vscale x 4 x i64> %in, <vscale x 4 x i64> %in2) nounwind {
+; CHECK-LABEL: test_4xi64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: #APP
+; CHECK-NEXT: vadd.vv v8, v8, v12
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <vscale x 4 x i64> asm "vadd.vv $0, $1, $2", "=v,v,v"(<vscale x 4 x i64> %in, <vscale x 4 x i64> %in2)
+ ret <vscale x 4 x i64> %0
+}
+
+define <vscale x 8 x i64> @test_8xi64(<vscale x 8 x i64> %in, <vscale x 8 x i64> %in2) nounwind {
+; CHECK-LABEL: test_8xi64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: #APP
+; CHECK-NEXT: vadd.vv v8, v8, v16
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <vscale x 8 x i64> asm "vadd.vv $0, $1, $2", "=v,v,v"(<vscale x 8 x i64> %in, <vscale x 8 x i64> %in2)
+ ret <vscale x 8 x i64> %0
+}
+
+define <vscale x 1 x i32> @test_1xi32(<vscale x 1 x i32> %in, <vscale x 1 x i32> %in2) nounwind {
+; CHECK-LABEL: test_1xi32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: #APP
+; CHECK-NEXT: vadd.vv v8, v8, v9
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <vscale x 1 x i32> asm "vadd.vv $0, $1, $2", "=v,v,v"(<vscale x 1 x i32> %in, <vscale x 1 x i32> %in2)
+ ret <vscale x 1 x i32> %0
+}
+
+define <vscale x 2 x i32> @test_2xi32(<vscale x 2 x i32> %in, <vscale x 2 x i32> %in2) nounwind {
+; CHECK-LABEL: test_2xi32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: #APP
+; CHECK-NEXT: vadd.vv v8, v8, v9
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <vscale x 2 x i32> asm "vadd.vv $0, $1, $2", "=v,v,v"(<vscale x 2 x i32> %in, <vscale x 2 x i32> %in2)
+ ret <vscale x 2 x i32> %0
+}
+
+define <vscale x 4 x i32> @test_4xi32(<vscale x 4 x i32> %in, <vscale x 4 x i32> %in2) nounwind {
+; CHECK-LABEL: test_4xi32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: #APP
+; CHECK-NEXT: vadd.vv v8, v8, v10
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <vscale x 4 x i32> asm "vadd.vv $0, $1, $2", "=v,v,v"(<vscale x 4 x i32> %in, <vscale x 4 x i32> %in2)
+ ret <vscale x 4 x i32> %0
+}
+
+define <vscale x 8 x i32> @test_8xi32(<vscale x 8 x i32> %in, <vscale x 8 x i32> %in2) nounwind {
+; CHECK-LABEL: test_8xi32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: #APP
+; CHECK-NEXT: vadd.vv v8, v8, v12
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <vscale x 8 x i32> asm "vadd.vv $0, $1, $2", "=v,v,v"(<vscale x 8 x i32> %in, <vscale x 8 x i32> %in2)
+ ret <vscale x 8 x i32> %0
+}
+
+define <vscale x 16 x i32> @test_16xi32(<vscale x 16 x i32> %in, <vscale x 16 x i32> %in2) nounwind {
+; CHECK-LABEL: test_16xi32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: #APP
+; CHECK-NEXT: vadd.vv v8, v8, v16
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <vscale x 16 x i32> asm "vadd.vv $0, $1, $2", "=v,v,v"(<vscale x 16 x i32> %in, <vscale x 16 x i32> %in2)
+ ret <vscale x 16 x i32> %0
+}
+
+define <vscale x 1 x i16> @test_1xi16(<vscale x 1 x i16> %in, <vscale x 1 x i16> %in2) nounwind {
+; CHECK-LABEL: test_1xi16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: #APP
+; CHECK-NEXT: vadd.vv v8, v8, v9
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <vscale x 1 x i16> asm "vadd.vv $0, $1, $2", "=v,v,v"(<vscale x 1 x i16> %in, <vscale x 1 x i16> %in2)
+ ret <vscale x 1 x i16> %0
+}
+
+define <vscale x 2 x i16> @test_2xi16(<vscale x 2 x i16> %in, <vscale x 2 x i16> %in2) nounwind {
+; CHECK-LABEL: test_2xi16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: #APP
+; CHECK-NEXT: vadd.vv v8, v8, v9
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <vscale x 2 x i16> asm "vadd.vv $0, $1, $2", "=v,v,v"(<vscale x 2 x i16> %in, <vscale x 2 x i16> %in2)
+ ret <vscale x 2 x i16> %0
+}
+
+define <vscale x 4 x i16> @test_4xi16(<vscale x 4 x i16> %in, <vscale x 4 x i16> %in2) nounwind {
+; CHECK-LABEL: test_4xi16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: #APP
+; CHECK-NEXT: vadd.vv v8, v8, v9
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <vscale x 4 x i16> asm "vadd.vv $0, $1, $2", "=v,v,v"(<vscale x 4 x i16> %in, <vscale x 4 x i16> %in2)
+ ret <vscale x 4 x i16> %0
+}
+
+define <vscale x 8 x i16> @test_8xi16(<vscale x 8 x i16> %in, <vscale x 8 x i16> %in2) nounwind {
+; CHECK-LABEL: test_8xi16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: #APP
+; CHECK-NEXT: vadd.vv v8, v8, v10
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <vscale x 8 x i16> asm "vadd.vv $0, $1, $2", "=v,v,v"(<vscale x 8 x i16> %in, <vscale x 8 x i16> %in2)
+ ret <vscale x 8 x i16> %0
+}
+
+define <vscale x 16 x i16> @test_16xi16(<vscale x 16 x i16> %in, <vscale x 16 x i16> %in2) nounwind {
+; CHECK-LABEL: test_16xi16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: #APP
+; CHECK-NEXT: vadd.vv v8, v8, v12
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <vscale x 16 x i16> asm "vadd.vv $0, $1, $2", "=v,v,v"(<vscale x 16 x i16> %in, <vscale x 16 x i16> %in2)
+ ret <vscale x 16 x i16> %0
+}
+
+define <vscale x 32 x i16> @test_32xi16(<vscale x 32 x i16> %in, <vscale x 32 x i16> %in2) nounwind {
+; CHECK-LABEL: test_32xi16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: #APP
+; CHECK-NEXT: vadd.vv v8, v8, v16
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <vscale x 32 x i16> asm "vadd.vv $0, $1, $2", "=v,v,v"(<vscale x 32 x i16> %in, <vscale x 32 x i16> %in2)
+ ret <vscale x 32 x i16> %0
+}
+
+define <vscale x 1 x i8> @test_1xi8(<vscale x 1 x i8> %in, <vscale x 1 x i8> %in2) nounwind {
+; CHECK-LABEL: test_1xi8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: #APP
+; CHECK-NEXT: vadd.vv v8, v8, v9
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <vscale x 1 x i8> asm "vadd.vv $0, $1, $2", "=v,v,v"(<vscale x 1 x i8> %in, <vscale x 1 x i8> %in2)
+ ret <vscale x 1 x i8> %0
+}
+
+define <vscale x 2 x i8> @test_2xi8(<vscale x 2 x i8> %in, <vscale x 2 x i8> %in2) nounwind {
+; CHECK-LABEL: test_2xi8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: #APP
+; CHECK-NEXT: vadd.vv v8, v8, v9
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <vscale x 2 x i8> asm "vadd.vv $0, $1, $2", "=v,v,v"(<vscale x 2 x i8> %in, <vscale x 2 x i8> %in2)
+ ret <vscale x 2 x i8> %0
+}
+
+define <vscale x 4 x i8> @test_4xi8(<vscale x 4 x i8> %in, <vscale x 4 x i8> %in2) nounwind {
+; CHECK-LABEL: test_4xi8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: #APP
+; CHECK-NEXT: vadd.vv v8, v8, v9
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <vscale x 4 x i8> asm "vadd.vv $0, $1, $2", "=v,v,v"(<vscale x 4 x i8> %in, <vscale x 4 x i8> %in2)
+ ret <vscale x 4 x i8> %0
+}
+
+define <vscale x 8 x i8> @test_8xi8(<vscale x 8 x i8> %in, <vscale x 8 x i8> %in2) nounwind {
+; CHECK-LABEL: test_8xi8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: #APP
+; CHECK-NEXT: vadd.vv v8, v8, v9
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <vscale x 8 x i8> asm "vadd.vv $0, $1, $2", "=v,v,v"(<vscale x 8 x i8> %in, <vscale x 8 x i8> %in2)
+ ret <vscale x 8 x i8> %0
+}
+
+define <vscale x 16 x i8> @test_16xi8(<vscale x 16 x i8> %in, <vscale x 16 x i8> %in2) nounwind {
+; CHECK-LABEL: test_16xi8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: #APP
+; CHECK-NEXT: vadd.vv v8, v8, v10
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <vscale x 16 x i8> asm "vadd.vv $0, $1, $2", "=v,v,v"(<vscale x 16 x i8> %in, <vscale x 16 x i8> %in2)
+ ret <vscale x 16 x i8> %0
+}
+
+define <vscale x 32 x i8> @test_32xi8(<vscale x 32 x i8> %in, <vscale x 32 x i8> %in2) nounwind {
+; CHECK-LABEL: test_32xi8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: #APP
+; CHECK-NEXT: vadd.vv v8, v8, v12
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <vscale x 32 x i8> asm "vadd.vv $0, $1, $2", "=v,v,v"(<vscale x 32 x i8> %in, <vscale x 32 x i8> %in2)
+ ret <vscale x 32 x i8> %0
+}
+
+define <vscale x 64 x i8> @test_64xi8(<vscale x 64 x i8> %in, <vscale x 64 x i8> %in2) nounwind {
+; CHECK-LABEL: test_64xi8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: #APP
+; CHECK-NEXT: vadd.vv v8, v8, v16
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <vscale x 64 x i8> asm "vadd.vv $0, $1, $2", "=v,v,v"(<vscale x 64 x i8> %in, <vscale x 64 x i8> %in2)
+ ret <vscale x 64 x i8> %0
+}
+
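+; Explicit {vN} operands force the inputs and result into the named
+; registers, so copies (vmv1r.v/vmv2r.v) appear around the asm block.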
+define <vscale x 4 x i8> @test_specify_reg_mf2(<vscale x 4 x i8> %in, <vscale x 4 x i8> %in2) nounwind {
+; CHECK-LABEL: test_specify_reg_mf2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vmv1r.v v2, v9
+; CHECK-NEXT: vmv1r.v v1, v8
+; CHECK-NEXT: #APP
+; CHECK-NEXT: vadd.vv v0, v1, v2
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: vmv1r.v v8, v0
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <vscale x 4 x i8> asm "vadd.vv $0, $1, $2", "={v0},{v1},{v2}"(<vscale x 4 x i8> %in, <vscale x 4 x i8> %in2)
+ ret <vscale x 4 x i8> %0
+}
+
+define <vscale x 8 x i8> @test_specify_reg_m1(<vscale x 8 x i8> %in, <vscale x 8 x i8> %in2) nounwind {
+; CHECK-LABEL: test_specify_reg_m1:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vmv1r.v v2, v9
+; CHECK-NEXT: vmv1r.v v1, v8
+; CHECK-NEXT: #APP
+; CHECK-NEXT: vadd.vv v0, v1, v2
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: vmv1r.v v8, v0
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <vscale x 8 x i8> asm "vadd.vv $0, $1, $2", "={v0},{v1},{v2}"(<vscale x 8 x i8> %in, <vscale x 8 x i8> %in2)
+ ret <vscale x 8 x i8> %0
+}
+
+define <vscale x 16 x i8> @test_specify_reg_m2(<vscale x 16 x i8> %in, <vscale x 16 x i8> %in2) nounwind {
+; CHECK-LABEL: test_specify_reg_m2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vmv2r.v v4, v10
+; CHECK-NEXT: vmv2r.v v2, v8
+; CHECK-NEXT: #APP
+; CHECK-NEXT: vadd.vv v0, v2, v4
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: vmv2r.v v8, v0
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <vscale x 16 x i8> asm "vadd.vv $0, $1, $2", "={v0},{v2},{v4}"(<vscale x 16 x i8> %in, <vscale x 16 x i8> %in2)
+ ret <vscale x 16 x i8> %0
+}
+
+define <vscale x 1 x i1> @test_specify_reg_mask(<vscale x 1 x i1> %in, <vscale x 1 x i1> %in2) nounwind {
+; CHECK-LABEL: test_specify_reg_mask:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vmv1r.v v2, v8
+; CHECK-NEXT: vmv1r.v v1, v0
+; CHECK-NEXT: #APP
+; CHECK-NEXT: vmand.mm v0, v1, v2
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call <vscale x 1 x i1> asm "vmand.mm $0, $1, $2", "={v0},{v1},{v2}"(<vscale x 1 x i1> %in, <vscale x 1 x i1> %in2)
+ ret <vscale x 1 x i1> %0
+}