return Reg - RISCV::F0_D + RISCV::F0_F;
}
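+// Return the VRM2/VRM4/VRM8 register whose first VR sub-register is Reg, or
+// 0 if Kind is not a grouped-register kind or Reg does not start a group.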
+static MCRegister convertVRToVRMx(const MCRegisterInfo &RI, MCRegister Reg,
+ unsigned Kind) {
+ unsigned RegClassID;
+ if (Kind == MCK_VRM2)
+ RegClassID = RISCV::VRM2RegClassID;
+ else if (Kind == MCK_VRM4)
+ RegClassID = RISCV::VRM4RegClassID;
+ else if (Kind == MCK_VRM8)
+ RegClassID = RISCV::VRM8RegClassID;
+ else
+ return 0;
+ return RI.getMatchingSuperReg(Reg, RISCV::sub_vrm1_0,
+ &RISCVMCRegisterClasses[RegClassID]);
+}
+
unsigned RISCVAsmParser::validateTargetOperandClass(MCParsedAsmOperand &AsmOp,
unsigned Kind) {
RISCVOperand &Op = static_cast<RISCVOperand &>(AsmOp);
  bool IsRegFPR64 =
      RISCVMCRegisterClasses[RISCV::FPR64RegClassID].contains(Reg);
bool IsRegFPR64C =
RISCVMCRegisterClasses[RISCV::FPR64CRegClassID].contains(Reg);
+ bool IsRegVR = RISCVMCRegisterClasses[RISCV::VRRegClassID].contains(Reg);
// As the parser couldn't differentiate an FPR32 from an FPR64, coerce the
// register from FPR64 to FPR32 or FPR64C to FPR32C if necessary.
Op.Reg.RegNum = convertFPR64ToFPR16(Reg);
return Match_Success;
}
+ // As the parser couldn't differentiate a VRM2/VRM4/VRM8 from a VR, coerce
+ // the register from VR to VRM2/VRM4/VRM8 if necessary.
+ if (IsRegVR && (Kind == MCK_VRM2 || Kind == MCK_VRM4 || Kind == MCK_VRM8)) {
+ Op.Reg.RegNum = convertVRToVRMx(*getContext().getRegisterInfo(), Reg, Kind);
+ if (Op.Reg.RegNum == 0)
+ return Match_InvalidOperand;
+ return Match_Success;
+ }
return Match_InvalidOperand;
}
return MCDisassembler::Success;
}
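+// Decoders for the grouped vector register classes. A VRM2/VRM4/VRM8 operand
+// must encode a register number that is a multiple of the group size; the
+// grouped register is then looked up via its first VR sub-register.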
+static DecodeStatus DecodeVRM2RegisterClass(MCInst &Inst, uint64_t RegNo,
+ uint64_t Address,
+ const void *Decoder) {
+ if (RegNo >= 32)
+ return MCDisassembler::Fail;
+
+ if (RegNo % 2)
+ return MCDisassembler::Fail;
+
+ const RISCVDisassembler *Dis =
+ static_cast<const RISCVDisassembler *>(Decoder);
+ const MCRegisterInfo *RI = Dis->getContext().getRegisterInfo();
+ MCRegister Reg =
+ RI->getMatchingSuperReg(RISCV::V0 + RegNo, RISCV::sub_vrm1_0,
+ &RISCVMCRegisterClasses[RISCV::VRM2RegClassID]);
+
+ Inst.addOperand(MCOperand::createReg(Reg));
+ return MCDisassembler::Success;
+}
+
+static DecodeStatus DecodeVRM4RegisterClass(MCInst &Inst, uint64_t RegNo,
+ uint64_t Address,
+ const void *Decoder) {
+ if (RegNo >= 32)
+ return MCDisassembler::Fail;
+
+ if (RegNo % 4)
+ return MCDisassembler::Fail;
+
+ const RISCVDisassembler *Dis =
+ static_cast<const RISCVDisassembler *>(Decoder);
+ const MCRegisterInfo *RI = Dis->getContext().getRegisterInfo();
+ MCRegister Reg =
+ RI->getMatchingSuperReg(RISCV::V0 + RegNo, RISCV::sub_vrm1_0,
+ &RISCVMCRegisterClasses[RISCV::VRM4RegClassID]);
+
+ Inst.addOperand(MCOperand::createReg(Reg));
+ return MCDisassembler::Success;
+}
+
+static DecodeStatus DecodeVRM8RegisterClass(MCInst &Inst, uint64_t RegNo,
+ uint64_t Address,
+ const void *Decoder) {
+ if (RegNo >= 32)
+ return MCDisassembler::Fail;
+
+ if (RegNo % 8)
+ return MCDisassembler::Fail;
+
+ const RISCVDisassembler *Dis =
+ static_cast<const RISCVDisassembler *>(Decoder);
+ const MCRegisterInfo *RI = Dis->getContext().getRegisterInfo();
+ MCRegister Reg =
+ RI->getMatchingSuperReg(RISCV::V0 + RegNo, RISCV::sub_vrm1_0,
+ &RISCVMCRegisterClasses[RISCV::VRM8RegClassID]);
+
+ Inst.addOperand(MCOperand::createReg(Reg));
+ return MCDisassembler::Success;
+}
+
static DecodeStatus decodeVMaskReg(MCInst &Inst, uint64_t RegNo,
uint64_t Address, const void *Decoder) {
MCRegister Reg = RISCV::NoRegister;
"$vd, (${rs1}), $vs2$vm">;
// vl<nf>r.v vd, (rs1)
-class VWholeLoad<bits<3> nf, RISCVWidth width, string opcodestr>
+class VWholeLoad<bits<3> nf, RISCVWidth width, string opcodestr, RegisterClass VRC>
: RVInstVLU<nf, width.Value{3}, LUMOPUnitStrideWholeReg,
- width.Value{2-0}, (outs VR:$vd), (ins GPR:$rs1),
+ width.Value{2-0}, (outs VRC:$vd), (ins GPR:$rs1),
opcodestr, "$vd, (${rs1})"> {
let vm = 1;
let Uses = [];
opcodestr, "$vs3, (${rs1}), $vs2$vm">;
// vs<nf>r.v vd, (rs1)
-class VWholeStore<bits<3> nf, string opcodestr>
+class VWholeStore<bits<3> nf, string opcodestr, RegisterClass VRC>
: RVInstVSU<nf, 0, SUMOPUnitStrideWholeReg,
- 0b000, (outs), (ins VR:$vs3, GPR:$rs1),
+ 0b000, (outs), (ins VRC:$vs3, GPR:$rs1),
opcodestr, "$vs3, (${rs1})"> {
let vm = 1;
let Uses = [];
def _UNWD : VAMONoWd<amoop, width, opcodestr>;
}
-multiclass VWholeLoad<bits<3> nf, string opcodestr> {
- def E8_V : VWholeLoad<nf, LSWidth8, opcodestr # "e8.v">;
- def E16_V : VWholeLoad<nf, LSWidth16, opcodestr # "e16.v">;
- def E32_V : VWholeLoad<nf, LSWidth32, opcodestr # "e32.v">;
- def E64_V : VWholeLoad<nf, LSWidth64, opcodestr # "e64.v">;
+multiclass VWholeLoad<bits<3> nf, string opcodestr, RegisterClass VRC> {
+ def E8_V : VWholeLoad<nf, LSWidth8, opcodestr # "e8.v", VRC>;
+ def E16_V : VWholeLoad<nf, LSWidth16, opcodestr # "e16.v", VRC>;
+ def E32_V : VWholeLoad<nf, LSWidth32, opcodestr # "e32.v", VRC>;
+ def E64_V : VWholeLoad<nf, LSWidth64, opcodestr # "e64.v", VRC>;
}
//===----------------------------------------------------------------------===//
def VSOXEI32_V : VIndexedStore<MOPSTIndexedOrder, LSWidth32, "vsoxei32.v">;
def VSOXEI64_V : VIndexedStore<MOPSTIndexedOrder, LSWidth64, "vsoxei64.v">;
-defm VL1R : VWholeLoad<1, "vl1r">;
-defm VL2R : VWholeLoad<2, "vl2r">;
-defm VL4R : VWholeLoad<4, "vl4r">;
-defm VL8R : VWholeLoad<8, "vl8r">;
+defm VL1R : VWholeLoad<1, "vl1r", VR>;
+defm VL2R : VWholeLoad<2, "vl2r", VRM2>;
+defm VL4R : VWholeLoad<4, "vl4r", VRM4>;
+defm VL8R : VWholeLoad<8, "vl8r", VRM8>;
def : InstAlias<"vl1r.v $vd, (${rs1})", (VL1RE8_V VR:$vd, GPR:$rs1)>;
-def : InstAlias<"vl2r.v $vd, (${rs1})", (VL2RE8_V VR:$vd, GPR:$rs1)>;
-def : InstAlias<"vl4r.v $vd, (${rs1})", (VL4RE8_V VR:$vd, GPR:$rs1)>;
-def : InstAlias<"vl8r.v $vd, (${rs1})", (VL8RE8_V VR:$vd, GPR:$rs1)>;
-
-def VS1R_V : VWholeStore<1, "vs1r.v">;
-def VS2R_V : VWholeStore<2, "vs2r.v">;
-def VS4R_V : VWholeStore<4, "vs4r.v">;
-def VS8R_V : VWholeStore<8, "vs8r.v">;
+def : InstAlias<"vl2r.v $vd, (${rs1})", (VL2RE8_V VRM2:$vd, GPR:$rs1)>;
+def : InstAlias<"vl4r.v $vd, (${rs1})", (VL4RE8_V VRM4:$vd, GPR:$rs1)>;
+def : InstAlias<"vl8r.v $vd, (${rs1})", (VL8RE8_V VRM8:$vd, GPR:$rs1)>;
+
+def VS1R_V : VWholeStore<1, "vs1r.v", VR>;
+def VS2R_V : VWholeStore<2, "vs2r.v", VRM2>;
+def VS4R_V : VWholeStore<4, "vs4r.v", VRM4>;
+def VS8R_V : VWholeStore<8, "vs8r.v", VRM8>;
// Vector Single-Width Integer Add and Subtract
defm VADD_V : VALU_IV_V_X_I<"vadd", 0b000000>;
defset list<VTypeInfo> AllVectors = {
defset list<VTypeInfo> AllIntegerVectors = {
defset list<VTypeInfo> NoGroupIntegerVectors = {
- def VI8MF8: VTypeInfo<vint8mf8_t, vbool64_t, 8, VR, V_MF8>;
- def VI8MF4: VTypeInfo<vint8mf4_t, vbool32_t, 8, VR, V_MF4>;
- def VI8MF2: VTypeInfo<vint8mf2_t, vbool16_t, 8, VR, V_MF2>;
+ defset list<VTypeInfo> FractionalGroupIntegerVectors = {
+ def VI8MF8: VTypeInfo<vint8mf8_t, vbool64_t, 8, VR, V_MF8>;
+ def VI8MF4: VTypeInfo<vint8mf4_t, vbool32_t, 8, VR, V_MF4>;
+ def VI8MF2: VTypeInfo<vint8mf2_t, vbool16_t, 8, VR, V_MF2>;
+ def VI16MF4: VTypeInfo<vint16mf4_t, vbool64_t, 16, VR, V_MF4>;
+ def VI16MF2: VTypeInfo<vint16mf2_t, vbool32_t, 16, VR, V_MF2>;
+ def VI32MF2: VTypeInfo<vint32mf2_t, vbool64_t, 32, VR, V_MF2>;
+ }
def VI8M1: VTypeInfo<vint8m1_t, vbool8_t, 8, VR, V_M1>;
- def VI16MF4: VTypeInfo<vint16mf4_t, vbool64_t, 16, VR, V_MF4>;
- def VI16MF2: VTypeInfo<vint16mf2_t, vbool32_t, 16, VR, V_MF2>;
def VI16M1: VTypeInfo<vint16m1_t, vbool16_t, 16, VR, V_M1>;
- def VI32MF2: VTypeInfo<vint32mf2_t, vbool64_t, 32, VR, V_MF2>;
def VI32M1: VTypeInfo<vint32m1_t, vbool32_t, 32, VR, V_M1>;
def VI64M1: VTypeInfo<vint64m1_t, vbool64_t, 64, VR, V_M1>;
}
defset list<VTypeInfo> AllFloatVectors = {
defset list<VTypeInfo> NoGroupFloatVectors = {
- def VF16MF4: VTypeInfo<vfloat16mf4_t, vbool64_t, 16, VR, V_MF4, f16, FPR16>;
- def VF16MF2: VTypeInfo<vfloat16mf2_t, vbool32_t, 16, VR, V_MF2, f16, FPR16>;
+ defset list<VTypeInfo> FractionalGroupFloatVectors = {
+ def VF16MF4: VTypeInfo<vfloat16mf4_t, vbool64_t, 16, VR, V_MF4, f16, FPR16>;
+ def VF16MF2: VTypeInfo<vfloat16mf2_t, vbool32_t, 16, VR, V_MF2, f16, FPR16>;
+ def VF32MF2: VTypeInfo<vfloat32mf2_t,vbool64_t, 32, VR, V_MF2, f32, FPR32>;
+ }
def VF16M1: VTypeInfo<vfloat16m1_t, vbool16_t, 16, VR, V_M1, f16, FPR16>;
-
- def VF32MF2: VTypeInfo<vfloat32mf2_t,vbool64_t, 32, VR, V_MF2, f32, FPR32>;
def VF32M1: VTypeInfo<vfloat32m1_t, vbool32_t, 32, VR, V_M1, f32, FPR32>;
-
def VF64M1: VTypeInfo<vfloat64m1_t, vbool64_t, 64, VR, V_M1, f64, FPR64>;
}
(store_instr reg_class:$rs2, RVVBaseAddr:$rs1, avl, sew)>;
}
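+// Select whole-register loads/stores (vl<nf>re<sew>.v / vs<nf>r.v) for
+// unit-stride accesses that cover a full register group. These instructions
+// do not depend on vl/vtype, so no vsetvli is required before them.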
+multiclass VPatUSLoadStoreWholeVRSDNode<LLVMType type,
+ int sew,
+ LMULInfo vlmul,
+ VReg reg_class>
+{
+ defvar load_instr =
+ !cond(!eq(vlmul.value, V_M1.value): !cast<Instruction>("VL1RE"#sew#"_V"),
+ !eq(vlmul.value, V_M2.value): !cast<Instruction>("VL2RE"#sew#"_V"),
+ !eq(vlmul.value, V_M4.value): !cast<Instruction>("VL4RE"#sew#"_V"),
+ !eq(vlmul.value, V_M8.value): !cast<Instruction>("VL8RE"#sew#"_V"));
+ defvar store_instr =
+ !cond(!eq(vlmul.value, V_M1.value): VS1R_V,
+ !eq(vlmul.value, V_M2.value): VS2R_V,
+ !eq(vlmul.value, V_M4.value): VS4R_V,
+ !eq(vlmul.value, V_M8.value): VS8R_V);
+
+ // Load
+ def : Pat<(type (load RVVBaseAddr:$rs1)),
+ (load_instr RVVBaseAddr:$rs1)>;
+ // Store
+ def : Pat<(store type:$rs2, RVVBaseAddr:$rs1),
+ (store_instr reg_class:$rs2, RVVBaseAddr:$rs1)>;
+}
+
multiclass VPatUSLoadStoreMaskSDNode<MTypeInfo m>
{
defvar load_instr = !cast<Instruction>("PseudoVLE1_V_"#m.BX);
let Predicates = [HasStdExtV] in {
// 7.4. Vector Unit-Stride Instructions
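+// Fractional LMUL types still go through the unit-stride VLE/VSE pseudos;
+// types occupying one or more full registers use the whole-register
+// load/store patterns defined above.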
-foreach vti = AllVectors in
+foreach vti = !listconcat(FractionalGroupIntegerVectors,
+ FractionalGroupFloatVectors) in
defm "" : VPatUSLoadStoreSDNode<vti.Vector, vti.SEW, vti.LMul,
vti.AVL, vti.RegClass>;
+foreach vti = [VI8M1, VI16M1, VI32M1, VI64M1, VF16M1, VF32M1, VF64M1] in
+ defm "" : VPatUSLoadStoreWholeVRSDNode<vti.Vector, vti.SEW, vti.LMul,
+ vti.RegClass>;
+foreach vti = !listconcat(GroupIntegerVectors, GroupFloatVectors) in
+ defm "" : VPatUSLoadStoreWholeVRSDNode<vti.Vector, vti.SEW, vti.LMul,
+ vti.RegClass>;
foreach mti = AllMasks in
defm "" : VPatUSLoadStoreMaskSDNode<mti>;
ret void
}
-; PRE-INSERTER: %3:vr = PseudoVLE64_V_M1 %1, $x0, 64, implicit $vl, implicit $vtype :: (load unknown-size from %ir.pa, align 8)
-; PRE-INSERTER: %4:vr = PseudoVLE64_V_M1 %2, $x0, 64, implicit $vl, implicit $vtype :: (load unknown-size from %ir.pb, align 8)
+; PRE-INSERTER: %3:vr = VL1RE64_V %1 :: (load unknown-size from %ir.pa, align 8)
+; PRE-INSERTER: %4:vr = VL1RE64_V %2 :: (load unknown-size from %ir.pb, align 8)
; PRE-INSERTER: %5:vr = PseudoVADD_VV_M1 killed %3, killed %4, $x0, 64, implicit $vl, implicit $vtype
-; PRE-INSERTER: PseudoVSE64_V_M1 killed %5, %0, $x0, 64, implicit $vl, implicit $vtype :: (store unknown-size into %ir.pc, align 8)
+; PRE-INSERTER: VS1R_V killed %5, %0 :: (store unknown-size into %ir.pc, align 8)
+; POST-INSERTER: %3:vr = VL1RE64_V %1 :: (load unknown-size from %ir.pa, align 8)
+; POST-INSERTER: %4:vr = VL1RE64_V %2 :: (load unknown-size from %ir.pb, align 8)
; POST-INSERTER: dead %6:gpr = PseudoVSETVLI $x0, 88, implicit-def $vl, implicit-def $vtype
-; POST-INSERTER: %3:vr = PseudoVLE64_V_M1 %1, $noreg, -1, implicit $vl, implicit $vtype :: (load unknown-size from %ir.pa, align 8)
-; POST-INSERTER: dead %7:gpr = PseudoVSETVLI $x0, 88, implicit-def $vl, implicit-def $vtype
-; POST-INSERTER: %4:vr = PseudoVLE64_V_M1 %2, $noreg, -1, implicit $vl, implicit $vtype :: (load unknown-size from %ir.pb, align 8)
-; POST-INSERTER: dead %8:gpr = PseudoVSETVLI $x0, 88, implicit-def $vl, implicit-def $vtype
; POST-INSERTER: %5:vr = PseudoVADD_VV_M1 killed %3, killed %4, $noreg, -1, implicit $vl, implicit $vtype
-; POST-INSERTER: dead %9:gpr = PseudoVSETVLI $x0, 88, implicit-def $vl, implicit-def $vtype
-; POST-INSERTER: PseudoVSE64_V_M1 killed %5, %0, $noreg, -1, implicit $vl, implicit $vtype :: (store unknown-size into %ir.pc, align 8)
+; POST-INSERTER: VS1R_V killed %5, %0 :: (store unknown-size into %ir.pc, align 8)
define void @vadd_vint16m1(<vscale x 4 x i16> *%pc, <vscale x 4 x i16> *%pa, <vscale x 4 x i16> *%pb) nounwind {
; CHECK-LABEL: vadd_vint16m1:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a3, zero, e16,m1,ta,mu
-; CHECK-NEXT: vle16.v v25, (a1)
-; CHECK-NEXT: vle16.v v26, (a2)
+; CHECK-NEXT: vl1re16.v v25, (a1)
+; CHECK-NEXT: vl1re16.v v26, (a2)
+; CHECK-NEXT: vsetvli a1, zero, e16,m1,ta,mu
; CHECK-NEXT: vadd.vv v25, v25, v26
-; CHECK-NEXT: vse16.v v25, (a0)
+; CHECK-NEXT: vs1r.v v25, (a0)
; CHECK-NEXT: ret
%va = load <vscale x 4 x i16>, <vscale x 4 x i16>* %pa
%vb = load <vscale x 4 x i16>, <vscale x 4 x i16>* %pb
define void @vadd_vint16m2(<vscale x 8 x i16> *%pc, <vscale x 8 x i16> *%pa, <vscale x 8 x i16> *%pb) nounwind {
; CHECK-LABEL: vadd_vint16m2:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a3, zero, e16,m2,ta,mu
-; CHECK-NEXT: vle16.v v26, (a1)
-; CHECK-NEXT: vle16.v v28, (a2)
+; CHECK-NEXT: vl2re16.v v26, (a1)
+; CHECK-NEXT: vl2re16.v v28, (a2)
+; CHECK-NEXT: vsetvli a1, zero, e16,m2,ta,mu
; CHECK-NEXT: vadd.vv v26, v26, v28
-; CHECK-NEXT: vse16.v v26, (a0)
+; CHECK-NEXT: vs2r.v v26, (a0)
; CHECK-NEXT: ret
%va = load <vscale x 8 x i16>, <vscale x 8 x i16>* %pa
%vb = load <vscale x 8 x i16>, <vscale x 8 x i16>* %pb
define void @vadd_vint16m4(<vscale x 16 x i16> *%pc, <vscale x 16 x i16> *%pa, <vscale x 16 x i16> *%pb) nounwind {
; CHECK-LABEL: vadd_vint16m4:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a3, zero, e16,m4,ta,mu
-; CHECK-NEXT: vle16.v v28, (a1)
-; CHECK-NEXT: vle16.v v8, (a2)
+; CHECK-NEXT: vl4re16.v v28, (a1)
+; CHECK-NEXT: vl4re16.v v8, (a2)
+; CHECK-NEXT: vsetvli a1, zero, e16,m4,ta,mu
; CHECK-NEXT: vadd.vv v28, v28, v8
-; CHECK-NEXT: vse16.v v28, (a0)
+; CHECK-NEXT: vs4r.v v28, (a0)
; CHECK-NEXT: ret
%va = load <vscale x 16 x i16>, <vscale x 16 x i16>* %pa
%vb = load <vscale x 16 x i16>, <vscale x 16 x i16>* %pb
define void @vadd_vint16m8(<vscale x 32 x i16> *%pc, <vscale x 32 x i16> *%pa, <vscale x 32 x i16> *%pb) nounwind {
; CHECK-LABEL: vadd_vint16m8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a3, zero, e16,m8,ta,mu
-; CHECK-NEXT: vle16.v v8, (a1)
-; CHECK-NEXT: vle16.v v16, (a2)
+; CHECK-NEXT: vl8re16.v v8, (a1)
+; CHECK-NEXT: vl8re16.v v16, (a2)
+; CHECK-NEXT: vsetvli a1, zero, e16,m8,ta,mu
; CHECK-NEXT: vadd.vv v8, v8, v16
-; CHECK-NEXT: vse16.v v8, (a0)
+; CHECK-NEXT: vs8r.v v8, (a0)
; CHECK-NEXT: ret
%va = load <vscale x 32 x i16>, <vscale x 32 x i16>* %pa
%vb = load <vscale x 32 x i16>, <vscale x 32 x i16>* %pb
define void @vadd_vint32m1(<vscale x 2 x i32> *%pc, <vscale x 2 x i32> *%pa, <vscale x 2 x i32> *%pb) nounwind {
; CHECK-LABEL: vadd_vint32m1:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a3, zero, e32,m1,ta,mu
-; CHECK-NEXT: vle32.v v25, (a1)
-; CHECK-NEXT: vle32.v v26, (a2)
+; CHECK-NEXT: vl1re32.v v25, (a1)
+; CHECK-NEXT: vl1re32.v v26, (a2)
+; CHECK-NEXT: vsetvli a1, zero, e32,m1,ta,mu
; CHECK-NEXT: vadd.vv v25, v25, v26
-; CHECK-NEXT: vse32.v v25, (a0)
+; CHECK-NEXT: vs1r.v v25, (a0)
; CHECK-NEXT: ret
%va = load <vscale x 2 x i32>, <vscale x 2 x i32>* %pa
%vb = load <vscale x 2 x i32>, <vscale x 2 x i32>* %pb
define void @vadd_vint32m2(<vscale x 4 x i32> *%pc, <vscale x 4 x i32> *%pa, <vscale x 4 x i32> *%pb) nounwind {
; CHECK-LABEL: vadd_vint32m2:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a3, zero, e32,m2,ta,mu
-; CHECK-NEXT: vle32.v v26, (a1)
-; CHECK-NEXT: vle32.v v28, (a2)
+; CHECK-NEXT: vl2re32.v v26, (a1)
+; CHECK-NEXT: vl2re32.v v28, (a2)
+; CHECK-NEXT: vsetvli a1, zero, e32,m2,ta,mu
; CHECK-NEXT: vadd.vv v26, v26, v28
-; CHECK-NEXT: vse32.v v26, (a0)
+; CHECK-NEXT: vs2r.v v26, (a0)
; CHECK-NEXT: ret
%va = load <vscale x 4 x i32>, <vscale x 4 x i32>* %pa
%vb = load <vscale x 4 x i32>, <vscale x 4 x i32>* %pb
define void @vadd_vint32m4(<vscale x 8 x i32> *%pc, <vscale x 8 x i32> *%pa, <vscale x 8 x i32> *%pb) nounwind {
; CHECK-LABEL: vadd_vint32m4:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a3, zero, e32,m4,ta,mu
-; CHECK-NEXT: vle32.v v28, (a1)
-; CHECK-NEXT: vle32.v v8, (a2)
+; CHECK-NEXT: vl4re32.v v28, (a1)
+; CHECK-NEXT: vl4re32.v v8, (a2)
+; CHECK-NEXT: vsetvli a1, zero, e32,m4,ta,mu
; CHECK-NEXT: vadd.vv v28, v28, v8
-; CHECK-NEXT: vse32.v v28, (a0)
+; CHECK-NEXT: vs4r.v v28, (a0)
; CHECK-NEXT: ret
%va = load <vscale x 8 x i32>, <vscale x 8 x i32>* %pa
%vb = load <vscale x 8 x i32>, <vscale x 8 x i32>* %pb
define void @vadd_vint32m8(<vscale x 16 x i32> *%pc, <vscale x 16 x i32> *%pa, <vscale x 16 x i32> *%pb) nounwind {
; CHECK-LABEL: vadd_vint32m8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a3, zero, e32,m8,ta,mu
-; CHECK-NEXT: vle32.v v8, (a1)
-; CHECK-NEXT: vle32.v v16, (a2)
+; CHECK-NEXT: vl8re32.v v8, (a1)
+; CHECK-NEXT: vl8re32.v v16, (a2)
+; CHECK-NEXT: vsetvli a1, zero, e32,m8,ta,mu
; CHECK-NEXT: vadd.vv v8, v8, v16
-; CHECK-NEXT: vse32.v v8, (a0)
+; CHECK-NEXT: vs8r.v v8, (a0)
; CHECK-NEXT: ret
%va = load <vscale x 16 x i32>, <vscale x 16 x i32>* %pa
%vb = load <vscale x 16 x i32>, <vscale x 16 x i32>* %pb
define void @vadd_vint64m1(<vscale x 1 x i64> *%pc, <vscale x 1 x i64> *%pa, <vscale x 1 x i64> *%pb) nounwind {
; CHECK-LABEL: vadd_vint64m1:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a3, zero, e64,m1,ta,mu
-; CHECK-NEXT: vle64.v v25, (a1)
-; CHECK-NEXT: vle64.v v26, (a2)
+; CHECK-NEXT: vl1re64.v v25, (a1)
+; CHECK-NEXT: vl1re64.v v26, (a2)
+; CHECK-NEXT: vsetvli a1, zero, e64,m1,ta,mu
; CHECK-NEXT: vadd.vv v25, v25, v26
-; CHECK-NEXT: vse64.v v25, (a0)
+; CHECK-NEXT: vs1r.v v25, (a0)
; CHECK-NEXT: ret
%va = load <vscale x 1 x i64>, <vscale x 1 x i64>* %pa
%vb = load <vscale x 1 x i64>, <vscale x 1 x i64>* %pb
define void @vadd_vint64m2(<vscale x 2 x i64> *%pc, <vscale x 2 x i64> *%pa, <vscale x 2 x i64> *%pb) nounwind {
; CHECK-LABEL: vadd_vint64m2:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a3, zero, e64,m2,ta,mu
-; CHECK-NEXT: vle64.v v26, (a1)
-; CHECK-NEXT: vle64.v v28, (a2)
+; CHECK-NEXT: vl2re64.v v26, (a1)
+; CHECK-NEXT: vl2re64.v v28, (a2)
+; CHECK-NEXT: vsetvli a1, zero, e64,m2,ta,mu
; CHECK-NEXT: vadd.vv v26, v26, v28
-; CHECK-NEXT: vse64.v v26, (a0)
+; CHECK-NEXT: vs2r.v v26, (a0)
; CHECK-NEXT: ret
%va = load <vscale x 2 x i64>, <vscale x 2 x i64>* %pa
%vb = load <vscale x 2 x i64>, <vscale x 2 x i64>* %pb
define void @vadd_vint64m4(<vscale x 4 x i64> *%pc, <vscale x 4 x i64> *%pa, <vscale x 4 x i64> *%pb) nounwind {
; CHECK-LABEL: vadd_vint64m4:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a3, zero, e64,m4,ta,mu
-; CHECK-NEXT: vle64.v v28, (a1)
-; CHECK-NEXT: vle64.v v8, (a2)
+; CHECK-NEXT: vl4re64.v v28, (a1)
+; CHECK-NEXT: vl4re64.v v8, (a2)
+; CHECK-NEXT: vsetvli a1, zero, e64,m4,ta,mu
; CHECK-NEXT: vadd.vv v28, v28, v8
-; CHECK-NEXT: vse64.v v28, (a0)
+; CHECK-NEXT: vs4r.v v28, (a0)
; CHECK-NEXT: ret
%va = load <vscale x 4 x i64>, <vscale x 4 x i64>* %pa
%vb = load <vscale x 4 x i64>, <vscale x 4 x i64>* %pb
define void @vadd_vint64m8(<vscale x 8 x i64> *%pc, <vscale x 8 x i64> *%pa, <vscale x 8 x i64> *%pb) nounwind {
; CHECK-LABEL: vadd_vint64m8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a3, zero, e64,m8,ta,mu
-; CHECK-NEXT: vle64.v v8, (a1)
-; CHECK-NEXT: vle64.v v16, (a2)
+; CHECK-NEXT: vl8re64.v v8, (a1)
+; CHECK-NEXT: vl8re64.v v16, (a2)
+; CHECK-NEXT: vsetvli a1, zero, e64,m8,ta,mu
; CHECK-NEXT: vadd.vv v8, v8, v16
-; CHECK-NEXT: vse64.v v8, (a0)
+; CHECK-NEXT: vs8r.v v8, (a0)
; CHECK-NEXT: ret
%va = load <vscale x 8 x i64>, <vscale x 8 x i64>* %pa
%vb = load <vscale x 8 x i64>, <vscale x 8 x i64>* %pb
define void @vadd_vint8m1(<vscale x 8 x i8> *%pc, <vscale x 8 x i8> *%pa, <vscale x 8 x i8> *%pb) nounwind {
; CHECK-LABEL: vadd_vint8m1:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a3, zero, e8,m1,ta,mu
-; CHECK-NEXT: vle8.v v25, (a1)
-; CHECK-NEXT: vle8.v v26, (a2)
+; CHECK-NEXT: vl1r.v v25, (a1)
+; CHECK-NEXT: vl1r.v v26, (a2)
+; CHECK-NEXT: vsetvli a1, zero, e8,m1,ta,mu
; CHECK-NEXT: vadd.vv v25, v25, v26
-; CHECK-NEXT: vse8.v v25, (a0)
+; CHECK-NEXT: vs1r.v v25, (a0)
; CHECK-NEXT: ret
%va = load <vscale x 8 x i8>, <vscale x 8 x i8>* %pa
%vb = load <vscale x 8 x i8>, <vscale x 8 x i8>* %pb
define void @vadd_vint8m2(<vscale x 16 x i8> *%pc, <vscale x 16 x i8> *%pa, <vscale x 16 x i8> *%pb) nounwind {
; CHECK-LABEL: vadd_vint8m2:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a3, zero, e8,m2,ta,mu
-; CHECK-NEXT: vle8.v v26, (a1)
-; CHECK-NEXT: vle8.v v28, (a2)
+; CHECK-NEXT: vl2r.v v26, (a1)
+; CHECK-NEXT: vl2r.v v28, (a2)
+; CHECK-NEXT: vsetvli a1, zero, e8,m2,ta,mu
; CHECK-NEXT: vadd.vv v26, v26, v28
-; CHECK-NEXT: vse8.v v26, (a0)
+; CHECK-NEXT: vs2r.v v26, (a0)
; CHECK-NEXT: ret
%va = load <vscale x 16 x i8>, <vscale x 16 x i8>* %pa
%vb = load <vscale x 16 x i8>, <vscale x 16 x i8>* %pb
define void @vadd_vint8m4(<vscale x 32 x i8> *%pc, <vscale x 32 x i8> *%pa, <vscale x 32 x i8> *%pb) nounwind {
; CHECK-LABEL: vadd_vint8m4:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a3, zero, e8,m4,ta,mu
-; CHECK-NEXT: vle8.v v28, (a1)
-; CHECK-NEXT: vle8.v v8, (a2)
+; CHECK-NEXT: vl4r.v v28, (a1)
+; CHECK-NEXT: vl4r.v v8, (a2)
+; CHECK-NEXT: vsetvli a1, zero, e8,m4,ta,mu
; CHECK-NEXT: vadd.vv v28, v28, v8
-; CHECK-NEXT: vse8.v v28, (a0)
+; CHECK-NEXT: vs4r.v v28, (a0)
; CHECK-NEXT: ret
%va = load <vscale x 32 x i8>, <vscale x 32 x i8>* %pa
%vb = load <vscale x 32 x i8>, <vscale x 32 x i8>* %pb
define void @vadd_vint8m8(<vscale x 64 x i8> *%pc, <vscale x 64 x i8> *%pa, <vscale x 64 x i8> *%pb) nounwind {
; CHECK-LABEL: vadd_vint8m8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a3, zero, e8,m8,ta,mu
-; CHECK-NEXT: vle8.v v8, (a1)
-; CHECK-NEXT: vle8.v v16, (a2)
+; CHECK-NEXT: vl8r.v v8, (a1)
+; CHECK-NEXT: vl8r.v v16, (a2)
+; CHECK-NEXT: vsetvli a1, zero, e8,m8,ta,mu
; CHECK-NEXT: vadd.vv v8, v8, v16
-; CHECK-NEXT: vse8.v v8, (a0)
+; CHECK-NEXT: vs8r.v v8, (a0)
; CHECK-NEXT: ret
%va = load <vscale x 64 x i8>, <vscale x 64 x i8>* %pa
%vb = load <vscale x 64 x i8>, <vscale x 64 x i8>* %pb
define <vscale x 64 x i8> @intrinsic_vaadd_mask_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i8> %2, <vscale x 64 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vaadd_mask_vv_nxv64i8_nxv64i8_nxv64i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, zero, e8,m8,ta,mu
-; CHECK-NEXT: vle8.v v24, (a0)
+; CHECK-NEXT: vl8re8.v v24, (a0)
; CHECK-NEXT: vsetvli a0, a1, e8,m8,tu,mu
; CHECK-NEXT: vaadd.vv v8, v16, v24, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
define <vscale x 32 x i16> @intrinsic_vaadd_mask_vv_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, <vscale x 32 x i16> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vaadd_mask_vv_nxv32i16_nxv32i16_nxv32i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, zero, e16,m8,ta,mu
-; CHECK-NEXT: vle16.v v24, (a0)
+; CHECK-NEXT: vl8re16.v v24, (a0)
; CHECK-NEXT: vsetvli a0, a1, e16,m8,tu,mu
; CHECK-NEXT: vaadd.vv v8, v16, v24, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
define <vscale x 16 x i32> @intrinsic_vaadd_mask_vv_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vaadd_mask_vv_nxv16i32_nxv16i32_nxv16i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, zero, e32,m8,ta,mu
-; CHECK-NEXT: vle32.v v24, (a0)
+; CHECK-NEXT: vl8re32.v v24, (a0)
; CHECK-NEXT: vsetvli a0, a1, e32,m8,tu,mu
; CHECK-NEXT: vaadd.vv v8, v16, v24, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
define <vscale x 64 x i8> @intrinsic_vaadd_mask_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i8> %2, <vscale x 64 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vaadd_mask_vv_nxv64i8_nxv64i8_nxv64i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, zero, e8,m8,ta,mu
-; CHECK-NEXT: vle8.v v24, (a0)
+; CHECK-NEXT: vl8re8.v v24, (a0)
; CHECK-NEXT: vsetvli a0, a1, e8,m8,tu,mu
; CHECK-NEXT: vaadd.vv v8, v16, v24, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
define <vscale x 32 x i16> @intrinsic_vaadd_mask_vv_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, <vscale x 32 x i16> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vaadd_mask_vv_nxv32i16_nxv32i16_nxv32i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, zero, e16,m8,ta,mu
-; CHECK-NEXT: vle16.v v24, (a0)
+; CHECK-NEXT: vl8re16.v v24, (a0)
; CHECK-NEXT: vsetvli a0, a1, e16,m8,tu,mu
; CHECK-NEXT: vaadd.vv v8, v16, v24, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
define <vscale x 16 x i32> @intrinsic_vaadd_mask_vv_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vaadd_mask_vv_nxv16i32_nxv16i32_nxv16i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, zero, e32,m8,ta,mu
-; CHECK-NEXT: vle32.v v24, (a0)
+; CHECK-NEXT: vl8re32.v v24, (a0)
; CHECK-NEXT: vsetvli a0, a1, e32,m8,tu,mu
; CHECK-NEXT: vaadd.vv v8, v16, v24, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
define <vscale x 8 x i64> @intrinsic_vaadd_mask_vv_nxv8i64_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vaadd_mask_vv_nxv8i64_nxv8i64_nxv8i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, zero, e64,m8,ta,mu
-; CHECK-NEXT: vle64.v v24, (a0)
+; CHECK-NEXT: vl8re64.v v24, (a0)
; CHECK-NEXT: vsetvli a0, a1, e64,m8,tu,mu
; CHECK-NEXT: vaadd.vv v8, v16, v24, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
define <vscale x 64 x i8> @intrinsic_vaaddu_mask_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i8> %2, <vscale x 64 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vaaddu_mask_vv_nxv64i8_nxv64i8_nxv64i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, zero, e8,m8,ta,mu
-; CHECK-NEXT: vle8.v v24, (a0)
+; CHECK-NEXT: vl8re8.v v24, (a0)
; CHECK-NEXT: vsetvli a0, a1, e8,m8,tu,mu
; CHECK-NEXT: vaaddu.vv v8, v16, v24, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
define <vscale x 32 x i16> @intrinsic_vaaddu_mask_vv_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, <vscale x 32 x i16> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vaaddu_mask_vv_nxv32i16_nxv32i16_nxv32i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, zero, e16,m8,ta,mu
-; CHECK-NEXT: vle16.v v24, (a0)
+; CHECK-NEXT: vl8re16.v v24, (a0)
; CHECK-NEXT: vsetvli a0, a1, e16,m8,tu,mu
; CHECK-NEXT: vaaddu.vv v8, v16, v24, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
define <vscale x 16 x i32> @intrinsic_vaaddu_mask_vv_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vaaddu_mask_vv_nxv16i32_nxv16i32_nxv16i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, zero, e32,m8,ta,mu
-; CHECK-NEXT: vle32.v v24, (a0)
+; CHECK-NEXT: vl8re32.v v24, (a0)
; CHECK-NEXT: vsetvli a0, a1, e32,m8,tu,mu
; CHECK-NEXT: vaaddu.vv v8, v16, v24, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
define <vscale x 64 x i8> @intrinsic_vaaddu_mask_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i8> %2, <vscale x 64 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vaaddu_mask_vv_nxv64i8_nxv64i8_nxv64i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, zero, e8,m8,ta,mu
-; CHECK-NEXT: vle8.v v24, (a0)
+; CHECK-NEXT: vl8re8.v v24, (a0)
; CHECK-NEXT: vsetvli a0, a1, e8,m8,tu,mu
; CHECK-NEXT: vaaddu.vv v8, v16, v24, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
define <vscale x 32 x i16> @intrinsic_vaaddu_mask_vv_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, <vscale x 32 x i16> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vaaddu_mask_vv_nxv32i16_nxv32i16_nxv32i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, zero, e16,m8,ta,mu
-; CHECK-NEXT: vle16.v v24, (a0)
+; CHECK-NEXT: vl8re16.v v24, (a0)
; CHECK-NEXT: vsetvli a0, a1, e16,m8,tu,mu
; CHECK-NEXT: vaaddu.vv v8, v16, v24, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
define <vscale x 16 x i32> @intrinsic_vaaddu_mask_vv_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vaaddu_mask_vv_nxv16i32_nxv16i32_nxv16i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, zero, e32,m8,ta,mu
-; CHECK-NEXT: vle32.v v24, (a0)
+; CHECK-NEXT: vl8re32.v v24, (a0)
; CHECK-NEXT: vsetvli a0, a1, e32,m8,tu,mu
; CHECK-NEXT: vaaddu.vv v8, v16, v24, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
define <vscale x 8 x i64> @intrinsic_vaaddu_mask_vv_nxv8i64_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vaaddu_mask_vv_nxv8i64_nxv8i64_nxv8i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, zero, e64,m8,ta,mu
-; CHECK-NEXT: vle64.v v24, (a0)
+; CHECK-NEXT: vl8re64.v v24, (a0)
; CHECK-NEXT: vsetvli a0, a1, e64,m8,tu,mu
; CHECK-NEXT: vaaddu.vv v8, v16, v24, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
define <vscale x 64 x i8> @intrinsic_vadd_mask_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i8> %2, <vscale x 64 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vadd_mask_vv_nxv64i8_nxv64i8_nxv64i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, zero, e8,m8,ta,mu
-; CHECK-NEXT: vle8.v v24, (a0)
+; CHECK-NEXT: vl8re8.v v24, (a0)
; CHECK-NEXT: vsetvli a0, a1, e8,m8,tu,mu
; CHECK-NEXT: vadd.vv v8, v16, v24, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
define <vscale x 32 x i16> @intrinsic_vadd_mask_vv_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, <vscale x 32 x i16> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vadd_mask_vv_nxv32i16_nxv32i16_nxv32i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, zero, e16,m8,ta,mu
-; CHECK-NEXT: vle16.v v24, (a0)
+; CHECK-NEXT: vl8re16.v v24, (a0)
; CHECK-NEXT: vsetvli a0, a1, e16,m8,tu,mu
; CHECK-NEXT: vadd.vv v8, v16, v24, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
define <vscale x 16 x i32> @intrinsic_vadd_mask_vv_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vadd_mask_vv_nxv16i32_nxv16i32_nxv16i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, zero, e32,m8,ta,mu
-; CHECK-NEXT: vle32.v v24, (a0)
+; CHECK-NEXT: vl8re32.v v24, (a0)
; CHECK-NEXT: vsetvli a0, a1, e32,m8,tu,mu
; CHECK-NEXT: vadd.vv v8, v16, v24, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
define <vscale x 64 x i8> @intrinsic_vadd_mask_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i8> %2, <vscale x 64 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vadd_mask_vv_nxv64i8_nxv64i8_nxv64i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, zero, e8,m8,ta,mu
-; CHECK-NEXT: vle8.v v24, (a0)
+; CHECK-NEXT: vl8re8.v v24, (a0)
; CHECK-NEXT: vsetvli a0, a1, e8,m8,tu,mu
; CHECK-NEXT: vadd.vv v8, v16, v24, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
define <vscale x 32 x i16> @intrinsic_vadd_mask_vv_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, <vscale x 32 x i16> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vadd_mask_vv_nxv32i16_nxv32i16_nxv32i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, zero, e16,m8,ta,mu
-; CHECK-NEXT: vle16.v v24, (a0)
+; CHECK-NEXT: vl8re16.v v24, (a0)
; CHECK-NEXT: vsetvli a0, a1, e16,m8,tu,mu
; CHECK-NEXT: vadd.vv v8, v16, v24, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
define <vscale x 16 x i32> @intrinsic_vadd_mask_vv_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vadd_mask_vv_nxv16i32_nxv16i32_nxv16i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, zero, e32,m8,ta,mu
-; CHECK-NEXT: vle32.v v24, (a0)
+; CHECK-NEXT: vl8re32.v v24, (a0)
; CHECK-NEXT: vsetvli a0, a1, e32,m8,tu,mu
; CHECK-NEXT: vadd.vv v8, v16, v24, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
define <vscale x 8 x i64> @intrinsic_vadd_mask_vv_nxv8i64_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vadd_mask_vv_nxv8i64_nxv8i64_nxv8i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, zero, e64,m8,ta,mu
-; CHECK-NEXT: vle64.v v24, (a0)
+; CHECK-NEXT: vl8re64.v v24, (a0)
; CHECK-NEXT: vsetvli a0, a1, e64,m8,tu,mu
; CHECK-NEXT: vadd.vv v8, v16, v24, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
define <vscale x 64 x i8> @intrinsic_vand_mask_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i8> %2, <vscale x 64 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vand_mask_vv_nxv64i8_nxv64i8_nxv64i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, zero, e8,m8,ta,mu
-; CHECK-NEXT: vle8.v v24, (a0)
+; CHECK-NEXT: vl8re8.v v24, (a0)
; CHECK-NEXT: vsetvli a0, a1, e8,m8,tu,mu
; CHECK-NEXT: vand.vv v8, v16, v24, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
define <vscale x 32 x i16> @intrinsic_vand_mask_vv_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, <vscale x 32 x i16> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vand_mask_vv_nxv32i16_nxv32i16_nxv32i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, zero, e16,m8,ta,mu
-; CHECK-NEXT: vle16.v v24, (a0)
+; CHECK-NEXT: vl8re16.v v24, (a0)
; CHECK-NEXT: vsetvli a0, a1, e16,m8,tu,mu
; CHECK-NEXT: vand.vv v8, v16, v24, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
define <vscale x 16 x i32> @intrinsic_vand_mask_vv_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vand_mask_vv_nxv16i32_nxv16i32_nxv16i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, zero, e32,m8,ta,mu
-; CHECK-NEXT: vle32.v v24, (a0)
+; CHECK-NEXT: vl8re32.v v24, (a0)
; CHECK-NEXT: vsetvli a0, a1, e32,m8,tu,mu
; CHECK-NEXT: vand.vv v8, v16, v24, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
define <vscale x 64 x i8> @intrinsic_vand_mask_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i8> %2, <vscale x 64 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vand_mask_vv_nxv64i8_nxv64i8_nxv64i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, zero, e8,m8,ta,mu
-; CHECK-NEXT: vle8.v v24, (a0)
+; CHECK-NEXT: vl8re8.v v24, (a0)
; CHECK-NEXT: vsetvli a0, a1, e8,m8,tu,mu
; CHECK-NEXT: vand.vv v8, v16, v24, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
define <vscale x 32 x i16> @intrinsic_vand_mask_vv_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, <vscale x 32 x i16> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vand_mask_vv_nxv32i16_nxv32i16_nxv32i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, zero, e16,m8,ta,mu
-; CHECK-NEXT: vle16.v v24, (a0)
+; CHECK-NEXT: vl8re16.v v24, (a0)
; CHECK-NEXT: vsetvli a0, a1, e16,m8,tu,mu
; CHECK-NEXT: vand.vv v8, v16, v24, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
define <vscale x 16 x i32> @intrinsic_vand_mask_vv_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vand_mask_vv_nxv16i32_nxv16i32_nxv16i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, zero, e32,m8,ta,mu
-; CHECK-NEXT: vle32.v v24, (a0)
+; CHECK-NEXT: vl8re32.v v24, (a0)
; CHECK-NEXT: vsetvli a0, a1, e32,m8,tu,mu
; CHECK-NEXT: vand.vv v8, v16, v24, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
define <vscale x 8 x i64> @intrinsic_vand_mask_vv_nxv8i64_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vand_mask_vv_nxv8i64_nxv8i64_nxv8i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, zero, e64,m8,ta,mu
-; CHECK-NEXT: vle64.v v24, (a0)
+; CHECK-NEXT: vl8re64.v v24, (a0)
; CHECK-NEXT: vsetvli a0, a1, e64,m8,tu,mu
; CHECK-NEXT: vand.vv v8, v16, v24, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
define <vscale x 64 x i8> @intrinsic_vasub_mask_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i8> %2, <vscale x 64 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vasub_mask_vv_nxv64i8_nxv64i8_nxv64i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, zero, e8,m8,ta,mu
-; CHECK-NEXT: vle8.v v24, (a0)
+; CHECK-NEXT: vl8re8.v v24, (a0)
; CHECK-NEXT: vsetvli a0, a1, e8,m8,tu,mu
; CHECK-NEXT: vasub.vv v8, v16, v24, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
define <vscale x 32 x i16> @intrinsic_vasub_mask_vv_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, <vscale x 32 x i16> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vasub_mask_vv_nxv32i16_nxv32i16_nxv32i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, zero, e16,m8,ta,mu
-; CHECK-NEXT: vle16.v v24, (a0)
+; CHECK-NEXT: vl8re16.v v24, (a0)
; CHECK-NEXT: vsetvli a0, a1, e16,m8,tu,mu
; CHECK-NEXT: vasub.vv v8, v16, v24, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
define <vscale x 16 x i32> @intrinsic_vasub_mask_vv_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vasub_mask_vv_nxv16i32_nxv16i32_nxv16i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, zero, e32,m8,ta,mu
-; CHECK-NEXT: vle32.v v24, (a0)
+; CHECK-NEXT: vl8re32.v v24, (a0)
; CHECK-NEXT: vsetvli a0, a1, e32,m8,tu,mu
; CHECK-NEXT: vasub.vv v8, v16, v24, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
define <vscale x 64 x i8> @intrinsic_vasub_mask_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i8> %2, <vscale x 64 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vasub_mask_vv_nxv64i8_nxv64i8_nxv64i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, zero, e8,m8,ta,mu
-; CHECK-NEXT: vle8.v v24, (a0)
+; CHECK-NEXT: vl8re8.v v24, (a0)
; CHECK-NEXT: vsetvli a0, a1, e8,m8,tu,mu
; CHECK-NEXT: vasub.vv v8, v16, v24, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
define <vscale x 32 x i16> @intrinsic_vasub_mask_vv_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, <vscale x 32 x i16> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vasub_mask_vv_nxv32i16_nxv32i16_nxv32i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, zero, e16,m8,ta,mu
-; CHECK-NEXT: vle16.v v24, (a0)
+; CHECK-NEXT: vl8re16.v v24, (a0)
; CHECK-NEXT: vsetvli a0, a1, e16,m8,tu,mu
; CHECK-NEXT: vasub.vv v8, v16, v24, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
define <vscale x 16 x i32> @intrinsic_vasub_mask_vv_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vasub_mask_vv_nxv16i32_nxv16i32_nxv16i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, zero, e32,m8,ta,mu
-; CHECK-NEXT: vle32.v v24, (a0)
+; CHECK-NEXT: vl8re32.v v24, (a0)
; CHECK-NEXT: vsetvli a0, a1, e32,m8,tu,mu
; CHECK-NEXT: vasub.vv v8, v16, v24, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
define <vscale x 8 x i64> @intrinsic_vasub_mask_vv_nxv8i64_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vasub_mask_vv_nxv8i64_nxv8i64_nxv8i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, zero, e64,m8,ta,mu
-; CHECK-NEXT: vle64.v v24, (a0)
+; CHECK-NEXT: vl8re64.v v24, (a0)
; CHECK-NEXT: vsetvli a0, a1, e64,m8,tu,mu
; CHECK-NEXT: vasub.vv v8, v16, v24, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
define <vscale x 64 x i8> @intrinsic_vasubu_mask_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i8> %2, <vscale x 64 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vasubu_mask_vv_nxv64i8_nxv64i8_nxv64i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, zero, e8,m8,ta,mu
-; CHECK-NEXT: vle8.v v24, (a0)
+; CHECK-NEXT: vl8re8.v v24, (a0)
; CHECK-NEXT: vsetvli a0, a1, e8,m8,tu,mu
; CHECK-NEXT: vasubu.vv v8, v16, v24, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
define <vscale x 32 x i16> @intrinsic_vasubu_mask_vv_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, <vscale x 32 x i16> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vasubu_mask_vv_nxv32i16_nxv32i16_nxv32i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, zero, e16,m8,ta,mu
-; CHECK-NEXT: vle16.v v24, (a0)
+; CHECK-NEXT: vl8re16.v v24, (a0)
; CHECK-NEXT: vsetvli a0, a1, e16,m8,tu,mu
; CHECK-NEXT: vasubu.vv v8, v16, v24, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
define <vscale x 16 x i32> @intrinsic_vasubu_mask_vv_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vasubu_mask_vv_nxv16i32_nxv16i32_nxv16i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, zero, e32,m8,ta,mu
-; CHECK-NEXT: vle32.v v24, (a0)
+; CHECK-NEXT: vl8re32.v v24, (a0)
; CHECK-NEXT: vsetvli a0, a1, e32,m8,tu,mu
; CHECK-NEXT: vasubu.vv v8, v16, v24, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
define <vscale x 64 x i8> @intrinsic_vasubu_mask_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i8> %2, <vscale x 64 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vasubu_mask_vv_nxv64i8_nxv64i8_nxv64i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, zero, e8,m8,ta,mu
-; CHECK-NEXT: vle8.v v24, (a0)
+; CHECK-NEXT: vl8re8.v v24, (a0)
; CHECK-NEXT: vsetvli a0, a1, e8,m8,tu,mu
; CHECK-NEXT: vasubu.vv v8, v16, v24, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
define <vscale x 32 x i16> @intrinsic_vasubu_mask_vv_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, <vscale x 32 x i16> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vasubu_mask_vv_nxv32i16_nxv32i16_nxv32i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, zero, e16,m8,ta,mu
-; CHECK-NEXT: vle16.v v24, (a0)
+; CHECK-NEXT: vl8re16.v v24, (a0)
; CHECK-NEXT: vsetvli a0, a1, e16,m8,tu,mu
; CHECK-NEXT: vasubu.vv v8, v16, v24, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
define <vscale x 16 x i32> @intrinsic_vasubu_mask_vv_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vasubu_mask_vv_nxv16i32_nxv16i32_nxv16i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, zero, e32,m8,ta,mu
-; CHECK-NEXT: vle32.v v24, (a0)
+; CHECK-NEXT: vl8re32.v v24, (a0)
; CHECK-NEXT: vsetvli a0, a1, e32,m8,tu,mu
; CHECK-NEXT: vasubu.vv v8, v16, v24, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
define <vscale x 8 x i64> @intrinsic_vasubu_mask_vv_nxv8i64_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vasubu_mask_vv_nxv8i64_nxv8i64_nxv8i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, zero, e64,m8,ta,mu
-; CHECK-NEXT: vle64.v v24, (a0)
+; CHECK-NEXT: vl8re64.v v24, (a0)
; CHECK-NEXT: vsetvli a0, a1, e64,m8,tu,mu
; CHECK-NEXT: vasubu.vv v8, v16, v24, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
define <vscale x 64 x i8> @intrinsic_vdiv_mask_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i8> %2, <vscale x 64 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vdiv_mask_vv_nxv64i8_nxv64i8_nxv64i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, zero, e8,m8,ta,mu
-; CHECK-NEXT: vle8.v v24, (a0)
+; CHECK-NEXT: vl8re8.v v24, (a0)
; CHECK-NEXT: vsetvli a0, a1, e8,m8,tu,mu
; CHECK-NEXT: vdiv.vv v8, v16, v24, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
define <vscale x 32 x i16> @intrinsic_vdiv_mask_vv_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, <vscale x 32 x i16> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vdiv_mask_vv_nxv32i16_nxv32i16_nxv32i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, zero, e16,m8,ta,mu
-; CHECK-NEXT: vle16.v v24, (a0)
+; CHECK-NEXT: vl8re16.v v24, (a0)
; CHECK-NEXT: vsetvli a0, a1, e16,m8,tu,mu
; CHECK-NEXT: vdiv.vv v8, v16, v24, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
define <vscale x 16 x i32> @intrinsic_vdiv_mask_vv_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vdiv_mask_vv_nxv16i32_nxv16i32_nxv16i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, zero, e32,m8,ta,mu
-; CHECK-NEXT: vle32.v v24, (a0)
+; CHECK-NEXT: vl8re32.v v24, (a0)
; CHECK-NEXT: vsetvli a0, a1, e32,m8,tu,mu
; CHECK-NEXT: vdiv.vv v8, v16, v24, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
define <vscale x 64 x i8> @intrinsic_vdiv_mask_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i8> %2, <vscale x 64 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vdiv_mask_vv_nxv64i8_nxv64i8_nxv64i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, zero, e8,m8,ta,mu
-; CHECK-NEXT: vle8.v v24, (a0)
+; CHECK-NEXT: vl8re8.v v24, (a0)
; CHECK-NEXT: vsetvli a0, a1, e8,m8,tu,mu
; CHECK-NEXT: vdiv.vv v8, v16, v24, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
define <vscale x 32 x i16> @intrinsic_vdiv_mask_vv_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, <vscale x 32 x i16> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vdiv_mask_vv_nxv32i16_nxv32i16_nxv32i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, zero, e16,m8,ta,mu
-; CHECK-NEXT: vle16.v v24, (a0)
+; CHECK-NEXT: vl8re16.v v24, (a0)
; CHECK-NEXT: vsetvli a0, a1, e16,m8,tu,mu
; CHECK-NEXT: vdiv.vv v8, v16, v24, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
define <vscale x 16 x i32> @intrinsic_vdiv_mask_vv_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vdiv_mask_vv_nxv16i32_nxv16i32_nxv16i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, zero, e32,m8,ta,mu
-; CHECK-NEXT: vle32.v v24, (a0)
+; CHECK-NEXT: vl8re32.v v24, (a0)
; CHECK-NEXT: vsetvli a0, a1, e32,m8,tu,mu
; CHECK-NEXT: vdiv.vv v8, v16, v24, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
define <vscale x 8 x i64> @intrinsic_vdiv_mask_vv_nxv8i64_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vdiv_mask_vv_nxv8i64_nxv8i64_nxv8i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, zero, e64,m8,ta,mu
-; CHECK-NEXT: vle64.v v24, (a0)
+; CHECK-NEXT: vl8re64.v v24, (a0)
; CHECK-NEXT: vsetvli a0, a1, e64,m8,tu,mu
; CHECK-NEXT: vdiv.vv v8, v16, v24, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
define <vscale x 64 x i8> @intrinsic_vdivu_mask_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i8> %2, <vscale x 64 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vdivu_mask_vv_nxv64i8_nxv64i8_nxv64i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, zero, e8,m8,ta,mu
-; CHECK-NEXT: vle8.v v24, (a0)
+; CHECK-NEXT: vl8re8.v v24, (a0)
; CHECK-NEXT: vsetvli a0, a1, e8,m8,tu,mu
; CHECK-NEXT: vdivu.vv v8, v16, v24, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
define <vscale x 32 x i16> @intrinsic_vdivu_mask_vv_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, <vscale x 32 x i16> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vdivu_mask_vv_nxv32i16_nxv32i16_nxv32i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, zero, e16,m8,ta,mu
-; CHECK-NEXT: vle16.v v24, (a0)
+; CHECK-NEXT: vl8re16.v v24, (a0)
; CHECK-NEXT: vsetvli a0, a1, e16,m8,tu,mu
; CHECK-NEXT: vdivu.vv v8, v16, v24, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
define <vscale x 16 x i32> @intrinsic_vdivu_mask_vv_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vdivu_mask_vv_nxv16i32_nxv16i32_nxv16i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, zero, e32,m8,ta,mu
-; CHECK-NEXT: vle32.v v24, (a0)
+; CHECK-NEXT: vl8re32.v v24, (a0)
; CHECK-NEXT: vsetvli a0, a1, e32,m8,tu,mu
; CHECK-NEXT: vdivu.vv v8, v16, v24, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
define <vscale x 64 x i8> @intrinsic_vdivu_mask_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i8> %2, <vscale x 64 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vdivu_mask_vv_nxv64i8_nxv64i8_nxv64i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, zero, e8,m8,ta,mu
-; CHECK-NEXT: vle8.v v24, (a0)
+; CHECK-NEXT: vl8re8.v v24, (a0)
; CHECK-NEXT: vsetvli a0, a1, e8,m8,tu,mu
; CHECK-NEXT: vdivu.vv v8, v16, v24, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
define <vscale x 32 x i16> @intrinsic_vdivu_mask_vv_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, <vscale x 32 x i16> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vdivu_mask_vv_nxv32i16_nxv32i16_nxv32i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, zero, e16,m8,ta,mu
-; CHECK-NEXT: vle16.v v24, (a0)
+; CHECK-NEXT: vl8re16.v v24, (a0)
; CHECK-NEXT: vsetvli a0, a1, e16,m8,tu,mu
; CHECK-NEXT: vdivu.vv v8, v16, v24, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
define <vscale x 16 x i32> @intrinsic_vdivu_mask_vv_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vdivu_mask_vv_nxv16i32_nxv16i32_nxv16i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, zero, e32,m8,ta,mu
-; CHECK-NEXT: vle32.v v24, (a0)
+; CHECK-NEXT: vl8re32.v v24, (a0)
; CHECK-NEXT: vsetvli a0, a1, e32,m8,tu,mu
; CHECK-NEXT: vdivu.vv v8, v16, v24, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
define <vscale x 8 x i64> @intrinsic_vdivu_mask_vv_nxv8i64_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vdivu_mask_vv_nxv8i64_nxv8i64_nxv8i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, zero, e64,m8,ta,mu
-; CHECK-NEXT: vle64.v v24, (a0)
+; CHECK-NEXT: vl8re64.v v24, (a0)
; CHECK-NEXT: vsetvli a0, a1, e64,m8,tu,mu
; CHECK-NEXT: vdivu.vv v8, v16, v24, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
define <vscale x 32 x half> @intrinsic_vfadd_mask_vv_nxv32f16_nxv32f16_nxv32f16(<vscale x 32 x half> %0, <vscale x 32 x half> %1, <vscale x 32 x half> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vfadd_mask_vv_nxv32f16_nxv32f16_nxv32f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, zero, e16,m8,ta,mu
-; CHECK-NEXT: vle16.v v24, (a0)
+; CHECK-NEXT: vl8re16.v v24, (a0)
; CHECK-NEXT: vsetvli a0, a1, e16,m8,tu,mu
; CHECK-NEXT: vfadd.vv v8, v16, v24, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
define <vscale x 16 x float> @intrinsic_vfadd_mask_vv_nxv16f32_nxv16f32_nxv16f32(<vscale x 16 x float> %0, <vscale x 16 x float> %1, <vscale x 16 x float> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vfadd_mask_vv_nxv16f32_nxv16f32_nxv16f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, zero, e32,m8,ta,mu
-; CHECK-NEXT: vle32.v v24, (a0)
+; CHECK-NEXT: vl8re32.v v24, (a0)
; CHECK-NEXT: vsetvli a0, a1, e32,m8,tu,mu
; CHECK-NEXT: vfadd.vv v8, v16, v24, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
define <vscale x 8 x double> @intrinsic_vfadd_mask_vv_nxv8f64_nxv8f64_nxv8f64(<vscale x 8 x double> %0, <vscale x 8 x double> %1, <vscale x 8 x double> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vfadd_mask_vv_nxv8f64_nxv8f64_nxv8f64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, zero, e64,m8,ta,mu
-; CHECK-NEXT: vle64.v v24, (a0)
+; CHECK-NEXT: vl8re64.v v24, (a0)
; CHECK-NEXT: vsetvli a0, a1, e64,m8,tu,mu
; CHECK-NEXT: vfadd.vv v8, v16, v24, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
define <vscale x 32 x half> @intrinsic_vfadd_mask_vv_nxv32f16_nxv32f16_nxv32f16(<vscale x 32 x half> %0, <vscale x 32 x half> %1, <vscale x 32 x half> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vfadd_mask_vv_nxv32f16_nxv32f16_nxv32f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, zero, e16,m8,ta,mu
-; CHECK-NEXT: vle16.v v24, (a0)
+; CHECK-NEXT: vl8re16.v v24, (a0)
; CHECK-NEXT: vsetvli a0, a1, e16,m8,tu,mu
; CHECK-NEXT: vfadd.vv v8, v16, v24, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
define <vscale x 16 x float> @intrinsic_vfadd_mask_vv_nxv16f32_nxv16f32_nxv16f32(<vscale x 16 x float> %0, <vscale x 16 x float> %1, <vscale x 16 x float> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vfadd_mask_vv_nxv16f32_nxv16f32_nxv16f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, zero, e32,m8,ta,mu
-; CHECK-NEXT: vle32.v v24, (a0)
+; CHECK-NEXT: vl8re32.v v24, (a0)
; CHECK-NEXT: vsetvli a0, a1, e32,m8,tu,mu
; CHECK-NEXT: vfadd.vv v8, v16, v24, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
define <vscale x 8 x double> @intrinsic_vfadd_mask_vv_nxv8f64_nxv8f64_nxv8f64(<vscale x 8 x double> %0, <vscale x 8 x double> %1, <vscale x 8 x double> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vfadd_mask_vv_nxv8f64_nxv8f64_nxv8f64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, zero, e64,m8,ta,mu
-; CHECK-NEXT: vle64.v v24, (a0)
+; CHECK-NEXT: vl8re64.v v24, (a0)
; CHECK-NEXT: vsetvli a0, a1, e64,m8,tu,mu
; CHECK-NEXT: vfadd.vv v8, v16, v24, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
define <vscale x 32 x half> @intrinsic_vfdiv_mask_vv_nxv32f16_nxv32f16_nxv32f16(<vscale x 32 x half> %0, <vscale x 32 x half> %1, <vscale x 32 x half> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vfdiv_mask_vv_nxv32f16_nxv32f16_nxv32f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, zero, e16,m8,ta,mu
-; CHECK-NEXT: vle16.v v24, (a0)
+; CHECK-NEXT: vl8re16.v v24, (a0)
; CHECK-NEXT: vsetvli a0, a1, e16,m8,tu,mu
; CHECK-NEXT: vfdiv.vv v8, v16, v24, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
define <vscale x 16 x float> @intrinsic_vfdiv_mask_vv_nxv16f32_nxv16f32_nxv16f32(<vscale x 16 x float> %0, <vscale x 16 x float> %1, <vscale x 16 x float> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vfdiv_mask_vv_nxv16f32_nxv16f32_nxv16f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, zero, e32,m8,ta,mu
-; CHECK-NEXT: vle32.v v24, (a0)
+; CHECK-NEXT: vl8re32.v v24, (a0)
; CHECK-NEXT: vsetvli a0, a1, e32,m8,tu,mu
; CHECK-NEXT: vfdiv.vv v8, v16, v24, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
define <vscale x 8 x double> @intrinsic_vfdiv_mask_vv_nxv8f64_nxv8f64_nxv8f64(<vscale x 8 x double> %0, <vscale x 8 x double> %1, <vscale x 8 x double> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vfdiv_mask_vv_nxv8f64_nxv8f64_nxv8f64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, zero, e64,m8,ta,mu
-; CHECK-NEXT: vle64.v v24, (a0)
+; CHECK-NEXT: vl8re64.v v24, (a0)
; CHECK-NEXT: vsetvli a0, a1, e64,m8,tu,mu
; CHECK-NEXT: vfdiv.vv v8, v16, v24, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
define <vscale x 32 x half> @intrinsic_vfdiv_mask_vv_nxv32f16_nxv32f16(<vscale x 32 x half> %0, <vscale x 32 x half> %1, <vscale x 32 x half> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vfdiv_mask_vv_nxv32f16_nxv32f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, zero, e16,m8,ta,mu
-; CHECK-NEXT: vle16.v v24, (a0)
+; CHECK-NEXT: vl8re16.v v24, (a0)
; CHECK-NEXT: vsetvli a0, a1, e16,m8,tu,mu
; CHECK-NEXT: vfdiv.vv v8, v16, v24, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
define <vscale x 16 x float> @intrinsic_vfdiv_mask_vv_nxv16f32_nxv16f32(<vscale x 16 x float> %0, <vscale x 16 x float> %1, <vscale x 16 x float> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vfdiv_mask_vv_nxv16f32_nxv16f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, zero, e32,m8,ta,mu
-; CHECK-NEXT: vle32.v v24, (a0)
+; CHECK-NEXT: vl8re32.v v24, (a0)
; CHECK-NEXT: vsetvli a0, a1, e32,m8,tu,mu
; CHECK-NEXT: vfdiv.vv v8, v16, v24, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
define <vscale x 8 x double> @intrinsic_vfdiv_mask_vv_nxv8f64_nxv8f64(<vscale x 8 x double> %0, <vscale x 8 x double> %1, <vscale x 8 x double> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vfdiv_mask_vv_nxv8f64_nxv8f64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, zero, e64,m8,ta,mu
-; CHECK-NEXT: vle64.v v24, (a0)
+; CHECK-NEXT: vl8re64.v v24, (a0)
; CHECK-NEXT: vsetvli a0, a1, e64,m8,tu,mu
; CHECK-NEXT: vfdiv.vv v8, v16, v24, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
define <vscale x 32 x half> @vfmadd_vv_nxv32f16(<vscale x 32 x half> %va, <vscale x 32 x half> %vb, <vscale x 32 x half> %vc) {
; CHECK-LABEL: vfmadd_vv_nxv32f16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a1, zero, e16,m8,ta,mu
-; CHECK-NEXT: vle16.v v24, (a0)
+; CHECK-NEXT: vl8re16.v v24, (a0)
+; CHECK-NEXT: vsetvli a0, zero, e16,m8,ta,mu
; CHECK-NEXT: vfmacc.vv v8, v16, v24
; CHECK-NEXT: ret
%vd = call <vscale x 32 x half> @llvm.fma.v32f16(<vscale x 32 x half> %vc, <vscale x 32 x half> %vb, <vscale x 32 x half> %va)
define <vscale x 16 x float> @vfmadd_vv_nxv16f32(<vscale x 16 x float> %va, <vscale x 16 x float> %vb, <vscale x 16 x float> %vc) {
; CHECK-LABEL: vfmadd_vv_nxv16f32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a1, zero, e32,m8,ta,mu
-; CHECK-NEXT: vle32.v v24, (a0)
+; CHECK-NEXT: vl8re32.v v24, (a0)
+; CHECK-NEXT: vsetvli a0, zero, e32,m8,ta,mu
; CHECK-NEXT: vfmadd.vv v8, v24, v16
; CHECK-NEXT: ret
%vd = call <vscale x 16 x float> @llvm.fma.v16f32(<vscale x 16 x float> %vc, <vscale x 16 x float> %va, <vscale x 16 x float> %vb)
define <vscale x 8 x double> @vfmadd_vv_nxv8f64(<vscale x 8 x double> %va, <vscale x 8 x double> %vb, <vscale x 8 x double> %vc) {
; CHECK-LABEL: vfmadd_vv_nxv8f64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a1, zero, e64,m8,ta,mu
-; CHECK-NEXT: vle64.v v24, (a0)
+; CHECK-NEXT: vl8re64.v v24, (a0)
+; CHECK-NEXT: vsetvli a0, zero, e64,m8,ta,mu
; CHECK-NEXT: vfmacc.vv v8, v16, v24
; CHECK-NEXT: ret
%vd = call <vscale x 8 x double> @llvm.fma.v8f64(<vscale x 8 x double> %vb, <vscale x 8 x double> %vc, <vscale x 8 x double> %va)
define <vscale x 32 x half> @intrinsic_vfmax_mask_vv_nxv32f16_nxv32f16_nxv32f16(<vscale x 32 x half> %0, <vscale x 32 x half> %1, <vscale x 32 x half> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vfmax_mask_vv_nxv32f16_nxv32f16_nxv32f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, zero, e16,m8,ta,mu
-; CHECK-NEXT: vle16.v v24, (a0)
+; CHECK-NEXT: vl8re16.v v24, (a0)
; CHECK-NEXT: vsetvli a0, a1, e16,m8,tu,mu
; CHECK-NEXT: vfmax.vv v8, v16, v24, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
define <vscale x 16 x float> @intrinsic_vfmax_mask_vv_nxv16f32_nxv16f32_nxv16f32(<vscale x 16 x float> %0, <vscale x 16 x float> %1, <vscale x 16 x float> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vfmax_mask_vv_nxv16f32_nxv16f32_nxv16f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, zero, e32,m8,ta,mu
-; CHECK-NEXT: vle32.v v24, (a0)
+; CHECK-NEXT: vl8re32.v v24, (a0)
; CHECK-NEXT: vsetvli a0, a1, e32,m8,tu,mu
; CHECK-NEXT: vfmax.vv v8, v16, v24, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
define <vscale x 8 x double> @intrinsic_vfmax_mask_vv_nxv8f64_nxv8f64_nxv8f64(<vscale x 8 x double> %0, <vscale x 8 x double> %1, <vscale x 8 x double> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vfmax_mask_vv_nxv8f64_nxv8f64_nxv8f64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, zero, e64,m8,ta,mu
-; CHECK-NEXT: vle64.v v24, (a0)
+; CHECK-NEXT: vl8re64.v v24, (a0)
; CHECK-NEXT: vsetvli a0, a1, e64,m8,tu,mu
; CHECK-NEXT: vfmax.vv v8, v16, v24, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
define <vscale x 32 x half> @intrinsic_vfmax_mask_vv_nxv32f16_nxv32f16_nxv32f16(<vscale x 32 x half> %0, <vscale x 32 x half> %1, <vscale x 32 x half> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vfmax_mask_vv_nxv32f16_nxv32f16_nxv32f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, zero, e16,m8,ta,mu
-; CHECK-NEXT: vle16.v v24, (a0)
+; CHECK-NEXT: vl8re16.v v24, (a0)
; CHECK-NEXT: vsetvli a0, a1, e16,m8,tu,mu
; CHECK-NEXT: vfmax.vv v8, v16, v24, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
define <vscale x 16 x float> @intrinsic_vfmax_mask_vv_nxv16f32_nxv16f32_nxv16f32(<vscale x 16 x float> %0, <vscale x 16 x float> %1, <vscale x 16 x float> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vfmax_mask_vv_nxv16f32_nxv16f32_nxv16f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, zero, e32,m8,ta,mu
-; CHECK-NEXT: vle32.v v24, (a0)
+; CHECK-NEXT: vl8re32.v v24, (a0)
; CHECK-NEXT: vsetvli a0, a1, e32,m8,tu,mu
; CHECK-NEXT: vfmax.vv v8, v16, v24, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
define <vscale x 8 x double> @intrinsic_vfmax_mask_vv_nxv8f64_nxv8f64_nxv8f64(<vscale x 8 x double> %0, <vscale x 8 x double> %1, <vscale x 8 x double> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vfmax_mask_vv_nxv8f64_nxv8f64_nxv8f64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, zero, e64,m8,ta,mu
-; CHECK-NEXT: vle64.v v24, (a0)
+; CHECK-NEXT: vl8re64.v v24, (a0)
; CHECK-NEXT: vsetvli a0, a1, e64,m8,tu,mu
; CHECK-NEXT: vfmax.vv v8, v16, v24, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
define <vscale x 32 x half> @intrinsic_vfmin_mask_vv_nxv32f16_nxv32f16_nxv32f16(<vscale x 32 x half> %0, <vscale x 32 x half> %1, <vscale x 32 x half> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vfmin_mask_vv_nxv32f16_nxv32f16_nxv32f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, zero, e16,m8,ta,mu
-; CHECK-NEXT: vle16.v v24, (a0)
+; CHECK-NEXT: vl8re16.v v24, (a0)
; CHECK-NEXT: vsetvli a0, a1, e16,m8,tu,mu
; CHECK-NEXT: vfmin.vv v8, v16, v24, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
define <vscale x 16 x float> @intrinsic_vfmin_mask_vv_nxv16f32_nxv16f32_nxv16f32(<vscale x 16 x float> %0, <vscale x 16 x float> %1, <vscale x 16 x float> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vfmin_mask_vv_nxv16f32_nxv16f32_nxv16f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, zero, e32,m8,ta,mu
-; CHECK-NEXT: vle32.v v24, (a0)
+; CHECK-NEXT: vl8re32.v v24, (a0)
; CHECK-NEXT: vsetvli a0, a1, e32,m8,tu,mu
; CHECK-NEXT: vfmin.vv v8, v16, v24, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
define <vscale x 8 x double> @intrinsic_vfmin_mask_vv_nxv8f64_nxv8f64_nxv8f64(<vscale x 8 x double> %0, <vscale x 8 x double> %1, <vscale x 8 x double> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vfmin_mask_vv_nxv8f64_nxv8f64_nxv8f64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, zero, e64,m8,ta,mu
-; CHECK-NEXT: vle64.v v24, (a0)
+; CHECK-NEXT: vl8re64.v v24, (a0)
; CHECK-NEXT: vsetvli a0, a1, e64,m8,tu,mu
; CHECK-NEXT: vfmin.vv v8, v16, v24, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
define <vscale x 32 x half> @intrinsic_vfmin_mask_vv_nxv32f16_nxv32f16_nxv32f16(<vscale x 32 x half> %0, <vscale x 32 x half> %1, <vscale x 32 x half> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vfmin_mask_vv_nxv32f16_nxv32f16_nxv32f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, zero, e16,m8,ta,mu
-; CHECK-NEXT: vle16.v v24, (a0)
+; CHECK-NEXT: vl8re16.v v24, (a0)
; CHECK-NEXT: vsetvli a0, a1, e16,m8,tu,mu
; CHECK-NEXT: vfmin.vv v8, v16, v24, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
define <vscale x 16 x float> @intrinsic_vfmin_mask_vv_nxv16f32_nxv16f32_nxv16f32(<vscale x 16 x float> %0, <vscale x 16 x float> %1, <vscale x 16 x float> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vfmin_mask_vv_nxv16f32_nxv16f32_nxv16f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, zero, e32,m8,ta,mu
-; CHECK-NEXT: vle32.v v24, (a0)
+; CHECK-NEXT: vl8re32.v v24, (a0)
; CHECK-NEXT: vsetvli a0, a1, e32,m8,tu,mu
; CHECK-NEXT: vfmin.vv v8, v16, v24, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
define <vscale x 8 x double> @intrinsic_vfmin_mask_vv_nxv8f64_nxv8f64_nxv8f64(<vscale x 8 x double> %0, <vscale x 8 x double> %1, <vscale x 8 x double> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vfmin_mask_vv_nxv8f64_nxv8f64_nxv8f64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, zero, e64,m8,ta,mu
-; CHECK-NEXT: vle64.v v24, (a0)
+; CHECK-NEXT: vl8re64.v v24, (a0)
; CHECK-NEXT: vsetvli a0, a1, e64,m8,tu,mu
; CHECK-NEXT: vfmin.vv v8, v16, v24, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
define <vscale x 32 x half> @vfmsub_vv_nxv32f16(<vscale x 32 x half> %va, <vscale x 32 x half> %vb, <vscale x 32 x half> %vc) {
; CHECK-LABEL: vfmsub_vv_nxv32f16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a1, zero, e16,m8,ta,mu
-; CHECK-NEXT: vle16.v v24, (a0)
+; CHECK-NEXT: vl8re16.v v24, (a0)
+; CHECK-NEXT: vsetvli a0, zero, e16,m8,ta,mu
; CHECK-NEXT: vfmsac.vv v8, v16, v24
; CHECK-NEXT: ret
%neg = fneg <vscale x 32 x half> %va
define <vscale x 16 x float> @vfmsub_vv_nxv16f32(<vscale x 16 x float> %va, <vscale x 16 x float> %vb, <vscale x 16 x float> %vc) {
; CHECK-LABEL: vfmsub_vv_nxv16f32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a1, zero, e32,m8,ta,mu
-; CHECK-NEXT: vle32.v v24, (a0)
+; CHECK-NEXT: vl8re32.v v24, (a0)
+; CHECK-NEXT: vsetvli a0, zero, e32,m8,ta,mu
; CHECK-NEXT: vfmsub.vv v8, v24, v16
; CHECK-NEXT: ret
%neg = fneg <vscale x 16 x float> %vb
define <vscale x 8 x double> @vfmsub_vv_nxv8f64(<vscale x 8 x double> %va, <vscale x 8 x double> %vb, <vscale x 8 x double> %vc) {
; CHECK-LABEL: vfmsub_vv_nxv8f64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a1, zero, e64,m8,ta,mu
-; CHECK-NEXT: vle64.v v24, (a0)
+; CHECK-NEXT: vl8re64.v v24, (a0)
+; CHECK-NEXT: vsetvli a0, zero, e64,m8,ta,mu
; CHECK-NEXT: vfmsac.vv v8, v16, v24
; CHECK-NEXT: ret
%neg = fneg <vscale x 8 x double> %va
define <vscale x 32 x half> @intrinsic_vfmul_mask_vv_nxv32f16_nxv32f16_nxv32f16(<vscale x 32 x half> %0, <vscale x 32 x half> %1, <vscale x 32 x half> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vfmul_mask_vv_nxv32f16_nxv32f16_nxv32f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, zero, e16,m8,ta,mu
-; CHECK-NEXT: vle16.v v24, (a0)
+; CHECK-NEXT: vl8re16.v v24, (a0)
; CHECK-NEXT: vsetvli a0, a1, e16,m8,tu,mu
; CHECK-NEXT: vfmul.vv v8, v16, v24, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
define <vscale x 16 x float> @intrinsic_vfmul_mask_vv_nxv16f32_nxv16f32_nxv16f32(<vscale x 16 x float> %0, <vscale x 16 x float> %1, <vscale x 16 x float> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vfmul_mask_vv_nxv16f32_nxv16f32_nxv16f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, zero, e32,m8,ta,mu
-; CHECK-NEXT: vle32.v v24, (a0)
+; CHECK-NEXT: vl8re32.v v24, (a0)
; CHECK-NEXT: vsetvli a0, a1, e32,m8,tu,mu
; CHECK-NEXT: vfmul.vv v8, v16, v24, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
define <vscale x 8 x double> @intrinsic_vfmul_mask_vv_nxv8f64_nxv8f64_nxv8f64(<vscale x 8 x double> %0, <vscale x 8 x double> %1, <vscale x 8 x double> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vfmul_mask_vv_nxv8f64_nxv8f64_nxv8f64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, zero, e64,m8,ta,mu
-; CHECK-NEXT: vle64.v v24, (a0)
+; CHECK-NEXT: vl8re64.v v24, (a0)
; CHECK-NEXT: vsetvli a0, a1, e64,m8,tu,mu
; CHECK-NEXT: vfmul.vv v8, v16, v24, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
define <vscale x 32 x half> @intrinsic_vfmul_mask_vv_nxv32f16_nxv32f16(<vscale x 32 x half> %0, <vscale x 32 x half> %1, <vscale x 32 x half> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vfmul_mask_vv_nxv32f16_nxv32f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, zero, e16,m8,ta,mu
-; CHECK-NEXT: vle16.v v24, (a0)
+; CHECK-NEXT: vl8re16.v v24, (a0)
; CHECK-NEXT: vsetvli a0, a1, e16,m8,tu,mu
; CHECK-NEXT: vfmul.vv v8, v16, v24, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
define <vscale x 16 x float> @intrinsic_vfmul_mask_vv_nxv16f32_nxv16f32(<vscale x 16 x float> %0, <vscale x 16 x float> %1, <vscale x 16 x float> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vfmul_mask_vv_nxv16f32_nxv16f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, zero, e32,m8,ta,mu
-; CHECK-NEXT: vle32.v v24, (a0)
+; CHECK-NEXT: vl8re32.v v24, (a0)
; CHECK-NEXT: vsetvli a0, a1, e32,m8,tu,mu
; CHECK-NEXT: vfmul.vv v8, v16, v24, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
define <vscale x 8 x double> @intrinsic_vfmul_mask_vv_nxv8f64_nxv8f64(<vscale x 8 x double> %0, <vscale x 8 x double> %1, <vscale x 8 x double> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vfmul_mask_vv_nxv8f64_nxv8f64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, zero, e64,m8,ta,mu
-; CHECK-NEXT: vle64.v v24, (a0)
+; CHECK-NEXT: vl8re64.v v24, (a0)
; CHECK-NEXT: vsetvli a0, a1, e64,m8,tu,mu
; CHECK-NEXT: vfmul.vv v8, v16, v24, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
define <vscale x 32 x half> @vfnmsub_vv_nxv32f16(<vscale x 32 x half> %va, <vscale x 32 x half> %vb, <vscale x 32 x half> %vc) {
; CHECK-LABEL: vfnmsub_vv_nxv32f16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a1, zero, e16,m8,ta,mu
-; CHECK-NEXT: vle16.v v24, (a0)
+; CHECK-NEXT: vl8re16.v v24, (a0)
+; CHECK-NEXT: vsetvli a0, zero, e16,m8,ta,mu
; CHECK-NEXT: vfnmadd.vv v8, v24, v16
; CHECK-NEXT: ret
%neg = fneg <vscale x 32 x half> %vc
define <vscale x 16 x float> @vfnmsub_vv_nxv16f32(<vscale x 16 x float> %va, <vscale x 16 x float> %vb, <vscale x 16 x float> %vc) {
; CHECK-LABEL: vfnmsub_vv_nxv16f32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a1, zero, e32,m8,ta,mu
-; CHECK-NEXT: vle32.v v24, (a0)
+; CHECK-NEXT: vl8re32.v v24, (a0)
+; CHECK-NEXT: vsetvli a0, zero, e32,m8,ta,mu
; CHECK-NEXT: vfnmadd.vv v8, v24, v16
; CHECK-NEXT: ret
%neg = fneg <vscale x 16 x float> %va
define <vscale x 8 x double> @vfnmsub_vv_nxv8f64(<vscale x 8 x double> %va, <vscale x 8 x double> %vb, <vscale x 8 x double> %vc) {
; CHECK-LABEL: vfnmsub_vv_nxv8f64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a1, zero, e64,m8,ta,mu
-; CHECK-NEXT: vle64.v v24, (a0)
+; CHECK-NEXT: vl8re64.v v24, (a0)
+; CHECK-NEXT: vsetvli a0, zero, e64,m8,ta,mu
; CHECK-NEXT: vfnmacc.vv v8, v16, v24
; CHECK-NEXT: ret
%neg = fneg <vscale x 8 x double> %vb
define <vscale x 32 x half> @vfnmsub_vv_nxv32f16(<vscale x 32 x half> %va, <vscale x 32 x half> %vb, <vscale x 32 x half> %vc) {
; CHECK-LABEL: vfnmsub_vv_nxv32f16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a1, zero, e16,m8,ta,mu
-; CHECK-NEXT: vle16.v v24, (a0)
+; CHECK-NEXT: vl8re16.v v24, (a0)
+; CHECK-NEXT: vsetvli a0, zero, e16,m8,ta,mu
; CHECK-NEXT: vfnmsub.vv v8, v24, v16
; CHECK-NEXT: ret
%neg = fneg <vscale x 32 x half> %vc
define <vscale x 16 x float> @vfnmsub_vv_nxv16f32(<vscale x 16 x float> %va, <vscale x 16 x float> %vb, <vscale x 16 x float> %vc) {
; CHECK-LABEL: vfnmsub_vv_nxv16f32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a1, zero, e32,m8,ta,mu
-; CHECK-NEXT: vle32.v v24, (a0)
+; CHECK-NEXT: vl8re32.v v24, (a0)
+; CHECK-NEXT: vsetvli a0, zero, e32,m8,ta,mu
; CHECK-NEXT: vfnmsub.vv v8, v24, v16
; CHECK-NEXT: ret
%neg = fneg <vscale x 16 x float> %va
define <vscale x 8 x double> @vfnmsub_vv_nxv8f64(<vscale x 8 x double> %va, <vscale x 8 x double> %vb, <vscale x 8 x double> %vc) {
; CHECK-LABEL: vfnmsub_vv_nxv8f64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a1, zero, e64,m8,ta,mu
-; CHECK-NEXT: vle64.v v24, (a0)
+; CHECK-NEXT: vl8re64.v v24, (a0)
+; CHECK-NEXT: vsetvli a0, zero, e64,m8,ta,mu
; CHECK-NEXT: vfnmsac.vv v8, v16, v24
; CHECK-NEXT: ret
%neg = fneg <vscale x 8 x double> %vb
define <vscale x 32 x half> @intrinsic_vfsgnj_mask_vv_nxv32f16_nxv32f16_nxv32f16(<vscale x 32 x half> %0, <vscale x 32 x half> %1, <vscale x 32 x half> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vfsgnj_mask_vv_nxv32f16_nxv32f16_nxv32f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, zero, e16,m8,ta,mu
-; CHECK-NEXT: vle16.v v24, (a0)
+; CHECK-NEXT: vl8re16.v v24, (a0)
; CHECK-NEXT: vsetvli a0, a1, e16,m8,tu,mu
; CHECK-NEXT: vfsgnj.vv v8, v16, v24, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
define <vscale x 16 x float> @intrinsic_vfsgnj_mask_vv_nxv16f32_nxv16f32_nxv16f32(<vscale x 16 x float> %0, <vscale x 16 x float> %1, <vscale x 16 x float> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vfsgnj_mask_vv_nxv16f32_nxv16f32_nxv16f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, zero, e32,m8,ta,mu
-; CHECK-NEXT: vle32.v v24, (a0)
+; CHECK-NEXT: vl8re32.v v24, (a0)
; CHECK-NEXT: vsetvli a0, a1, e32,m8,tu,mu
; CHECK-NEXT: vfsgnj.vv v8, v16, v24, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
define <vscale x 8 x double> @intrinsic_vfsgnj_mask_vv_nxv8f64_nxv8f64_nxv8f64(<vscale x 8 x double> %0, <vscale x 8 x double> %1, <vscale x 8 x double> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vfsgnj_mask_vv_nxv8f64_nxv8f64_nxv8f64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, zero, e64,m8,ta,mu
-; CHECK-NEXT: vle64.v v24, (a0)
+; CHECK-NEXT: vl8re64.v v24, (a0)
; CHECK-NEXT: vsetvli a0, a1, e64,m8,tu,mu
; CHECK-NEXT: vfsgnj.vv v8, v16, v24, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
define <vscale x 32 x half> @intrinsic_vfsgnj_mask_vv_nxv32f16_nxv32f16(<vscale x 32 x half> %0, <vscale x 32 x half> %1, <vscale x 32 x half> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vfsgnj_mask_vv_nxv32f16_nxv32f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, zero, e16,m8,ta,mu
-; CHECK-NEXT: vle16.v v24, (a0)
+; CHECK-NEXT: vl8re16.v v24, (a0)
; CHECK-NEXT: vsetvli a0, a1, e16,m8,tu,mu
; CHECK-NEXT: vfsgnj.vv v8, v16, v24, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
define <vscale x 16 x float> @intrinsic_vfsgnj_mask_vv_nxv16f32_nxv16f32(<vscale x 16 x float> %0, <vscale x 16 x float> %1, <vscale x 16 x float> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vfsgnj_mask_vv_nxv16f32_nxv16f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, zero, e32,m8,ta,mu
-; CHECK-NEXT: vle32.v v24, (a0)
+; CHECK-NEXT: vl8re32.v v24, (a0)
; CHECK-NEXT: vsetvli a0, a1, e32,m8,tu,mu
; CHECK-NEXT: vfsgnj.vv v8, v16, v24, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
define <vscale x 8 x double> @intrinsic_vfsgnj_mask_vv_nxv8f64_nxv8f64(<vscale x 8 x double> %0, <vscale x 8 x double> %1, <vscale x 8 x double> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vfsgnj_mask_vv_nxv8f64_nxv8f64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, zero, e64,m8,ta,mu
-; CHECK-NEXT: vle64.v v24, (a0)
+; CHECK-NEXT: vl8re64.v v24, (a0)
; CHECK-NEXT: vsetvli a0, a1, e64,m8,tu,mu
; CHECK-NEXT: vfsgnj.vv v8, v16, v24, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
define <vscale x 32 x half> @intrinsic_vfsgnjn_mask_vv_nxv32f16_nxv32f16_nxv32f16(<vscale x 32 x half> %0, <vscale x 32 x half> %1, <vscale x 32 x half> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vfsgnjn_mask_vv_nxv32f16_nxv32f16_nxv32f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, zero, e16,m8,ta,mu
-; CHECK-NEXT: vle16.v v24, (a0)
+; CHECK-NEXT: vl8re16.v v24, (a0)
; CHECK-NEXT: vsetvli a0, a1, e16,m8,tu,mu
; CHECK-NEXT: vfsgnjn.vv v8, v16, v24, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
define <vscale x 16 x float> @intrinsic_vfsgnjn_mask_vv_nxv16f32_nxv16f32_nxv16f32(<vscale x 16 x float> %0, <vscale x 16 x float> %1, <vscale x 16 x float> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vfsgnjn_mask_vv_nxv16f32_nxv16f32_nxv16f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, zero, e32,m8,ta,mu
-; CHECK-NEXT: vle32.v v24, (a0)
+; CHECK-NEXT: vl8re32.v v24, (a0)
; CHECK-NEXT: vsetvli a0, a1, e32,m8,tu,mu
; CHECK-NEXT: vfsgnjn.vv v8, v16, v24, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
define <vscale x 8 x double> @intrinsic_vfsgnjn_mask_vv_nxv8f64_nxv8f64_nxv8f64(<vscale x 8 x double> %0, <vscale x 8 x double> %1, <vscale x 8 x double> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vfsgnjn_mask_vv_nxv8f64_nxv8f64_nxv8f64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, zero, e64,m8,ta,mu
-; CHECK-NEXT: vle64.v v24, (a0)
+; CHECK-NEXT: vl8re64.v v24, (a0)
; CHECK-NEXT: vsetvli a0, a1, e64,m8,tu,mu
; CHECK-NEXT: vfsgnjn.vv v8, v16, v24, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
define <vscale x 32 x half> @intrinsic_vfsgnjn_mask_vv_nxv32f16_nxv32f16(<vscale x 32 x half> %0, <vscale x 32 x half> %1, <vscale x 32 x half> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vfsgnjn_mask_vv_nxv32f16_nxv32f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, zero, e16,m8,ta,mu
-; CHECK-NEXT: vle16.v v24, (a0)
+; CHECK-NEXT: vl8re16.v v24, (a0)
; CHECK-NEXT: vsetvli a0, a1, e16,m8,tu,mu
; CHECK-NEXT: vfsgnjn.vv v8, v16, v24, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
define <vscale x 16 x float> @intrinsic_vfsgnjn_mask_vv_nxv16f32_nxv16f32(<vscale x 16 x float> %0, <vscale x 16 x float> %1, <vscale x 16 x float> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vfsgnjn_mask_vv_nxv16f32_nxv16f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, zero, e32,m8,ta,mu
-; CHECK-NEXT: vle32.v v24, (a0)
+; CHECK-NEXT: vl8re32.v v24, (a0)
; CHECK-NEXT: vsetvli a0, a1, e32,m8,tu,mu
; CHECK-NEXT: vfsgnjn.vv v8, v16, v24, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
define <vscale x 8 x double> @intrinsic_vfsgnjn_mask_vv_nxv8f64_nxv8f64(<vscale x 8 x double> %0, <vscale x 8 x double> %1, <vscale x 8 x double> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vfsgnjn_mask_vv_nxv8f64_nxv8f64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, zero, e64,m8,ta,mu
-; CHECK-NEXT: vle64.v v24, (a0)
+; CHECK-NEXT: vl8re64.v v24, (a0)
; CHECK-NEXT: vsetvli a0, a1, e64,m8,tu,mu
; CHECK-NEXT: vfsgnjn.vv v8, v16, v24, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
define <vscale x 32 x half> @intrinsic_vfsgnjx_mask_vv_nxv32f16_nxv32f16_nxv32f16(<vscale x 32 x half> %0, <vscale x 32 x half> %1, <vscale x 32 x half> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vfsgnjx_mask_vv_nxv32f16_nxv32f16_nxv32f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, zero, e16,m8,ta,mu
-; CHECK-NEXT: vle16.v v24, (a0)
+; CHECK-NEXT: vl8re16.v v24, (a0)
; CHECK-NEXT: vsetvli a0, a1, e16,m8,tu,mu
; CHECK-NEXT: vfsgnjx.vv v8, v16, v24, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
define <vscale x 16 x float> @intrinsic_vfsgnjx_mask_vv_nxv16f32_nxv16f32_nxv16f32(<vscale x 16 x float> %0, <vscale x 16 x float> %1, <vscale x 16 x float> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vfsgnjx_mask_vv_nxv16f32_nxv16f32_nxv16f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, zero, e32,m8,ta,mu
-; CHECK-NEXT: vle32.v v24, (a0)
+; CHECK-NEXT: vl8re32.v v24, (a0)
; CHECK-NEXT: vsetvli a0, a1, e32,m8,tu,mu
; CHECK-NEXT: vfsgnjx.vv v8, v16, v24, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
define <vscale x 8 x double> @intrinsic_vfsgnjx_mask_vv_nxv8f64_nxv8f64_nxv8f64(<vscale x 8 x double> %0, <vscale x 8 x double> %1, <vscale x 8 x double> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vfsgnjx_mask_vv_nxv8f64_nxv8f64_nxv8f64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, zero, e64,m8,ta,mu
-; CHECK-NEXT: vle64.v v24, (a0)
+; CHECK-NEXT: vl8re64.v v24, (a0)
; CHECK-NEXT: vsetvli a0, a1, e64,m8,tu,mu
; CHECK-NEXT: vfsgnjx.vv v8, v16, v24, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
define <vscale x 32 x half> @intrinsic_vfsgnjx_mask_vv_nxv32f16_nxv32f16(<vscale x 32 x half> %0, <vscale x 32 x half> %1, <vscale x 32 x half> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vfsgnjx_mask_vv_nxv32f16_nxv32f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, zero, e16,m8,ta,mu
-; CHECK-NEXT: vle16.v v24, (a0)
+; CHECK-NEXT: vl8re16.v v24, (a0)
; CHECK-NEXT: vsetvli a0, a1, e16,m8,tu,mu
; CHECK-NEXT: vfsgnjx.vv v8, v16, v24, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
define <vscale x 16 x float> @intrinsic_vfsgnjx_mask_vv_nxv16f32_nxv16f32(<vscale x 16 x float> %0, <vscale x 16 x float> %1, <vscale x 16 x float> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vfsgnjx_mask_vv_nxv16f32_nxv16f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, zero, e32,m8,ta,mu
-; CHECK-NEXT: vle32.v v24, (a0)
+; CHECK-NEXT: vl8re32.v v24, (a0)
; CHECK-NEXT: vsetvli a0, a1, e32,m8,tu,mu
; CHECK-NEXT: vfsgnjx.vv v8, v16, v24, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
define <vscale x 8 x double> @intrinsic_vfsgnjx_mask_vv_nxv8f64_nxv8f64(<vscale x 8 x double> %0, <vscale x 8 x double> %1, <vscale x 8 x double> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vfsgnjx_mask_vv_nxv8f64_nxv8f64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, zero, e64,m8,ta,mu
-; CHECK-NEXT: vle64.v v24, (a0)
+; CHECK-NEXT: vl8re64.v v24, (a0)
; CHECK-NEXT: vsetvli a0, a1, e64,m8,tu,mu
; CHECK-NEXT: vfsgnjx.vv v8, v16, v24, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
define <vscale x 32 x half> @intrinsic_vfsub_mask_vv_nxv32f16_nxv32f16_nxv32f16(<vscale x 32 x half> %0, <vscale x 32 x half> %1, <vscale x 32 x half> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vfsub_mask_vv_nxv32f16_nxv32f16_nxv32f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, zero, e16,m8,ta,mu
-; CHECK-NEXT: vle16.v v24, (a0)
+; CHECK-NEXT: vl8re16.v v24, (a0)
; CHECK-NEXT: vsetvli a0, a1, e16,m8,tu,mu
; CHECK-NEXT: vfsub.vv v8, v16, v24, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
define <vscale x 16 x float> @intrinsic_vfsub_mask_vv_nxv16f32_nxv16f32_nxv16f32(<vscale x 16 x float> %0, <vscale x 16 x float> %1, <vscale x 16 x float> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vfsub_mask_vv_nxv16f32_nxv16f32_nxv16f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, zero, e32,m8,ta,mu
-; CHECK-NEXT: vle32.v v24, (a0)
+; CHECK-NEXT: vl8re32.v v24, (a0)
; CHECK-NEXT: vsetvli a0, a1, e32,m8,tu,mu
; CHECK-NEXT: vfsub.vv v8, v16, v24, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
define <vscale x 8 x double> @intrinsic_vfsub_mask_vv_nxv8f64_nxv8f64_nxv8f64(<vscale x 8 x double> %0, <vscale x 8 x double> %1, <vscale x 8 x double> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vfsub_mask_vv_nxv8f64_nxv8f64_nxv8f64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, zero, e64,m8,ta,mu
-; CHECK-NEXT: vle64.v v24, (a0)
+; CHECK-NEXT: vl8re64.v v24, (a0)
; CHECK-NEXT: vsetvli a0, a1, e64,m8,tu,mu
; CHECK-NEXT: vfsub.vv v8, v16, v24, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
define <vscale x 32 x half> @intrinsic_vfsub_mask_vv_nxv32f16_nxv32f16_nxv32f16(<vscale x 32 x half> %0, <vscale x 32 x half> %1, <vscale x 32 x half> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vfsub_mask_vv_nxv32f16_nxv32f16_nxv32f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, zero, e16,m8,ta,mu
-; CHECK-NEXT: vle16.v v24, (a0)
+; CHECK-NEXT: vl8re16.v v24, (a0)
; CHECK-NEXT: vsetvli a0, a1, e16,m8,tu,mu
; CHECK-NEXT: vfsub.vv v8, v16, v24, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
define <vscale x 16 x float> @intrinsic_vfsub_mask_vv_nxv16f32_nxv16f32_nxv16f32(<vscale x 16 x float> %0, <vscale x 16 x float> %1, <vscale x 16 x float> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vfsub_mask_vv_nxv16f32_nxv16f32_nxv16f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, zero, e32,m8,ta,mu
-; CHECK-NEXT: vle32.v v24, (a0)
+; CHECK-NEXT: vl8re32.v v24, (a0)
; CHECK-NEXT: vsetvli a0, a1, e32,m8,tu,mu
; CHECK-NEXT: vfsub.vv v8, v16, v24, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
define <vscale x 8 x double> @intrinsic_vfsub_mask_vv_nxv8f64_nxv8f64_nxv8f64(<vscale x 8 x double> %0, <vscale x 8 x double> %1, <vscale x 8 x double> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vfsub_mask_vv_nxv8f64_nxv8f64_nxv8f64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, zero, e64,m8,ta,mu
-; CHECK-NEXT: vle64.v v24, (a0)
+; CHECK-NEXT: vl8re64.v v24, (a0)
; CHECK-NEXT: vsetvli a0, a1, e64,m8,tu,mu
; CHECK-NEXT: vfsub.vv v8, v16, v24, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
define <vscale x 16 x float> @intrinsic_vfwadd.w_mask_wv_nxv16f32_nxv16f32_nxv16f16(<vscale x 16 x float> %0, <vscale x 16 x float> %1, <vscale x 16 x half> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vfwadd.w_mask_wv_nxv16f32_nxv16f32_nxv16f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, zero, e16,m4,ta,mu
-; CHECK-NEXT: vle16.v v28, (a0)
+; CHECK-NEXT: vl4re16.v v28, (a0)
; CHECK-NEXT: vsetvli a0, a1, e16,m4,tu,mu
; CHECK-NEXT: vfwadd.wv v8, v16, v28, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
define <vscale x 8 x double> @intrinsic_vfwadd.w_mask_wv_nxv8f64_nxv8f64_nxv8f32(<vscale x 8 x double> %0, <vscale x 8 x double> %1, <vscale x 8 x float> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vfwadd.w_mask_wv_nxv8f64_nxv8f64_nxv8f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, zero, e32,m4,ta,mu
-; CHECK-NEXT: vle32.v v28, (a0)
+; CHECK-NEXT: vl4re32.v v28, (a0)
; CHECK-NEXT: vsetvli a0, a1, e32,m4,tu,mu
; CHECK-NEXT: vfwadd.wv v8, v16, v28, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
define <vscale x 16 x float> @intrinsic_vfwadd.w_mask_wv_nxv16f32_nxv16f32_nxv16f16(<vscale x 16 x float> %0, <vscale x 16 x float> %1, <vscale x 16 x half> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vfwadd.w_mask_wv_nxv16f32_nxv16f32_nxv16f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, zero, e16,m4,ta,mu
-; CHECK-NEXT: vle16.v v28, (a0)
+; CHECK-NEXT: vl4re16.v v28, (a0)
; CHECK-NEXT: vsetvli a0, a1, e16,m4,tu,mu
; CHECK-NEXT: vfwadd.wv v8, v16, v28, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
define <vscale x 8 x double> @intrinsic_vfwadd.w_mask_wv_nxv8f64_nxv8f64_nxv8f32(<vscale x 8 x double> %0, <vscale x 8 x double> %1, <vscale x 8 x float> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vfwadd.w_mask_wv_nxv8f64_nxv8f64_nxv8f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, zero, e32,m4,ta,mu
-; CHECK-NEXT: vle32.v v28, (a0)
+; CHECK-NEXT: vl4re32.v v28, (a0)
; CHECK-NEXT: vsetvli a0, a1, e32,m4,tu,mu
; CHECK-NEXT: vfwadd.wv v8, v16, v28, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
define <vscale x 16 x float> @intrinsic_vfwsub.w_mask_wv_nxv16f32_nxv16f32_nxv16f16(<vscale x 16 x float> %0, <vscale x 16 x float> %1, <vscale x 16 x half> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vfwsub.w_mask_wv_nxv16f32_nxv16f32_nxv16f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, zero, e16,m4,ta,mu
-; CHECK-NEXT: vle16.v v28, (a0)
+; CHECK-NEXT: vl4re16.v v28, (a0)
; CHECK-NEXT: vsetvli a0, a1, e16,m4,tu,mu
; CHECK-NEXT: vfwsub.wv v8, v16, v28, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
define <vscale x 8 x double> @intrinsic_vfwsub.w_mask_wv_nxv8f64_nxv8f64_nxv8f32(<vscale x 8 x double> %0, <vscale x 8 x double> %1, <vscale x 8 x float> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vfwsub.w_mask_wv_nxv8f64_nxv8f64_nxv8f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, zero, e32,m4,ta,mu
-; CHECK-NEXT: vle32.v v28, (a0)
+; CHECK-NEXT: vl4re32.v v28, (a0)
; CHECK-NEXT: vsetvli a0, a1, e32,m4,tu,mu
; CHECK-NEXT: vfwsub.wv v8, v16, v28, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
define <vscale x 16 x float> @intrinsic_vfwsub.w_mask_wv_nxv16f32_nxv16f32_nxv16f16(<vscale x 16 x float> %0, <vscale x 16 x float> %1, <vscale x 16 x half> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vfwsub.w_mask_wv_nxv16f32_nxv16f32_nxv16f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, zero, e16,m4,ta,mu
-; CHECK-NEXT: vle16.v v28, (a0)
+; CHECK-NEXT: vl4re16.v v28, (a0)
; CHECK-NEXT: vsetvli a0, a1, e16,m4,tu,mu
; CHECK-NEXT: vfwsub.wv v8, v16, v28, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
define <vscale x 8 x double> @intrinsic_vfwsub.w_mask_wv_nxv8f64_nxv8f64_nxv8f32(<vscale x 8 x double> %0, <vscale x 8 x double> %1, <vscale x 8 x float> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vfwsub.w_mask_wv_nxv8f64_nxv8f64_nxv8f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, zero, e32,m4,ta,mu
-; CHECK-NEXT: vle32.v v28, (a0)
+; CHECK-NEXT: vl4re32.v v28, (a0)
; CHECK-NEXT: vsetvli a0, a1, e32,m4,tu,mu
; CHECK-NEXT: vfwsub.wv v8, v16, v28, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
define <vscale x 64 x i8> @intrinsic_vmax_mask_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i8> %2, <vscale x 64 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmax_mask_vv_nxv64i8_nxv64i8_nxv64i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, zero, e8,m8,ta,mu
-; CHECK-NEXT: vle8.v v24, (a0)
+; CHECK-NEXT: vl8re8.v v24, (a0)
; CHECK-NEXT: vsetvli a0, a1, e8,m8,tu,mu
; CHECK-NEXT: vmax.vv v8, v16, v24, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
define <vscale x 32 x i16> @intrinsic_vmax_mask_vv_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, <vscale x 32 x i16> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmax_mask_vv_nxv32i16_nxv32i16_nxv32i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, zero, e16,m8,ta,mu
-; CHECK-NEXT: vle16.v v24, (a0)
+; CHECK-NEXT: vl8re16.v v24, (a0)
; CHECK-NEXT: vsetvli a0, a1, e16,m8,tu,mu
; CHECK-NEXT: vmax.vv v8, v16, v24, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
define <vscale x 16 x i32> @intrinsic_vmax_mask_vv_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmax_mask_vv_nxv16i32_nxv16i32_nxv16i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, zero, e32,m8,ta,mu
-; CHECK-NEXT: vle32.v v24, (a0)
+; CHECK-NEXT: vl8re32.v v24, (a0)
; CHECK-NEXT: vsetvli a0, a1, e32,m8,tu,mu
; CHECK-NEXT: vmax.vv v8, v16, v24, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
define <vscale x 64 x i8> @intrinsic_vmax_mask_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i8> %2, <vscale x 64 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vmax_mask_vv_nxv64i8_nxv64i8_nxv64i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, zero, e8,m8,ta,mu
-; CHECK-NEXT: vle8.v v24, (a0)
+; CHECK-NEXT: vl8re8.v v24, (a0)
; CHECK-NEXT: vsetvli a0, a1, e8,m8,tu,mu
; CHECK-NEXT: vmax.vv v8, v16, v24, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
define <vscale x 32 x i16> @intrinsic_vmax_mask_vv_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, <vscale x 32 x i16> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vmax_mask_vv_nxv32i16_nxv32i16_nxv32i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, zero, e16,m8,ta,mu
-; CHECK-NEXT: vle16.v v24, (a0)
+; CHECK-NEXT: vl8re16.v v24, (a0)
; CHECK-NEXT: vsetvli a0, a1, e16,m8,tu,mu
; CHECK-NEXT: vmax.vv v8, v16, v24, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
define <vscale x 16 x i32> @intrinsic_vmax_mask_vv_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vmax_mask_vv_nxv16i32_nxv16i32_nxv16i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, zero, e32,m8,ta,mu
-; CHECK-NEXT: vle32.v v24, (a0)
+; CHECK-NEXT: vl8re32.v v24, (a0)
; CHECK-NEXT: vsetvli a0, a1, e32,m8,tu,mu
; CHECK-NEXT: vmax.vv v8, v16, v24, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
define <vscale x 8 x i64> @intrinsic_vmax_mask_vv_nxv8i64_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vmax_mask_vv_nxv8i64_nxv8i64_nxv8i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, zero, e64,m8,ta,mu
-; CHECK-NEXT: vle64.v v24, (a0)
+; CHECK-NEXT: vl8re64.v v24, (a0)
; CHECK-NEXT: vsetvli a0, a1, e64,m8,tu,mu
; CHECK-NEXT: vmax.vv v8, v16, v24, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
define <vscale x 64 x i8> @intrinsic_vmaxu_mask_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i8> %2, <vscale x 64 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmaxu_mask_vv_nxv64i8_nxv64i8_nxv64i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, zero, e8,m8,ta,mu
-; CHECK-NEXT: vle8.v v24, (a0)
+; CHECK-NEXT: vl8re8.v v24, (a0)
; CHECK-NEXT: vsetvli a0, a1, e8,m8,tu,mu
; CHECK-NEXT: vmaxu.vv v8, v16, v24, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
define <vscale x 32 x i16> @intrinsic_vmaxu_mask_vv_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, <vscale x 32 x i16> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmaxu_mask_vv_nxv32i16_nxv32i16_nxv32i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, zero, e16,m8,ta,mu
-; CHECK-NEXT: vle16.v v24, (a0)
+; CHECK-NEXT: vl8re16.v v24, (a0)
; CHECK-NEXT: vsetvli a0, a1, e16,m8,tu,mu
; CHECK-NEXT: vmaxu.vv v8, v16, v24, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
define <vscale x 16 x i32> @intrinsic_vmaxu_mask_vv_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmaxu_mask_vv_nxv16i32_nxv16i32_nxv16i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, zero, e32,m8,ta,mu
-; CHECK-NEXT: vle32.v v24, (a0)
+; CHECK-NEXT: vl8re32.v v24, (a0)
; CHECK-NEXT: vsetvli a0, a1, e32,m8,tu,mu
; CHECK-NEXT: vmaxu.vv v8, v16, v24, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
define <vscale x 64 x i8> @intrinsic_vmaxu_mask_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i8> %2, <vscale x 64 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vmaxu_mask_vv_nxv64i8_nxv64i8_nxv64i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, zero, e8,m8,ta,mu
-; CHECK-NEXT: vle8.v v24, (a0)
+; CHECK-NEXT: vl8re8.v v24, (a0)
; CHECK-NEXT: vsetvli a0, a1, e8,m8,tu,mu
; CHECK-NEXT: vmaxu.vv v8, v16, v24, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
define <vscale x 32 x i16> @intrinsic_vmaxu_mask_vv_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, <vscale x 32 x i16> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vmaxu_mask_vv_nxv32i16_nxv32i16_nxv32i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, zero, e16,m8,ta,mu
-; CHECK-NEXT: vle16.v v24, (a0)
+; CHECK-NEXT: vl8re16.v v24, (a0)
; CHECK-NEXT: vsetvli a0, a1, e16,m8,tu,mu
; CHECK-NEXT: vmaxu.vv v8, v16, v24, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
define <vscale x 16 x i32> @intrinsic_vmaxu_mask_vv_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vmaxu_mask_vv_nxv16i32_nxv16i32_nxv16i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, zero, e32,m8,ta,mu
-; CHECK-NEXT: vle32.v v24, (a0)
+; CHECK-NEXT: vl8re32.v v24, (a0)
; CHECK-NEXT: vsetvli a0, a1, e32,m8,tu,mu
; CHECK-NEXT: vmaxu.vv v8, v16, v24, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
define <vscale x 8 x i64> @intrinsic_vmaxu_mask_vv_nxv8i64_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vmaxu_mask_vv_nxv8i64_nxv8i64_nxv8i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, zero, e64,m8,ta,mu
-; CHECK-NEXT: vle64.v v24, (a0)
+; CHECK-NEXT: vl8re64.v v24, (a0)
; CHECK-NEXT: vsetvli a0, a1, e64,m8,tu,mu
; CHECK-NEXT: vmaxu.vv v8, v16, v24, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
define <vscale x 64 x i8> @intrinsic_vmin_mask_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i8> %2, <vscale x 64 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmin_mask_vv_nxv64i8_nxv64i8_nxv64i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, zero, e8,m8,ta,mu
-; CHECK-NEXT: vle8.v v24, (a0)
+; CHECK-NEXT: vl8re8.v v24, (a0)
; CHECK-NEXT: vsetvli a0, a1, e8,m8,tu,mu
; CHECK-NEXT: vmin.vv v8, v16, v24, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
define <vscale x 32 x i16> @intrinsic_vmin_mask_vv_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, <vscale x 32 x i16> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmin_mask_vv_nxv32i16_nxv32i16_nxv32i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, zero, e16,m8,ta,mu
-; CHECK-NEXT: vle16.v v24, (a0)
+; CHECK-NEXT: vl8re16.v v24, (a0)
; CHECK-NEXT: vsetvli a0, a1, e16,m8,tu,mu
; CHECK-NEXT: vmin.vv v8, v16, v24, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
define <vscale x 16 x i32> @intrinsic_vmin_mask_vv_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmin_mask_vv_nxv16i32_nxv16i32_nxv16i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, zero, e32,m8,ta,mu
-; CHECK-NEXT: vle32.v v24, (a0)
+; CHECK-NEXT: vl8re32.v v24, (a0)
; CHECK-NEXT: vsetvli a0, a1, e32,m8,tu,mu
; CHECK-NEXT: vmin.vv v8, v16, v24, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
define <vscale x 64 x i8> @intrinsic_vmin_mask_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i8> %2, <vscale x 64 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vmin_mask_vv_nxv64i8_nxv64i8_nxv64i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, zero, e8,m8,ta,mu
-; CHECK-NEXT: vle8.v v24, (a0)
+; CHECK-NEXT: vl8re8.v v24, (a0)
; CHECK-NEXT: vsetvli a0, a1, e8,m8,tu,mu
; CHECK-NEXT: vmin.vv v8, v16, v24, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
define <vscale x 32 x i16> @intrinsic_vmin_mask_vv_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, <vscale x 32 x i16> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vmin_mask_vv_nxv32i16_nxv32i16_nxv32i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, zero, e16,m8,ta,mu
-; CHECK-NEXT: vle16.v v24, (a0)
+; CHECK-NEXT: vl8re16.v v24, (a0)
; CHECK-NEXT: vsetvli a0, a1, e16,m8,tu,mu
; CHECK-NEXT: vmin.vv v8, v16, v24, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
define <vscale x 16 x i32> @intrinsic_vmin_mask_vv_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vmin_mask_vv_nxv16i32_nxv16i32_nxv16i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, zero, e32,m8,ta,mu
-; CHECK-NEXT: vle32.v v24, (a0)
+; CHECK-NEXT: vl8re32.v v24, (a0)
; CHECK-NEXT: vsetvli a0, a1, e32,m8,tu,mu
; CHECK-NEXT: vmin.vv v8, v16, v24, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
define <vscale x 8 x i64> @intrinsic_vmin_mask_vv_nxv8i64_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vmin_mask_vv_nxv8i64_nxv8i64_nxv8i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, zero, e64,m8,ta,mu
-; CHECK-NEXT: vle64.v v24, (a0)
+; CHECK-NEXT: vl8re64.v v24, (a0)
; CHECK-NEXT: vsetvli a0, a1, e64,m8,tu,mu
; CHECK-NEXT: vmin.vv v8, v16, v24, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
define <vscale x 64 x i8> @intrinsic_vminu_mask_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i8> %2, <vscale x 64 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vminu_mask_vv_nxv64i8_nxv64i8_nxv64i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, zero, e8,m8,ta,mu
-; CHECK-NEXT: vle8.v v24, (a0)
+; CHECK-NEXT: vl8re8.v v24, (a0)
; CHECK-NEXT: vsetvli a0, a1, e8,m8,tu,mu
; CHECK-NEXT: vminu.vv v8, v16, v24, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
define <vscale x 32 x i16> @intrinsic_vminu_mask_vv_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, <vscale x 32 x i16> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vminu_mask_vv_nxv32i16_nxv32i16_nxv32i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, zero, e16,m8,ta,mu
-; CHECK-NEXT: vle16.v v24, (a0)
+; CHECK-NEXT: vl8re16.v v24, (a0)
; CHECK-NEXT: vsetvli a0, a1, e16,m8,tu,mu
; CHECK-NEXT: vminu.vv v8, v16, v24, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
define <vscale x 16 x i32> @intrinsic_vminu_mask_vv_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vminu_mask_vv_nxv16i32_nxv16i32_nxv16i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, zero, e32,m8,ta,mu
-; CHECK-NEXT: vle32.v v24, (a0)
+; CHECK-NEXT: vl8re32.v v24, (a0)
; CHECK-NEXT: vsetvli a0, a1, e32,m8,tu,mu
; CHECK-NEXT: vminu.vv v8, v16, v24, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
define <vscale x 64 x i8> @intrinsic_vminu_mask_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i8> %2, <vscale x 64 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vminu_mask_vv_nxv64i8_nxv64i8_nxv64i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, zero, e8,m8,ta,mu
-; CHECK-NEXT: vle8.v v24, (a0)
+; CHECK-NEXT: vl8re8.v v24, (a0)
; CHECK-NEXT: vsetvli a0, a1, e8,m8,tu,mu
; CHECK-NEXT: vminu.vv v8, v16, v24, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
define <vscale x 32 x i16> @intrinsic_vminu_mask_vv_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, <vscale x 32 x i16> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vminu_mask_vv_nxv32i16_nxv32i16_nxv32i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, zero, e16,m8,ta,mu
-; CHECK-NEXT: vle16.v v24, (a0)
+; CHECK-NEXT: vl8re16.v v24, (a0)
; CHECK-NEXT: vsetvli a0, a1, e16,m8,tu,mu
; CHECK-NEXT: vminu.vv v8, v16, v24, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
define <vscale x 16 x i32> @intrinsic_vminu_mask_vv_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vminu_mask_vv_nxv16i32_nxv16i32_nxv16i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, zero, e32,m8,ta,mu
-; CHECK-NEXT: vle32.v v24, (a0)
+; CHECK-NEXT: vl8re32.v v24, (a0)
; CHECK-NEXT: vsetvli a0, a1, e32,m8,tu,mu
; CHECK-NEXT: vminu.vv v8, v16, v24, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
define <vscale x 8 x i64> @intrinsic_vminu_mask_vv_nxv8i64_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vminu_mask_vv_nxv8i64_nxv8i64_nxv8i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, zero, e64,m8,ta,mu
-; CHECK-NEXT: vle64.v v24, (a0)
+; CHECK-NEXT: vl8re64.v v24, (a0)
; CHECK-NEXT: vsetvli a0, a1, e64,m8,tu,mu
; CHECK-NEXT: vminu.vv v8, v16, v24, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
define <vscale x 64 x i8> @intrinsic_vmul_mask_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i8> %2, <vscale x 64 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmul_mask_vv_nxv64i8_nxv64i8_nxv64i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, zero, e8,m8,ta,mu
-; CHECK-NEXT: vle8.v v24, (a0)
+; CHECK-NEXT: vl8re8.v v24, (a0)
; CHECK-NEXT: vsetvli a0, a1, e8,m8,tu,mu
; CHECK-NEXT: vmul.vv v8, v16, v24, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
define <vscale x 32 x i16> @intrinsic_vmul_mask_vv_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, <vscale x 32 x i16> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmul_mask_vv_nxv32i16_nxv32i16_nxv32i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, zero, e16,m8,ta,mu
-; CHECK-NEXT: vle16.v v24, (a0)
+; CHECK-NEXT: vl8re16.v v24, (a0)
; CHECK-NEXT: vsetvli a0, a1, e16,m8,tu,mu
; CHECK-NEXT: vmul.vv v8, v16, v24, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
define <vscale x 16 x i32> @intrinsic_vmul_mask_vv_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmul_mask_vv_nxv16i32_nxv16i32_nxv16i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, zero, e32,m8,ta,mu
-; CHECK-NEXT: vle32.v v24, (a0)
+; CHECK-NEXT: vl8re32.v v24, (a0)
; CHECK-NEXT: vsetvli a0, a1, e32,m8,tu,mu
; CHECK-NEXT: vmul.vv v8, v16, v24, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
define <vscale x 64 x i8> @intrinsic_vmul_mask_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i8> %2, <vscale x 64 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vmul_mask_vv_nxv64i8_nxv64i8_nxv64i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, zero, e8,m8,ta,mu
-; CHECK-NEXT: vle8.v v24, (a0)
+; CHECK-NEXT: vl8re8.v v24, (a0)
; CHECK-NEXT: vsetvli a0, a1, e8,m8,tu,mu
; CHECK-NEXT: vmul.vv v8, v16, v24, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
define <vscale x 32 x i16> @intrinsic_vmul_mask_vv_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, <vscale x 32 x i16> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vmul_mask_vv_nxv32i16_nxv32i16_nxv32i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, zero, e16,m8,ta,mu
-; CHECK-NEXT: vle16.v v24, (a0)
+; CHECK-NEXT: vl8re16.v v24, (a0)
; CHECK-NEXT: vsetvli a0, a1, e16,m8,tu,mu
; CHECK-NEXT: vmul.vv v8, v16, v24, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
define <vscale x 16 x i32> @intrinsic_vmul_mask_vv_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vmul_mask_vv_nxv16i32_nxv16i32_nxv16i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, zero, e32,m8,ta,mu
-; CHECK-NEXT: vle32.v v24, (a0)
+; CHECK-NEXT: vl8re32.v v24, (a0)
; CHECK-NEXT: vsetvli a0, a1, e32,m8,tu,mu
; CHECK-NEXT: vmul.vv v8, v16, v24, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
define <vscale x 8 x i64> @intrinsic_vmul_mask_vv_nxv8i64_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vmul_mask_vv_nxv8i64_nxv8i64_nxv8i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, zero, e64,m8,ta,mu
-; CHECK-NEXT: vle64.v v24, (a0)
+; CHECK-NEXT: vl8re64.v v24, (a0)
; CHECK-NEXT: vsetvli a0, a1, e64,m8,tu,mu
; CHECK-NEXT: vmul.vv v8, v16, v24, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
define <vscale x 64 x i8> @intrinsic_vmulh_mask_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i8> %2, <vscale x 64 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmulh_mask_vv_nxv64i8_nxv64i8_nxv64i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, zero, e8,m8,ta,mu
-; CHECK-NEXT: vle8.v v24, (a0)
+; CHECK-NEXT: vl8re8.v v24, (a0)
; CHECK-NEXT: vsetvli a0, a1, e8,m8,tu,mu
; CHECK-NEXT: vmulh.vv v8, v16, v24, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
define <vscale x 32 x i16> @intrinsic_vmulh_mask_vv_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, <vscale x 32 x i16> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmulh_mask_vv_nxv32i16_nxv32i16_nxv32i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, zero, e16,m8,ta,mu
-; CHECK-NEXT: vle16.v v24, (a0)
+; CHECK-NEXT: vl8re16.v v24, (a0)
; CHECK-NEXT: vsetvli a0, a1, e16,m8,tu,mu
; CHECK-NEXT: vmulh.vv v8, v16, v24, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
define <vscale x 16 x i32> @intrinsic_vmulh_mask_vv_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmulh_mask_vv_nxv16i32_nxv16i32_nxv16i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, zero, e32,m8,ta,mu
-; CHECK-NEXT: vle32.v v24, (a0)
+; CHECK-NEXT: vl8re32.v v24, (a0)
; CHECK-NEXT: vsetvli a0, a1, e32,m8,tu,mu
; CHECK-NEXT: vmulh.vv v8, v16, v24, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
define <vscale x 64 x i8> @intrinsic_vmulh_mask_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i8> %2, <vscale x 64 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vmulh_mask_vv_nxv64i8_nxv64i8_nxv64i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, zero, e8,m8,ta,mu
-; CHECK-NEXT: vle8.v v24, (a0)
+; CHECK-NEXT: vl8re8.v v24, (a0)
; CHECK-NEXT: vsetvli a0, a1, e8,m8,tu,mu
; CHECK-NEXT: vmulh.vv v8, v16, v24, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
define <vscale x 32 x i16> @intrinsic_vmulh_mask_vv_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, <vscale x 32 x i16> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vmulh_mask_vv_nxv32i16_nxv32i16_nxv32i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, zero, e16,m8,ta,mu
-; CHECK-NEXT: vle16.v v24, (a0)
+; CHECK-NEXT: vl8re16.v v24, (a0)
; CHECK-NEXT: vsetvli a0, a1, e16,m8,tu,mu
; CHECK-NEXT: vmulh.vv v8, v16, v24, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
define <vscale x 16 x i32> @intrinsic_vmulh_mask_vv_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vmulh_mask_vv_nxv16i32_nxv16i32_nxv16i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, zero, e32,m8,ta,mu
-; CHECK-NEXT: vle32.v v24, (a0)
+; CHECK-NEXT: vl8re32.v v24, (a0)
; CHECK-NEXT: vsetvli a0, a1, e32,m8,tu,mu
; CHECK-NEXT: vmulh.vv v8, v16, v24, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
define <vscale x 8 x i64> @intrinsic_vmulh_mask_vv_nxv8i64_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vmulh_mask_vv_nxv8i64_nxv8i64_nxv8i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, zero, e64,m8,ta,mu
-; CHECK-NEXT: vle64.v v24, (a0)
+; CHECK-NEXT: vl8re64.v v24, (a0)
; CHECK-NEXT: vsetvli a0, a1, e64,m8,tu,mu
; CHECK-NEXT: vmulh.vv v8, v16, v24, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
define <vscale x 64 x i8> @intrinsic_vmulhsu_mask_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i8> %2, <vscale x 64 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmulhsu_mask_vv_nxv64i8_nxv64i8_nxv64i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, zero, e8,m8,ta,mu
-; CHECK-NEXT: vle8.v v24, (a0)
+; CHECK-NEXT: vl8re8.v v24, (a0)
; CHECK-NEXT: vsetvli a0, a1, e8,m8,tu,mu
; CHECK-NEXT: vmulhsu.vv v8, v16, v24, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
define <vscale x 32 x i16> @intrinsic_vmulhsu_mask_vv_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, <vscale x 32 x i16> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmulhsu_mask_vv_nxv32i16_nxv32i16_nxv32i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, zero, e16,m8,ta,mu
-; CHECK-NEXT: vle16.v v24, (a0)
+; CHECK-NEXT: vl8re16.v v24, (a0)
; CHECK-NEXT: vsetvli a0, a1, e16,m8,tu,mu
; CHECK-NEXT: vmulhsu.vv v8, v16, v24, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
define <vscale x 16 x i32> @intrinsic_vmulhsu_mask_vv_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmulhsu_mask_vv_nxv16i32_nxv16i32_nxv16i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, zero, e32,m8,ta,mu
-; CHECK-NEXT: vle32.v v24, (a0)
+; CHECK-NEXT: vl8re32.v v24, (a0)
; CHECK-NEXT: vsetvli a0, a1, e32,m8,tu,mu
; CHECK-NEXT: vmulhsu.vv v8, v16, v24, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
define <vscale x 64 x i8> @intrinsic_vmulhsu_mask_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i8> %2, <vscale x 64 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vmulhsu_mask_vv_nxv64i8_nxv64i8_nxv64i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, zero, e8,m8,ta,mu
-; CHECK-NEXT: vle8.v v24, (a0)
+; CHECK-NEXT: vl8re8.v v24, (a0)
; CHECK-NEXT: vsetvli a0, a1, e8,m8,tu,mu
; CHECK-NEXT: vmulhsu.vv v8, v16, v24, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
define <vscale x 32 x i16> @intrinsic_vmulhsu_mask_vv_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, <vscale x 32 x i16> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vmulhsu_mask_vv_nxv32i16_nxv32i16_nxv32i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, zero, e16,m8,ta,mu
-; CHECK-NEXT: vle16.v v24, (a0)
+; CHECK-NEXT: vl8re16.v v24, (a0)
; CHECK-NEXT: vsetvli a0, a1, e16,m8,tu,mu
; CHECK-NEXT: vmulhsu.vv v8, v16, v24, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
define <vscale x 16 x i32> @intrinsic_vmulhsu_mask_vv_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vmulhsu_mask_vv_nxv16i32_nxv16i32_nxv16i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, zero, e32,m8,ta,mu
-; CHECK-NEXT: vle32.v v24, (a0)
+; CHECK-NEXT: vl8re32.v v24, (a0)
; CHECK-NEXT: vsetvli a0, a1, e32,m8,tu,mu
; CHECK-NEXT: vmulhsu.vv v8, v16, v24, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
define <vscale x 8 x i64> @intrinsic_vmulhsu_mask_vv_nxv8i64_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vmulhsu_mask_vv_nxv8i64_nxv8i64_nxv8i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, zero, e64,m8,ta,mu
-; CHECK-NEXT: vle64.v v24, (a0)
+; CHECK-NEXT: vl8re64.v v24, (a0)
; CHECK-NEXT: vsetvli a0, a1, e64,m8,tu,mu
; CHECK-NEXT: vmulhsu.vv v8, v16, v24, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
define <vscale x 64 x i8> @intrinsic_vmulhu_mask_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i8> %2, <vscale x 64 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmulhu_mask_vv_nxv64i8_nxv64i8_nxv64i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, zero, e8,m8,ta,mu
-; CHECK-NEXT: vle8.v v24, (a0)
+; CHECK-NEXT: vl8re8.v v24, (a0)
; CHECK-NEXT: vsetvli a0, a1, e8,m8,tu,mu
; CHECK-NEXT: vmulhu.vv v8, v16, v24, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
define <vscale x 32 x i16> @intrinsic_vmulhu_mask_vv_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, <vscale x 32 x i16> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmulhu_mask_vv_nxv32i16_nxv32i16_nxv32i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, zero, e16,m8,ta,mu
-; CHECK-NEXT: vle16.v v24, (a0)
+; CHECK-NEXT: vl8re16.v v24, (a0)
; CHECK-NEXT: vsetvli a0, a1, e16,m8,tu,mu
; CHECK-NEXT: vmulhu.vv v8, v16, v24, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
define <vscale x 16 x i32> @intrinsic_vmulhu_mask_vv_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmulhu_mask_vv_nxv16i32_nxv16i32_nxv16i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, zero, e32,m8,ta,mu
-; CHECK-NEXT: vle32.v v24, (a0)
+; CHECK-NEXT: vl8re32.v v24, (a0)
; CHECK-NEXT: vsetvli a0, a1, e32,m8,tu,mu
; CHECK-NEXT: vmulhu.vv v8, v16, v24, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
define <vscale x 64 x i8> @intrinsic_vmulhu_mask_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i8> %2, <vscale x 64 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vmulhu_mask_vv_nxv64i8_nxv64i8_nxv64i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, zero, e8,m8,ta,mu
-; CHECK-NEXT: vle8.v v24, (a0)
+; CHECK-NEXT: vl8re8.v v24, (a0)
; CHECK-NEXT: vsetvli a0, a1, e8,m8,tu,mu
; CHECK-NEXT: vmulhu.vv v8, v16, v24, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
define <vscale x 32 x i16> @intrinsic_vmulhu_mask_vv_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, <vscale x 32 x i16> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vmulhu_mask_vv_nxv32i16_nxv32i16_nxv32i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, zero, e16,m8,ta,mu
-; CHECK-NEXT: vle16.v v24, (a0)
+; CHECK-NEXT: vl8re16.v v24, (a0)
; CHECK-NEXT: vsetvli a0, a1, e16,m8,tu,mu
; CHECK-NEXT: vmulhu.vv v8, v16, v24, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
define <vscale x 16 x i32> @intrinsic_vmulhu_mask_vv_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vmulhu_mask_vv_nxv16i32_nxv16i32_nxv16i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, zero, e32,m8,ta,mu
-; CHECK-NEXT: vle32.v v24, (a0)
+; CHECK-NEXT: vl8re32.v v24, (a0)
; CHECK-NEXT: vsetvli a0, a1, e32,m8,tu,mu
; CHECK-NEXT: vmulhu.vv v8, v16, v24, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
define <vscale x 8 x i64> @intrinsic_vmulhu_mask_vv_nxv8i64_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vmulhu_mask_vv_nxv8i64_nxv8i64_nxv8i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, zero, e64,m8,ta,mu
-; CHECK-NEXT: vle64.v v24, (a0)
+; CHECK-NEXT: vl8re64.v v24, (a0)
; CHECK-NEXT: vsetvli a0, a1, e64,m8,tu,mu
; CHECK-NEXT: vmulhu.vv v8, v16, v24, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
define <vscale x 64 x i8> @intrinsic_vor_mask_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i8> %2, <vscale x 64 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vor_mask_vv_nxv64i8_nxv64i8_nxv64i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, zero, e8,m8,ta,mu
-; CHECK-NEXT: vle8.v v24, (a0)
+; CHECK-NEXT: vl8re8.v v24, (a0)
; CHECK-NEXT: vsetvli a0, a1, e8,m8,tu,mu
; CHECK-NEXT: vor.vv v8, v16, v24, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
define <vscale x 32 x i16> @intrinsic_vor_mask_vv_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, <vscale x 32 x i16> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vor_mask_vv_nxv32i16_nxv32i16_nxv32i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, zero, e16,m8,ta,mu
-; CHECK-NEXT: vle16.v v24, (a0)
+; CHECK-NEXT: vl8re16.v v24, (a0)
; CHECK-NEXT: vsetvli a0, a1, e16,m8,tu,mu
; CHECK-NEXT: vor.vv v8, v16, v24, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
define <vscale x 16 x i32> @intrinsic_vor_mask_vv_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vor_mask_vv_nxv16i32_nxv16i32_nxv16i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, zero, e32,m8,ta,mu
-; CHECK-NEXT: vle32.v v24, (a0)
+; CHECK-NEXT: vl8re32.v v24, (a0)
; CHECK-NEXT: vsetvli a0, a1, e32,m8,tu,mu
; CHECK-NEXT: vor.vv v8, v16, v24, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
define <vscale x 64 x i8> @intrinsic_vor_mask_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i8> %2, <vscale x 64 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vor_mask_vv_nxv64i8_nxv64i8_nxv64i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, zero, e8,m8,ta,mu
-; CHECK-NEXT: vle8.v v24, (a0)
+; CHECK-NEXT: vl8re8.v v24, (a0)
; CHECK-NEXT: vsetvli a0, a1, e8,m8,tu,mu
; CHECK-NEXT: vor.vv v8, v16, v24, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
define <vscale x 32 x i16> @intrinsic_vor_mask_vv_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, <vscale x 32 x i16> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vor_mask_vv_nxv32i16_nxv32i16_nxv32i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, zero, e16,m8,ta,mu
-; CHECK-NEXT: vle16.v v24, (a0)
+; CHECK-NEXT: vl8re16.v v24, (a0)
; CHECK-NEXT: vsetvli a0, a1, e16,m8,tu,mu
; CHECK-NEXT: vor.vv v8, v16, v24, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
define <vscale x 16 x i32> @intrinsic_vor_mask_vv_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vor_mask_vv_nxv16i32_nxv16i32_nxv16i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, zero, e32,m8,ta,mu
-; CHECK-NEXT: vle32.v v24, (a0)
+; CHECK-NEXT: vl8re32.v v24, (a0)
; CHECK-NEXT: vsetvli a0, a1, e32,m8,tu,mu
; CHECK-NEXT: vor.vv v8, v16, v24, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
define <vscale x 8 x i64> @intrinsic_vor_mask_vv_nxv8i64_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vor_mask_vv_nxv8i64_nxv8i64_nxv8i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, zero, e64,m8,ta,mu
-; CHECK-NEXT: vle64.v v24, (a0)
+; CHECK-NEXT: vl8re64.v v24, (a0)
; CHECK-NEXT: vsetvli a0, a1, e64,m8,tu,mu
; CHECK-NEXT: vor.vv v8, v16, v24, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
define <vscale x 64 x i8> @intrinsic_vrem_mask_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i8> %2, <vscale x 64 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vrem_mask_vv_nxv64i8_nxv64i8_nxv64i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, zero, e8,m8,ta,mu
-; CHECK-NEXT: vle8.v v24, (a0)
+; CHECK-NEXT: vl8re8.v v24, (a0)
; CHECK-NEXT: vsetvli a0, a1, e8,m8,tu,mu
; CHECK-NEXT: vrem.vv v8, v16, v24, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
define <vscale x 32 x i16> @intrinsic_vrem_mask_vv_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, <vscale x 32 x i16> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vrem_mask_vv_nxv32i16_nxv32i16_nxv32i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, zero, e16,m8,ta,mu
-; CHECK-NEXT: vle16.v v24, (a0)
+; CHECK-NEXT: vl8re16.v v24, (a0)
; CHECK-NEXT: vsetvli a0, a1, e16,m8,tu,mu
; CHECK-NEXT: vrem.vv v8, v16, v24, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
define <vscale x 16 x i32> @intrinsic_vrem_mask_vv_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vrem_mask_vv_nxv16i32_nxv16i32_nxv16i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, zero, e32,m8,ta,mu
-; CHECK-NEXT: vle32.v v24, (a0)
+; CHECK-NEXT: vl8re32.v v24, (a0)
; CHECK-NEXT: vsetvli a0, a1, e32,m8,tu,mu
; CHECK-NEXT: vrem.vv v8, v16, v24, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
define <vscale x 64 x i8> @intrinsic_vrem_mask_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i8> %2, <vscale x 64 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vrem_mask_vv_nxv64i8_nxv64i8_nxv64i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, zero, e8,m8,ta,mu
-; CHECK-NEXT: vle8.v v24, (a0)
+; CHECK-NEXT: vl8re8.v v24, (a0)
; CHECK-NEXT: vsetvli a0, a1, e8,m8,tu,mu
; CHECK-NEXT: vrem.vv v8, v16, v24, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
define <vscale x 32 x i16> @intrinsic_vrem_mask_vv_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, <vscale x 32 x i16> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vrem_mask_vv_nxv32i16_nxv32i16_nxv32i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, zero, e16,m8,ta,mu
-; CHECK-NEXT: vle16.v v24, (a0)
+; CHECK-NEXT: vl8re16.v v24, (a0)
; CHECK-NEXT: vsetvli a0, a1, e16,m8,tu,mu
; CHECK-NEXT: vrem.vv v8, v16, v24, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
define <vscale x 16 x i32> @intrinsic_vrem_mask_vv_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vrem_mask_vv_nxv16i32_nxv16i32_nxv16i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, zero, e32,m8,ta,mu
-; CHECK-NEXT: vle32.v v24, (a0)
+; CHECK-NEXT: vl8re32.v v24, (a0)
; CHECK-NEXT: vsetvli a0, a1, e32,m8,tu,mu
; CHECK-NEXT: vrem.vv v8, v16, v24, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
define <vscale x 8 x i64> @intrinsic_vrem_mask_vv_nxv8i64_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vrem_mask_vv_nxv8i64_nxv8i64_nxv8i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, zero, e64,m8,ta,mu
-; CHECK-NEXT: vle64.v v24, (a0)
+; CHECK-NEXT: vl8re64.v v24, (a0)
; CHECK-NEXT: vsetvli a0, a1, e64,m8,tu,mu
; CHECK-NEXT: vrem.vv v8, v16, v24, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
define <vscale x 64 x i8> @intrinsic_vremu_mask_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i8> %2, <vscale x 64 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vremu_mask_vv_nxv64i8_nxv64i8_nxv64i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, zero, e8,m8,ta,mu
-; CHECK-NEXT: vle8.v v24, (a0)
+; CHECK-NEXT: vl8re8.v v24, (a0)
; CHECK-NEXT: vsetvli a0, a1, e8,m8,tu,mu
; CHECK-NEXT: vremu.vv v8, v16, v24, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
define <vscale x 32 x i16> @intrinsic_vremu_mask_vv_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, <vscale x 32 x i16> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vremu_mask_vv_nxv32i16_nxv32i16_nxv32i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, zero, e16,m8,ta,mu
-; CHECK-NEXT: vle16.v v24, (a0)
+; CHECK-NEXT: vl8re16.v v24, (a0)
; CHECK-NEXT: vsetvli a0, a1, e16,m8,tu,mu
; CHECK-NEXT: vremu.vv v8, v16, v24, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
define <vscale x 16 x i32> @intrinsic_vremu_mask_vv_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vremu_mask_vv_nxv16i32_nxv16i32_nxv16i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, zero, e32,m8,ta,mu
-; CHECK-NEXT: vle32.v v24, (a0)
+; CHECK-NEXT: vl8re32.v v24, (a0)
; CHECK-NEXT: vsetvli a0, a1, e32,m8,tu,mu
; CHECK-NEXT: vremu.vv v8, v16, v24, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
define <vscale x 64 x i8> @intrinsic_vremu_mask_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i8> %2, <vscale x 64 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vremu_mask_vv_nxv64i8_nxv64i8_nxv64i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, zero, e8,m8,ta,mu
-; CHECK-NEXT: vle8.v v24, (a0)
+; CHECK-NEXT: vl8re8.v v24, (a0)
; CHECK-NEXT: vsetvli a0, a1, e8,m8,tu,mu
; CHECK-NEXT: vremu.vv v8, v16, v24, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
define <vscale x 32 x i16> @intrinsic_vremu_mask_vv_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, <vscale x 32 x i16> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vremu_mask_vv_nxv32i16_nxv32i16_nxv32i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, zero, e16,m8,ta,mu
-; CHECK-NEXT: vle16.v v24, (a0)
+; CHECK-NEXT: vl8re16.v v24, (a0)
; CHECK-NEXT: vsetvli a0, a1, e16,m8,tu,mu
; CHECK-NEXT: vremu.vv v8, v16, v24, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
define <vscale x 16 x i32> @intrinsic_vremu_mask_vv_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vremu_mask_vv_nxv16i32_nxv16i32_nxv16i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, zero, e32,m8,ta,mu
-; CHECK-NEXT: vle32.v v24, (a0)
+; CHECK-NEXT: vl8re32.v v24, (a0)
; CHECK-NEXT: vsetvli a0, a1, e32,m8,tu,mu
; CHECK-NEXT: vremu.vv v8, v16, v24, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
define <vscale x 8 x i64> @intrinsic_vremu_mask_vv_nxv8i64_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vremu_mask_vv_nxv8i64_nxv8i64_nxv8i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, zero, e64,m8,ta,mu
-; CHECK-NEXT: vle64.v v24, (a0)
+; CHECK-NEXT: vl8re64.v v24, (a0)
; CHECK-NEXT: vsetvli a0, a1, e64,m8,tu,mu
; CHECK-NEXT: vremu.vv v8, v16, v24, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
define <vscale x 64 x i8> @intrinsic_vrgather_mask_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i8> %2, <vscale x 64 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv64i8_nxv64i8_nxv64i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, zero, e8,m8,ta,mu
-; CHECK-NEXT: vle8.v v24, (a0)
+; CHECK-NEXT: vl8re8.v v24, (a0)
; CHECK-NEXT: vsetvli a0, a1, e8,m8,tu,mu
; CHECK-NEXT: vrgather.vv v8, v16, v24, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
define <vscale x 32 x i16> @intrinsic_vrgather_mask_vv_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, <vscale x 32 x i16> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv32i16_nxv32i16_nxv32i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, zero, e16,m8,ta,mu
-; CHECK-NEXT: vle16.v v24, (a0)
+; CHECK-NEXT: vl8re16.v v24, (a0)
; CHECK-NEXT: vsetvli a0, a1, e16,m8,tu,mu
; CHECK-NEXT: vrgather.vv v8, v16, v24, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
define <vscale x 16 x i32> @intrinsic_vrgather_mask_vv_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv16i32_nxv16i32_nxv16i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, zero, e32,m8,ta,mu
-; CHECK-NEXT: vle32.v v24, (a0)
+; CHECK-NEXT: vl8re32.v v24, (a0)
; CHECK-NEXT: vsetvli a0, a1, e32,m8,tu,mu
; CHECK-NEXT: vrgather.vv v8, v16, v24, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
define <vscale x 32 x half> @intrinsic_vrgather_mask_vv_nxv32f16_nxv32f16_nxv32i16(<vscale x 32 x half> %0, <vscale x 32 x half> %1, <vscale x 32 x i16> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv32f16_nxv32f16_nxv32i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, zero, e16,m8,ta,mu
-; CHECK-NEXT: vle16.v v24, (a0)
+; CHECK-NEXT: vl8re16.v v24, (a0)
; CHECK-NEXT: vsetvli a0, a1, e16,m8,tu,mu
; CHECK-NEXT: vrgather.vv v8, v16, v24, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
define <vscale x 16 x float> @intrinsic_vrgather_mask_vv_nxv16f32_nxv16f32_nxv16i32(<vscale x 16 x float> %0, <vscale x 16 x float> %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv16f32_nxv16f32_nxv16i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, zero, e32,m8,ta,mu
-; CHECK-NEXT: vle32.v v24, (a0)
+; CHECK-NEXT: vl8re32.v v24, (a0)
; CHECK-NEXT: vsetvli a0, a1, e32,m8,tu,mu
; CHECK-NEXT: vrgather.vv v8, v16, v24, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
define <vscale x 8 x double> @intrinsic_vrgather_mask_vv_nxv8f64_nxv8f64_nxv8i64(<vscale x 8 x double> %0, <vscale x 8 x double> %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv8f64_nxv8f64_nxv8i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, zero, e64,m8,ta,mu
-; CHECK-NEXT: vle64.v v24, (a0)
+; CHECK-NEXT: vl8re64.v v24, (a0)
; CHECK-NEXT: vsetvli a0, a1, e64,m8,tu,mu
; CHECK-NEXT: vrgather.vv v8, v16, v24, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
define <vscale x 64 x i8> @intrinsic_vrgather_mask_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i8> %2, <vscale x 64 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv64i8_nxv64i8_nxv64i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, zero, e8,m8,ta,mu
-; CHECK-NEXT: vle8.v v24, (a0)
+; CHECK-NEXT: vl8re8.v v24, (a0)
; CHECK-NEXT: vsetvli a0, a1, e8,m8,tu,mu
; CHECK-NEXT: vrgather.vv v8, v16, v24, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
define <vscale x 32 x i16> @intrinsic_vrgather_mask_vv_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, <vscale x 32 x i16> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv32i16_nxv32i16_nxv32i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, zero, e16,m8,ta,mu
-; CHECK-NEXT: vle16.v v24, (a0)
+; CHECK-NEXT: vl8re16.v v24, (a0)
; CHECK-NEXT: vsetvli a0, a1, e16,m8,tu,mu
; CHECK-NEXT: vrgather.vv v8, v16, v24, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
define <vscale x 16 x i32> @intrinsic_vrgather_mask_vv_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv16i32_nxv16i32_nxv16i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, zero, e32,m8,ta,mu
-; CHECK-NEXT: vle32.v v24, (a0)
+; CHECK-NEXT: vl8re32.v v24, (a0)
; CHECK-NEXT: vsetvli a0, a1, e32,m8,tu,mu
; CHECK-NEXT: vrgather.vv v8, v16, v24, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
define <vscale x 8 x i64> @intrinsic_vrgather_mask_vv_nxv8i64_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv8i64_nxv8i64_nxv8i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, zero, e64,m8,ta,mu
-; CHECK-NEXT: vle64.v v24, (a0)
+; CHECK-NEXT: vl8re64.v v24, (a0)
; CHECK-NEXT: vsetvli a0, a1, e64,m8,tu,mu
; CHECK-NEXT: vrgather.vv v8, v16, v24, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
define <vscale x 32 x half> @intrinsic_vrgather_mask_vv_nxv32f16_nxv32f16_nxv32i16(<vscale x 32 x half> %0, <vscale x 32 x half> %1, <vscale x 32 x i16> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv32f16_nxv32f16_nxv32i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, zero, e16,m8,ta,mu
-; CHECK-NEXT: vle16.v v24, (a0)
+; CHECK-NEXT: vl8re16.v v24, (a0)
; CHECK-NEXT: vsetvli a0, a1, e16,m8,tu,mu
; CHECK-NEXT: vrgather.vv v8, v16, v24, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
define <vscale x 16 x float> @intrinsic_vrgather_mask_vv_nxv16f32_nxv16f32_nxv16i32(<vscale x 16 x float> %0, <vscale x 16 x float> %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv16f32_nxv16f32_nxv16i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, zero, e32,m8,ta,mu
-; CHECK-NEXT: vle32.v v24, (a0)
+; CHECK-NEXT: vl8re32.v v24, (a0)
; CHECK-NEXT: vsetvli a0, a1, e32,m8,tu,mu
; CHECK-NEXT: vrgather.vv v8, v16, v24, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
define <vscale x 8 x double> @intrinsic_vrgather_mask_vv_nxv8f64_nxv8f64_nxv8i64(<vscale x 8 x double> %0, <vscale x 8 x double> %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv8f64_nxv8f64_nxv8i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, zero, e64,m8,ta,mu
-; CHECK-NEXT: vle64.v v24, (a0)
+; CHECK-NEXT: vl8re64.v v24, (a0)
; CHECK-NEXT: vsetvli a0, a1, e64,m8,tu,mu
; CHECK-NEXT: vrgather.vv v8, v16, v24, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
define <vscale x 32 x i16> @intrinsic_vrgatherei16_mask_vv_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, <vscale x 32 x i16> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vrgatherei16_mask_vv_nxv32i16_nxv32i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, zero, e16,m8,ta,mu
-; CHECK-NEXT: vle16.v v24, (a0)
+; CHECK-NEXT: vl8re16.v v24, (a0)
; CHECK-NEXT: vsetvli a0, a1, e16,m8,tu,mu
; CHECK-NEXT: vrgatherei16.vv v8, v16, v24, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
define <vscale x 16 x i32> @intrinsic_vrgatherei16_mask_vv_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vrgatherei16_mask_vv_nxv16i32_nxv16i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, zero, e16,m4,ta,mu
-; CHECK-NEXT: vle16.v v28, (a0)
+; CHECK-NEXT: vl4re16.v v28, (a0)
; CHECK-NEXT: vsetvli a0, a1, e32,m8,tu,mu
; CHECK-NEXT: vrgatherei16.vv v8, v16, v28, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
define <vscale x 32 x half> @intrinsic_vrgatherei16_mask_vv_nxv32f16_nxv32f16(<vscale x 32 x half> %0, <vscale x 32 x half> %1, <vscale x 32 x i16> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vrgatherei16_mask_vv_nxv32f16_nxv32f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, zero, e16,m8,ta,mu
-; CHECK-NEXT: vle16.v v24, (a0)
+; CHECK-NEXT: vl8re16.v v24, (a0)
; CHECK-NEXT: vsetvli a0, a1, e16,m8,tu,mu
; CHECK-NEXT: vrgatherei16.vv v8, v16, v24, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
define <vscale x 16 x float> @intrinsic_vrgatherei16_mask_vv_nxv16f32_nxv16f32(<vscale x 16 x float> %0, <vscale x 16 x float> %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vrgatherei16_mask_vv_nxv16f32_nxv16f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, zero, e16,m4,ta,mu
-; CHECK-NEXT: vle16.v v28, (a0)
+; CHECK-NEXT: vl4re16.v v28, (a0)
; CHECK-NEXT: vsetvli a0, a1, e32,m8,tu,mu
; CHECK-NEXT: vrgatherei16.vv v8, v16, v28, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
define <vscale x 8 x double> @intrinsic_vrgatherei16_mask_vv_nxv8f64_nxv8f64(<vscale x 8 x double> %0, <vscale x 8 x double> %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vrgatherei16_mask_vv_nxv8f64_nxv8f64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, zero, e16,m2,ta,mu
-; CHECK-NEXT: vle16.v v26, (a0)
+; CHECK-NEXT: vl2re16.v v26, (a0)
; CHECK-NEXT: vsetvli a0, a1, e64,m8,tu,mu
; CHECK-NEXT: vrgatherei16.vv v8, v16, v26, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
define <vscale x 32 x i16> @intrinsic_vrgatherei16_mask_vv_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, <vscale x 32 x i16> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vrgatherei16_mask_vv_nxv32i16_nxv32i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, zero, e16,m8,ta,mu
-; CHECK-NEXT: vle16.v v24, (a0)
+; CHECK-NEXT: vl8re16.v v24, (a0)
; CHECK-NEXT: vsetvli a0, a1, e16,m8,tu,mu
; CHECK-NEXT: vrgatherei16.vv v8, v16, v24, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
define <vscale x 16 x i32> @intrinsic_vrgatherei16_mask_vv_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vrgatherei16_mask_vv_nxv16i32_nxv16i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, zero, e16,m4,ta,mu
-; CHECK-NEXT: vle16.v v28, (a0)
+; CHECK-NEXT: vl4re16.v v28, (a0)
; CHECK-NEXT: vsetvli a0, a1, e32,m8,tu,mu
; CHECK-NEXT: vrgatherei16.vv v8, v16, v28, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
define <vscale x 8 x i64> @intrinsic_vrgatherei16_mask_vv_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vrgatherei16_mask_vv_nxv8i64_nxv8i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, zero, e16,m2,ta,mu
-; CHECK-NEXT: vle16.v v26, (a0)
+; CHECK-NEXT: vl2re16.v v26, (a0)
; CHECK-NEXT: vsetvli a0, a1, e64,m8,tu,mu
; CHECK-NEXT: vrgatherei16.vv v8, v16, v26, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
define <vscale x 32 x half> @intrinsic_vrgatherei16_mask_vv_nxv32f16_nxv32f16(<vscale x 32 x half> %0, <vscale x 32 x half> %1, <vscale x 32 x i16> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vrgatherei16_mask_vv_nxv32f16_nxv32f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, zero, e16,m8,ta,mu
-; CHECK-NEXT: vle16.v v24, (a0)
+; CHECK-NEXT: vl8re16.v v24, (a0)
; CHECK-NEXT: vsetvli a0, a1, e16,m8,tu,mu
; CHECK-NEXT: vrgatherei16.vv v8, v16, v24, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
define <vscale x 16 x float> @intrinsic_vrgatherei16_mask_vv_nxv16f32_nxv16f32(<vscale x 16 x float> %0, <vscale x 16 x float> %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vrgatherei16_mask_vv_nxv16f32_nxv16f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, zero, e16,m4,ta,mu
-; CHECK-NEXT: vle16.v v28, (a0)
+; CHECK-NEXT: vl4re16.v v28, (a0)
; CHECK-NEXT: vsetvli a0, a1, e32,m8,tu,mu
; CHECK-NEXT: vrgatherei16.vv v8, v16, v28, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
define <vscale x 8 x double> @intrinsic_vrgatherei16_mask_vv_nxv8f64_nxv8f64(<vscale x 8 x double> %0, <vscale x 8 x double> %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vrgatherei16_mask_vv_nxv8f64_nxv8f64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, zero, e16,m2,ta,mu
-; CHECK-NEXT: vle16.v v26, (a0)
+; CHECK-NEXT: vl2re16.v v26, (a0)
; CHECK-NEXT: vsetvli a0, a1, e64,m8,tu,mu
; CHECK-NEXT: vrgatherei16.vv v8, v16, v26, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
define <vscale x 64 x i8> @intrinsic_vsadd_mask_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i8> %2, <vscale x 64 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vsadd_mask_vv_nxv64i8_nxv64i8_nxv64i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, zero, e8,m8,ta,mu
-; CHECK-NEXT: vle8.v v24, (a0)
+; CHECK-NEXT: vl8re8.v v24, (a0)
; CHECK-NEXT: vsetvli a0, a1, e8,m8,tu,mu
; CHECK-NEXT: vsadd.vv v8, v16, v24, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
define <vscale x 32 x i16> @intrinsic_vsadd_mask_vv_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, <vscale x 32 x i16> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vsadd_mask_vv_nxv32i16_nxv32i16_nxv32i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, zero, e16,m8,ta,mu
-; CHECK-NEXT: vle16.v v24, (a0)
+; CHECK-NEXT: vl8re16.v v24, (a0)
; CHECK-NEXT: vsetvli a0, a1, e16,m8,tu,mu
; CHECK-NEXT: vsadd.vv v8, v16, v24, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
define <vscale x 16 x i32> @intrinsic_vsadd_mask_vv_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vsadd_mask_vv_nxv16i32_nxv16i32_nxv16i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, zero, e32,m8,ta,mu
-; CHECK-NEXT: vle32.v v24, (a0)
+; CHECK-NEXT: vl8re32.v v24, (a0)
; CHECK-NEXT: vsetvli a0, a1, e32,m8,tu,mu
; CHECK-NEXT: vsadd.vv v8, v16, v24, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
define <vscale x 64 x i8> @intrinsic_vsadd_mask_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i8> %2, <vscale x 64 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vsadd_mask_vv_nxv64i8_nxv64i8_nxv64i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, zero, e8,m8,ta,mu
-; CHECK-NEXT: vle8.v v24, (a0)
+; CHECK-NEXT: vl8re8.v v24, (a0)
; CHECK-NEXT: vsetvli a0, a1, e8,m8,tu,mu
; CHECK-NEXT: vsadd.vv v8, v16, v24, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
define <vscale x 32 x i16> @intrinsic_vsadd_mask_vv_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, <vscale x 32 x i16> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vsadd_mask_vv_nxv32i16_nxv32i16_nxv32i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, zero, e16,m8,ta,mu
-; CHECK-NEXT: vle16.v v24, (a0)
+; CHECK-NEXT: vl8re16.v v24, (a0)
; CHECK-NEXT: vsetvli a0, a1, e16,m8,tu,mu
; CHECK-NEXT: vsadd.vv v8, v16, v24, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
define <vscale x 16 x i32> @intrinsic_vsadd_mask_vv_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vsadd_mask_vv_nxv16i32_nxv16i32_nxv16i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, zero, e32,m8,ta,mu
-; CHECK-NEXT: vle32.v v24, (a0)
+; CHECK-NEXT: vl8re32.v v24, (a0)
; CHECK-NEXT: vsetvli a0, a1, e32,m8,tu,mu
; CHECK-NEXT: vsadd.vv v8, v16, v24, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
define <vscale x 8 x i64> @intrinsic_vsadd_mask_vv_nxv8i64_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vsadd_mask_vv_nxv8i64_nxv8i64_nxv8i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, zero, e64,m8,ta,mu
-; CHECK-NEXT: vle64.v v24, (a0)
+; CHECK-NEXT: vl8re64.v v24, (a0)
; CHECK-NEXT: vsetvli a0, a1, e64,m8,tu,mu
; CHECK-NEXT: vsadd.vv v8, v16, v24, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
define <vscale x 64 x i8> @intrinsic_vsaddu_mask_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i8> %2, <vscale x 64 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vsaddu_mask_vv_nxv64i8_nxv64i8_nxv64i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, zero, e8,m8,ta,mu
-; CHECK-NEXT: vle8.v v24, (a0)
+; CHECK-NEXT: vl8re8.v v24, (a0)
; CHECK-NEXT: vsetvli a0, a1, e8,m8,tu,mu
; CHECK-NEXT: vsaddu.vv v8, v16, v24, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
define <vscale x 32 x i16> @intrinsic_vsaddu_mask_vv_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, <vscale x 32 x i16> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vsaddu_mask_vv_nxv32i16_nxv32i16_nxv32i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, zero, e16,m8,ta,mu
-; CHECK-NEXT: vle16.v v24, (a0)
+; CHECK-NEXT: vl8re16.v v24, (a0)
; CHECK-NEXT: vsetvli a0, a1, e16,m8,tu,mu
; CHECK-NEXT: vsaddu.vv v8, v16, v24, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
define <vscale x 16 x i32> @intrinsic_vsaddu_mask_vv_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vsaddu_mask_vv_nxv16i32_nxv16i32_nxv16i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, zero, e32,m8,ta,mu
-; CHECK-NEXT: vle32.v v24, (a0)
+; CHECK-NEXT: vl8re32.v v24, (a0)
; CHECK-NEXT: vsetvli a0, a1, e32,m8,tu,mu
; CHECK-NEXT: vsaddu.vv v8, v16, v24, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
define <vscale x 64 x i8> @intrinsic_vsaddu_mask_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i8> %2, <vscale x 64 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vsaddu_mask_vv_nxv64i8_nxv64i8_nxv64i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, zero, e8,m8,ta,mu
-; CHECK-NEXT: vle8.v v24, (a0)
+; CHECK-NEXT: vl8re8.v v24, (a0)
; CHECK-NEXT: vsetvli a0, a1, e8,m8,tu,mu
; CHECK-NEXT: vsaddu.vv v8, v16, v24, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
define <vscale x 32 x i16> @intrinsic_vsaddu_mask_vv_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, <vscale x 32 x i16> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vsaddu_mask_vv_nxv32i16_nxv32i16_nxv32i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, zero, e16,m8,ta,mu
-; CHECK-NEXT: vle16.v v24, (a0)
+; CHECK-NEXT: vl8re16.v v24, (a0)
; CHECK-NEXT: vsetvli a0, a1, e16,m8,tu,mu
; CHECK-NEXT: vsaddu.vv v8, v16, v24, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
define <vscale x 16 x i32> @intrinsic_vsaddu_mask_vv_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vsaddu_mask_vv_nxv16i32_nxv16i32_nxv16i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, zero, e32,m8,ta,mu
-; CHECK-NEXT: vle32.v v24, (a0)
+; CHECK-NEXT: vl8re32.v v24, (a0)
; CHECK-NEXT: vsetvli a0, a1, e32,m8,tu,mu
; CHECK-NEXT: vsaddu.vv v8, v16, v24, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
define <vscale x 8 x i64> @intrinsic_vsaddu_mask_vv_nxv8i64_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vsaddu_mask_vv_nxv8i64_nxv8i64_nxv8i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, zero, e64,m8,ta,mu
-; CHECK-NEXT: vle64.v v24, (a0)
+; CHECK-NEXT: vl8re64.v v24, (a0)
; CHECK-NEXT: vsetvli a0, a1, e64,m8,tu,mu
; CHECK-NEXT: vsaddu.vv v8, v16, v24, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
define <vscale x 64 x i8> @intrinsic_vsll_mask_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i8> %2, <vscale x 64 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vsll_mask_vv_nxv64i8_nxv64i8_nxv64i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, zero, e8,m8,ta,mu
-; CHECK-NEXT: vle8.v v24, (a0)
+; CHECK-NEXT: vl8re8.v v24, (a0)
; CHECK-NEXT: vsetvli a0, a1, e8,m8,tu,mu
; CHECK-NEXT: vsll.vv v8, v16, v24, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
define <vscale x 32 x i16> @intrinsic_vsll_mask_vv_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, <vscale x 32 x i16> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vsll_mask_vv_nxv32i16_nxv32i16_nxv32i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, zero, e16,m8,ta,mu
-; CHECK-NEXT: vle16.v v24, (a0)
+; CHECK-NEXT: vl8re16.v v24, (a0)
; CHECK-NEXT: vsetvli a0, a1, e16,m8,tu,mu
; CHECK-NEXT: vsll.vv v8, v16, v24, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
define <vscale x 16 x i32> @intrinsic_vsll_mask_vv_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vsll_mask_vv_nxv16i32_nxv16i32_nxv16i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, zero, e32,m8,ta,mu
-; CHECK-NEXT: vle32.v v24, (a0)
+; CHECK-NEXT: vl8re32.v v24, (a0)
; CHECK-NEXT: vsetvli a0, a1, e32,m8,tu,mu
; CHECK-NEXT: vsll.vv v8, v16, v24, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
define <vscale x 64 x i8> @intrinsic_vsll_mask_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i8> %2, <vscale x 64 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vsll_mask_vv_nxv64i8_nxv64i8_nxv64i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, zero, e8,m8,ta,mu
-; CHECK-NEXT: vle8.v v24, (a0)
+; CHECK-NEXT: vl8re8.v v24, (a0)
; CHECK-NEXT: vsetvli a0, a1, e8,m8,tu,mu
; CHECK-NEXT: vsll.vv v8, v16, v24, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
define <vscale x 32 x i16> @intrinsic_vsll_mask_vv_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, <vscale x 32 x i16> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vsll_mask_vv_nxv32i16_nxv32i16_nxv32i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, zero, e16,m8,ta,mu
-; CHECK-NEXT: vle16.v v24, (a0)
+; CHECK-NEXT: vl8re16.v v24, (a0)
; CHECK-NEXT: vsetvli a0, a1, e16,m8,tu,mu
; CHECK-NEXT: vsll.vv v8, v16, v24, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
define <vscale x 16 x i32> @intrinsic_vsll_mask_vv_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vsll_mask_vv_nxv16i32_nxv16i32_nxv16i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, zero, e32,m8,ta,mu
-; CHECK-NEXT: vle32.v v24, (a0)
+; CHECK-NEXT: vl8re32.v v24, (a0)
; CHECK-NEXT: vsetvli a0, a1, e32,m8,tu,mu
; CHECK-NEXT: vsll.vv v8, v16, v24, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
define <vscale x 8 x i64> @intrinsic_vsll_mask_vv_nxv8i64_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vsll_mask_vv_nxv8i64_nxv8i64_nxv8i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, zero, e64,m8,ta,mu
-; CHECK-NEXT: vle64.v v24, (a0)
+; CHECK-NEXT: vl8re64.v v24, (a0)
; CHECK-NEXT: vsetvli a0, a1, e64,m8,tu,mu
; CHECK-NEXT: vsll.vv v8, v16, v24, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
define <vscale x 64 x i8> @intrinsic_vsmul_mask_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i8> %2, <vscale x 64 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vsmul_mask_vv_nxv64i8_nxv64i8_nxv64i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, zero, e8,m8,ta,mu
-; CHECK-NEXT: vle8.v v24, (a0)
+; CHECK-NEXT: vl8re8.v v24, (a0)
; CHECK-NEXT: vsetvli a0, a1, e8,m8,tu,mu
; CHECK-NEXT: vsmul.vv v8, v16, v24, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
define <vscale x 32 x i16> @intrinsic_vsmul_mask_vv_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, <vscale x 32 x i16> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vsmul_mask_vv_nxv32i16_nxv32i16_nxv32i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, zero, e16,m8,ta,mu
-; CHECK-NEXT: vle16.v v24, (a0)
+; CHECK-NEXT: vl8re16.v v24, (a0)
; CHECK-NEXT: vsetvli a0, a1, e16,m8,tu,mu
; CHECK-NEXT: vsmul.vv v8, v16, v24, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
define <vscale x 16 x i32> @intrinsic_vsmul_mask_vv_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vsmul_mask_vv_nxv16i32_nxv16i32_nxv16i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, zero, e32,m8,ta,mu
-; CHECK-NEXT: vle32.v v24, (a0)
+; CHECK-NEXT: vl8re32.v v24, (a0)
; CHECK-NEXT: vsetvli a0, a1, e32,m8,tu,mu
; CHECK-NEXT: vsmul.vv v8, v16, v24, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
define <vscale x 64 x i8> @intrinsic_vsmul_mask_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i8> %2, <vscale x 64 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vsmul_mask_vv_nxv64i8_nxv64i8_nxv64i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, zero, e8,m8,ta,mu
-; CHECK-NEXT: vle8.v v24, (a0)
+; CHECK-NEXT: vl8re8.v v24, (a0)
; CHECK-NEXT: vsetvli a0, a1, e8,m8,tu,mu
; CHECK-NEXT: vsmul.vv v8, v16, v24, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
define <vscale x 32 x i16> @intrinsic_vsmul_mask_vv_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, <vscale x 32 x i16> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vsmul_mask_vv_nxv32i16_nxv32i16_nxv32i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, zero, e16,m8,ta,mu
-; CHECK-NEXT: vle16.v v24, (a0)
+; CHECK-NEXT: vl8re16.v v24, (a0)
; CHECK-NEXT: vsetvli a0, a1, e16,m8,tu,mu
; CHECK-NEXT: vsmul.vv v8, v16, v24, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
define <vscale x 16 x i32> @intrinsic_vsmul_mask_vv_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vsmul_mask_vv_nxv16i32_nxv16i32_nxv16i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, zero, e32,m8,ta,mu
-; CHECK-NEXT: vle32.v v24, (a0)
+; CHECK-NEXT: vl8re32.v v24, (a0)
; CHECK-NEXT: vsetvli a0, a1, e32,m8,tu,mu
; CHECK-NEXT: vsmul.vv v8, v16, v24, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
define <vscale x 8 x i64> @intrinsic_vsmul_mask_vv_nxv8i64_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vsmul_mask_vv_nxv8i64_nxv8i64_nxv8i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, zero, e64,m8,ta,mu
-; CHECK-NEXT: vle64.v v24, (a0)
+; CHECK-NEXT: vl8re64.v v24, (a0)
; CHECK-NEXT: vsetvli a0, a1, e64,m8,tu,mu
; CHECK-NEXT: vsmul.vv v8, v16, v24, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
define <vscale x 64 x i8> @intrinsic_vsra_mask_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i8> %2, <vscale x 64 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vsra_mask_vv_nxv64i8_nxv64i8_nxv64i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, zero, e8,m8,ta,mu
-; CHECK-NEXT: vle8.v v24, (a0)
+; CHECK-NEXT: vl8re8.v v24, (a0)
; CHECK-NEXT: vsetvli a0, a1, e8,m8,tu,mu
; CHECK-NEXT: vsra.vv v8, v16, v24, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
define <vscale x 32 x i16> @intrinsic_vsra_mask_vv_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, <vscale x 32 x i16> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vsra_mask_vv_nxv32i16_nxv32i16_nxv32i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, zero, e16,m8,ta,mu
-; CHECK-NEXT: vle16.v v24, (a0)
+; CHECK-NEXT: vl8re16.v v24, (a0)
; CHECK-NEXT: vsetvli a0, a1, e16,m8,tu,mu
; CHECK-NEXT: vsra.vv v8, v16, v24, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
define <vscale x 16 x i32> @intrinsic_vsra_mask_vv_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vsra_mask_vv_nxv16i32_nxv16i32_nxv16i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, zero, e32,m8,ta,mu
-; CHECK-NEXT: vle32.v v24, (a0)
+; CHECK-NEXT: vl8re32.v v24, (a0)
; CHECK-NEXT: vsetvli a0, a1, e32,m8,tu,mu
; CHECK-NEXT: vsra.vv v8, v16, v24, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
define <vscale x 64 x i8> @intrinsic_vsra_mask_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i8> %2, <vscale x 64 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vsra_mask_vv_nxv64i8_nxv64i8_nxv64i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, zero, e8,m8,ta,mu
-; CHECK-NEXT: vle8.v v24, (a0)
+; CHECK-NEXT: vl8re8.v v24, (a0)
; CHECK-NEXT: vsetvli a0, a1, e8,m8,tu,mu
; CHECK-NEXT: vsra.vv v8, v16, v24, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
define <vscale x 32 x i16> @intrinsic_vsra_mask_vv_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, <vscale x 32 x i16> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vsra_mask_vv_nxv32i16_nxv32i16_nxv32i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, zero, e16,m8,ta,mu
-; CHECK-NEXT: vle16.v v24, (a0)
+; CHECK-NEXT: vl8re16.v v24, (a0)
; CHECK-NEXT: vsetvli a0, a1, e16,m8,tu,mu
; CHECK-NEXT: vsra.vv v8, v16, v24, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
define <vscale x 16 x i32> @intrinsic_vsra_mask_vv_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vsra_mask_vv_nxv16i32_nxv16i32_nxv16i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, zero, e32,m8,ta,mu
-; CHECK-NEXT: vle32.v v24, (a0)
+; CHECK-NEXT: vl8re32.v v24, (a0)
; CHECK-NEXT: vsetvli a0, a1, e32,m8,tu,mu
; CHECK-NEXT: vsra.vv v8, v16, v24, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
define <vscale x 8 x i64> @intrinsic_vsra_mask_vv_nxv8i64_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vsra_mask_vv_nxv8i64_nxv8i64_nxv8i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, zero, e64,m8,ta,mu
-; CHECK-NEXT: vle64.v v24, (a0)
+; CHECK-NEXT: vl8re64.v v24, (a0)
; CHECK-NEXT: vsetvli a0, a1, e64,m8,tu,mu
; CHECK-NEXT: vsra.vv v8, v16, v24, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
define <vscale x 64 x i8> @intrinsic_vsrl_mask_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i8> %2, <vscale x 64 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vsrl_mask_vv_nxv64i8_nxv64i8_nxv64i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, zero, e8,m8,ta,mu
-; CHECK-NEXT: vle8.v v24, (a0)
+; CHECK-NEXT: vl8re8.v v24, (a0)
; CHECK-NEXT: vsetvli a0, a1, e8,m8,tu,mu
; CHECK-NEXT: vsrl.vv v8, v16, v24, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
define <vscale x 32 x i16> @intrinsic_vsrl_mask_vv_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, <vscale x 32 x i16> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vsrl_mask_vv_nxv32i16_nxv32i16_nxv32i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, zero, e16,m8,ta,mu
-; CHECK-NEXT: vle16.v v24, (a0)
+; CHECK-NEXT: vl8re16.v v24, (a0)
; CHECK-NEXT: vsetvli a0, a1, e16,m8,tu,mu
; CHECK-NEXT: vsrl.vv v8, v16, v24, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
define <vscale x 16 x i32> @intrinsic_vsrl_mask_vv_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vsrl_mask_vv_nxv16i32_nxv16i32_nxv16i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, zero, e32,m8,ta,mu
-; CHECK-NEXT: vle32.v v24, (a0)
+; CHECK-NEXT: vl8re32.v v24, (a0)
; CHECK-NEXT: vsetvli a0, a1, e32,m8,tu,mu
; CHECK-NEXT: vsrl.vv v8, v16, v24, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
define <vscale x 64 x i8> @intrinsic_vsrl_mask_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i8> %2, <vscale x 64 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vsrl_mask_vv_nxv64i8_nxv64i8_nxv64i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, zero, e8,m8,ta,mu
-; CHECK-NEXT: vle8.v v24, (a0)
+; CHECK-NEXT: vl8re8.v v24, (a0)
; CHECK-NEXT: vsetvli a0, a1, e8,m8,tu,mu
; CHECK-NEXT: vsrl.vv v8, v16, v24, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
define <vscale x 32 x i16> @intrinsic_vsrl_mask_vv_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, <vscale x 32 x i16> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vsrl_mask_vv_nxv32i16_nxv32i16_nxv32i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, zero, e16,m8,ta,mu
-; CHECK-NEXT: vle16.v v24, (a0)
+; CHECK-NEXT: vl8re16.v v24, (a0)
; CHECK-NEXT: vsetvli a0, a1, e16,m8,tu,mu
; CHECK-NEXT: vsrl.vv v8, v16, v24, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
define <vscale x 16 x i32> @intrinsic_vsrl_mask_vv_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vsrl_mask_vv_nxv16i32_nxv16i32_nxv16i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, zero, e32,m8,ta,mu
-; CHECK-NEXT: vle32.v v24, (a0)
+; CHECK-NEXT: vl8re32.v v24, (a0)
; CHECK-NEXT: vsetvli a0, a1, e32,m8,tu,mu
; CHECK-NEXT: vsrl.vv v8, v16, v24, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
define <vscale x 8 x i64> @intrinsic_vsrl_mask_vv_nxv8i64_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vsrl_mask_vv_nxv8i64_nxv8i64_nxv8i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, zero, e64,m8,ta,mu
-; CHECK-NEXT: vle64.v v24, (a0)
+; CHECK-NEXT: vl8re64.v v24, (a0)
; CHECK-NEXT: vsetvli a0, a1, e64,m8,tu,mu
; CHECK-NEXT: vsrl.vv v8, v16, v24, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
define <vscale x 64 x i8> @intrinsic_vssra_mask_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i8> %2, <vscale x 64 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vssra_mask_vv_nxv64i8_nxv64i8_nxv64i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, zero, e8,m8,ta,mu
-; CHECK-NEXT: vle8.v v24, (a0)
+; CHECK-NEXT: vl8re8.v v24, (a0)
; CHECK-NEXT: vsetvli a0, a1, e8,m8,tu,mu
; CHECK-NEXT: vssra.vv v8, v16, v24, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
define <vscale x 32 x i16> @intrinsic_vssra_mask_vv_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, <vscale x 32 x i16> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vssra_mask_vv_nxv32i16_nxv32i16_nxv32i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, zero, e16,m8,ta,mu
-; CHECK-NEXT: vle16.v v24, (a0)
+; CHECK-NEXT: vl8re16.v v24, (a0)
; CHECK-NEXT: vsetvli a0, a1, e16,m8,tu,mu
; CHECK-NEXT: vssra.vv v8, v16, v24, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
define <vscale x 16 x i32> @intrinsic_vssra_mask_vv_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vssra_mask_vv_nxv16i32_nxv16i32_nxv16i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, zero, e32,m8,ta,mu
-; CHECK-NEXT: vle32.v v24, (a0)
+; CHECK-NEXT: vl8re32.v v24, (a0)
; CHECK-NEXT: vsetvli a0, a1, e32,m8,tu,mu
; CHECK-NEXT: vssra.vv v8, v16, v24, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
define <vscale x 64 x i8> @intrinsic_vssra_mask_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i8> %2, <vscale x 64 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vssra_mask_vv_nxv64i8_nxv64i8_nxv64i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, zero, e8,m8,ta,mu
-; CHECK-NEXT: vle8.v v24, (a0)
+; CHECK-NEXT: vl8re8.v v24, (a0)
; CHECK-NEXT: vsetvli a0, a1, e8,m8,tu,mu
; CHECK-NEXT: vssra.vv v8, v16, v24, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
define <vscale x 32 x i16> @intrinsic_vssra_mask_vv_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, <vscale x 32 x i16> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vssra_mask_vv_nxv32i16_nxv32i16_nxv32i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, zero, e16,m8,ta,mu
-; CHECK-NEXT: vle16.v v24, (a0)
+; CHECK-NEXT: vl8re16.v v24, (a0)
; CHECK-NEXT: vsetvli a0, a1, e16,m8,tu,mu
; CHECK-NEXT: vssra.vv v8, v16, v24, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
define <vscale x 16 x i32> @intrinsic_vssra_mask_vv_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vssra_mask_vv_nxv16i32_nxv16i32_nxv16i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, zero, e32,m8,ta,mu
-; CHECK-NEXT: vle32.v v24, (a0)
+; CHECK-NEXT: vl8re32.v v24, (a0)
; CHECK-NEXT: vsetvli a0, a1, e32,m8,tu,mu
; CHECK-NEXT: vssra.vv v8, v16, v24, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
define <vscale x 8 x i64> @intrinsic_vssra_mask_vv_nxv8i64_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vssra_mask_vv_nxv8i64_nxv8i64_nxv8i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, zero, e64,m8,ta,mu
-; CHECK-NEXT: vle64.v v24, (a0)
+; CHECK-NEXT: vl8re64.v v24, (a0)
; CHECK-NEXT: vsetvli a0, a1, e64,m8,tu,mu
; CHECK-NEXT: vssra.vv v8, v16, v24, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
define <vscale x 64 x i8> @intrinsic_vssrl_mask_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i8> %2, <vscale x 64 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vssrl_mask_vv_nxv64i8_nxv64i8_nxv64i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, zero, e8,m8,ta,mu
-; CHECK-NEXT: vle8.v v24, (a0)
+; CHECK-NEXT: vl8re8.v v24, (a0)
; CHECK-NEXT: vsetvli a0, a1, e8,m8,tu,mu
; CHECK-NEXT: vssrl.vv v8, v16, v24, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
define <vscale x 32 x i16> @intrinsic_vssrl_mask_vv_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, <vscale x 32 x i16> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vssrl_mask_vv_nxv32i16_nxv32i16_nxv32i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, zero, e16,m8,ta,mu
-; CHECK-NEXT: vle16.v v24, (a0)
+; CHECK-NEXT: vl8re16.v v24, (a0)
; CHECK-NEXT: vsetvli a0, a1, e16,m8,tu,mu
; CHECK-NEXT: vssrl.vv v8, v16, v24, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
define <vscale x 16 x i32> @intrinsic_vssrl_mask_vv_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vssrl_mask_vv_nxv16i32_nxv16i32_nxv16i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, zero, e32,m8,ta,mu
-; CHECK-NEXT: vle32.v v24, (a0)
+; CHECK-NEXT: vl8re32.v v24, (a0)
; CHECK-NEXT: vsetvli a0, a1, e32,m8,tu,mu
; CHECK-NEXT: vssrl.vv v8, v16, v24, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
define <vscale x 64 x i8> @intrinsic_vssrl_mask_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i8> %2, <vscale x 64 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vssrl_mask_vv_nxv64i8_nxv64i8_nxv64i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, zero, e8,m8,ta,mu
-; CHECK-NEXT: vle8.v v24, (a0)
+; CHECK-NEXT: vl8re8.v v24, (a0)
; CHECK-NEXT: vsetvli a0, a1, e8,m8,tu,mu
; CHECK-NEXT: vssrl.vv v8, v16, v24, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
define <vscale x 32 x i16> @intrinsic_vssrl_mask_vv_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, <vscale x 32 x i16> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vssrl_mask_vv_nxv32i16_nxv32i16_nxv32i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, zero, e16,m8,ta,mu
-; CHECK-NEXT: vle16.v v24, (a0)
+; CHECK-NEXT: vl8re16.v v24, (a0)
; CHECK-NEXT: vsetvli a0, a1, e16,m8,tu,mu
; CHECK-NEXT: vssrl.vv v8, v16, v24, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
define <vscale x 16 x i32> @intrinsic_vssrl_mask_vv_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vssrl_mask_vv_nxv16i32_nxv16i32_nxv16i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, zero, e32,m8,ta,mu
-; CHECK-NEXT: vle32.v v24, (a0)
+; CHECK-NEXT: vl8re32.v v24, (a0)
; CHECK-NEXT: vsetvli a0, a1, e32,m8,tu,mu
; CHECK-NEXT: vssrl.vv v8, v16, v24, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
define <vscale x 8 x i64> @intrinsic_vssrl_mask_vv_nxv8i64_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vssrl_mask_vv_nxv8i64_nxv8i64_nxv8i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, zero, e64,m8,ta,mu
-; CHECK-NEXT: vle64.v v24, (a0)
+; CHECK-NEXT: vl8re64.v v24, (a0)
; CHECK-NEXT: vsetvli a0, a1, e64,m8,tu,mu
; CHECK-NEXT: vssrl.vv v8, v16, v24, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
define <vscale x 64 x i8> @intrinsic_vssub_mask_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i8> %2, <vscale x 64 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vssub_mask_vv_nxv64i8_nxv64i8_nxv64i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, zero, e8,m8,ta,mu
-; CHECK-NEXT: vle8.v v24, (a0)
+; CHECK-NEXT: vl8re8.v v24, (a0)
; CHECK-NEXT: vsetvli a0, a1, e8,m8,tu,mu
; CHECK-NEXT: vssub.vv v8, v16, v24, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
define <vscale x 32 x i16> @intrinsic_vssub_mask_vv_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, <vscale x 32 x i16> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vssub_mask_vv_nxv32i16_nxv32i16_nxv32i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, zero, e16,m8,ta,mu
-; CHECK-NEXT: vle16.v v24, (a0)
+; CHECK-NEXT: vl8re16.v v24, (a0)
; CHECK-NEXT: vsetvli a0, a1, e16,m8,tu,mu
; CHECK-NEXT: vssub.vv v8, v16, v24, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
define <vscale x 16 x i32> @intrinsic_vssub_mask_vv_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vssub_mask_vv_nxv16i32_nxv16i32_nxv16i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, zero, e32,m8,ta,mu
-; CHECK-NEXT: vle32.v v24, (a0)
+; CHECK-NEXT: vl8re32.v v24, (a0)
; CHECK-NEXT: vsetvli a0, a1, e32,m8,tu,mu
; CHECK-NEXT: vssub.vv v8, v16, v24, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
define <vscale x 64 x i8> @intrinsic_vssub_mask_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i8> %2, <vscale x 64 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vssub_mask_vv_nxv64i8_nxv64i8_nxv64i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, zero, e8,m8,ta,mu
-; CHECK-NEXT: vle8.v v24, (a0)
+; CHECK-NEXT: vl8re8.v v24, (a0)
; CHECK-NEXT: vsetvli a0, a1, e8,m8,tu,mu
; CHECK-NEXT: vssub.vv v8, v16, v24, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
define <vscale x 32 x i16> @intrinsic_vssub_mask_vv_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, <vscale x 32 x i16> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vssub_mask_vv_nxv32i16_nxv32i16_nxv32i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, zero, e16,m8,ta,mu
-; CHECK-NEXT: vle16.v v24, (a0)
+; CHECK-NEXT: vl8re16.v v24, (a0)
; CHECK-NEXT: vsetvli a0, a1, e16,m8,tu,mu
; CHECK-NEXT: vssub.vv v8, v16, v24, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
define <vscale x 16 x i32> @intrinsic_vssub_mask_vv_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vssub_mask_vv_nxv16i32_nxv16i32_nxv16i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, zero, e32,m8,ta,mu
-; CHECK-NEXT: vle32.v v24, (a0)
+; CHECK-NEXT: vl8re32.v v24, (a0)
; CHECK-NEXT: vsetvli a0, a1, e32,m8,tu,mu
; CHECK-NEXT: vssub.vv v8, v16, v24, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
define <vscale x 8 x i64> @intrinsic_vssub_mask_vv_nxv8i64_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vssub_mask_vv_nxv8i64_nxv8i64_nxv8i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, zero, e64,m8,ta,mu
-; CHECK-NEXT: vle64.v v24, (a0)
+; CHECK-NEXT: vl8re64.v v24, (a0)
; CHECK-NEXT: vsetvli a0, a1, e64,m8,tu,mu
; CHECK-NEXT: vssub.vv v8, v16, v24, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
define <vscale x 64 x i8> @intrinsic_vssubu_mask_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i8> %2, <vscale x 64 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vssubu_mask_vv_nxv64i8_nxv64i8_nxv64i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, zero, e8,m8,ta,mu
-; CHECK-NEXT: vle8.v v24, (a0)
+; CHECK-NEXT: vl8re8.v v24, (a0)
; CHECK-NEXT: vsetvli a0, a1, e8,m8,tu,mu
; CHECK-NEXT: vssubu.vv v8, v16, v24, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
define <vscale x 32 x i16> @intrinsic_vssubu_mask_vv_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, <vscale x 32 x i16> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vssubu_mask_vv_nxv32i16_nxv32i16_nxv32i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, zero, e16,m8,ta,mu
-; CHECK-NEXT: vle16.v v24, (a0)
+; CHECK-NEXT: vl8re16.v v24, (a0)
; CHECK-NEXT: vsetvli a0, a1, e16,m8,tu,mu
; CHECK-NEXT: vssubu.vv v8, v16, v24, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
define <vscale x 16 x i32> @intrinsic_vssubu_mask_vv_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vssubu_mask_vv_nxv16i32_nxv16i32_nxv16i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, zero, e32,m8,ta,mu
-; CHECK-NEXT: vle32.v v24, (a0)
+; CHECK-NEXT: vl8re32.v v24, (a0)
; CHECK-NEXT: vsetvli a0, a1, e32,m8,tu,mu
; CHECK-NEXT: vssubu.vv v8, v16, v24, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
define <vscale x 64 x i8> @intrinsic_vssubu_mask_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i8> %2, <vscale x 64 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vssubu_mask_vv_nxv64i8_nxv64i8_nxv64i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, zero, e8,m8,ta,mu
-; CHECK-NEXT: vle8.v v24, (a0)
+; CHECK-NEXT: vl8re8.v v24, (a0)
; CHECK-NEXT: vsetvli a0, a1, e8,m8,tu,mu
; CHECK-NEXT: vssubu.vv v8, v16, v24, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
define <vscale x 32 x i16> @intrinsic_vssubu_mask_vv_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, <vscale x 32 x i16> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vssubu_mask_vv_nxv32i16_nxv32i16_nxv32i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, zero, e16,m8,ta,mu
-; CHECK-NEXT: vle16.v v24, (a0)
+; CHECK-NEXT: vl8re16.v v24, (a0)
; CHECK-NEXT: vsetvli a0, a1, e16,m8,tu,mu
; CHECK-NEXT: vssubu.vv v8, v16, v24, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
define <vscale x 16 x i32> @intrinsic_vssubu_mask_vv_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vssubu_mask_vv_nxv16i32_nxv16i32_nxv16i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, zero, e32,m8,ta,mu
-; CHECK-NEXT: vle32.v v24, (a0)
+; CHECK-NEXT: vl8re32.v v24, (a0)
; CHECK-NEXT: vsetvli a0, a1, e32,m8,tu,mu
; CHECK-NEXT: vssubu.vv v8, v16, v24, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
define <vscale x 8 x i64> @intrinsic_vssubu_mask_vv_nxv8i64_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vssubu_mask_vv_nxv8i64_nxv8i64_nxv8i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, zero, e64,m8,ta,mu
-; CHECK-NEXT: vle64.v v24, (a0)
+; CHECK-NEXT: vl8re64.v v24, (a0)
; CHECK-NEXT: vsetvli a0, a1, e64,m8,tu,mu
; CHECK-NEXT: vssubu.vv v8, v16, v24, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
define <vscale x 64 x i8> @intrinsic_vsub_mask_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i8> %2, <vscale x 64 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vsub_mask_vv_nxv64i8_nxv64i8_nxv64i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, zero, e8,m8,ta,mu
-; CHECK-NEXT: vle8.v v24, (a0)
+; CHECK-NEXT: vl8re8.v v24, (a0)
; CHECK-NEXT: vsetvli a0, a1, e8,m8,tu,mu
; CHECK-NEXT: vsub.vv v8, v16, v24, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
define <vscale x 32 x i16> @intrinsic_vsub_mask_vv_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, <vscale x 32 x i16> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vsub_mask_vv_nxv32i16_nxv32i16_nxv32i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, zero, e16,m8,ta,mu
-; CHECK-NEXT: vle16.v v24, (a0)
+; CHECK-NEXT: vl8re16.v v24, (a0)
; CHECK-NEXT: vsetvli a0, a1, e16,m8,tu,mu
; CHECK-NEXT: vsub.vv v8, v16, v24, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
define <vscale x 16 x i32> @intrinsic_vsub_mask_vv_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vsub_mask_vv_nxv16i32_nxv16i32_nxv16i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, zero, e32,m8,ta,mu
-; CHECK-NEXT: vle32.v v24, (a0)
+; CHECK-NEXT: vl8re32.v v24, (a0)
; CHECK-NEXT: vsetvli a0, a1, e32,m8,tu,mu
; CHECK-NEXT: vsub.vv v8, v16, v24, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
define <vscale x 64 x i8> @intrinsic_vsub_mask_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i8> %2, <vscale x 64 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vsub_mask_vv_nxv64i8_nxv64i8_nxv64i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, zero, e8,m8,ta,mu
-; CHECK-NEXT: vle8.v v24, (a0)
+; CHECK-NEXT: vl8re8.v v24, (a0)
; CHECK-NEXT: vsetvli a0, a1, e8,m8,tu,mu
; CHECK-NEXT: vsub.vv v8, v16, v24, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
define <vscale x 32 x i16> @intrinsic_vsub_mask_vv_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, <vscale x 32 x i16> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vsub_mask_vv_nxv32i16_nxv32i16_nxv32i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, zero, e16,m8,ta,mu
-; CHECK-NEXT: vle16.v v24, (a0)
+; CHECK-NEXT: vl8re16.v v24, (a0)
; CHECK-NEXT: vsetvli a0, a1, e16,m8,tu,mu
; CHECK-NEXT: vsub.vv v8, v16, v24, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
define <vscale x 16 x i32> @intrinsic_vsub_mask_vv_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vsub_mask_vv_nxv16i32_nxv16i32_nxv16i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, zero, e32,m8,ta,mu
-; CHECK-NEXT: vle32.v v24, (a0)
+; CHECK-NEXT: vl8re32.v v24, (a0)
; CHECK-NEXT: vsetvli a0, a1, e32,m8,tu,mu
; CHECK-NEXT: vsub.vv v8, v16, v24, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
define <vscale x 8 x i64> @intrinsic_vsub_mask_vv_nxv8i64_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vsub_mask_vv_nxv8i64_nxv8i64_nxv8i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, zero, e64,m8,ta,mu
-; CHECK-NEXT: vle64.v v24, (a0)
+; CHECK-NEXT: vl8re64.v v24, (a0)
; CHECK-NEXT: vsetvli a0, a1, e64,m8,tu,mu
; CHECK-NEXT: vsub.vv v8, v16, v24, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
define <vscale x 32 x i16> @intrinsic_vwadd.w_mask_wv_nxv32i16_nxv32i16_nxv32i8(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vwadd.w_mask_wv_nxv32i16_nxv32i16_nxv32i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, zero, e8,m4,ta,mu
-; CHECK-NEXT: vle8.v v28, (a0)
+; CHECK-NEXT: vl4re8.v v28, (a0)
; CHECK-NEXT: vsetvli a0, a1, e8,m4,tu,mu
; CHECK-NEXT: vwadd.wv v8, v16, v28, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
define <vscale x 16 x i32> @intrinsic_vwadd.w_mask_wv_nxv16i32_nxv16i32_nxv16i16(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vwadd.w_mask_wv_nxv16i32_nxv16i32_nxv16i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, zero, e16,m4,ta,mu
-; CHECK-NEXT: vle16.v v28, (a0)
+; CHECK-NEXT: vl4re16.v v28, (a0)
; CHECK-NEXT: vsetvli a0, a1, e16,m4,tu,mu
; CHECK-NEXT: vwadd.wv v8, v16, v28, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
define <vscale x 32 x i16> @intrinsic_vwadd.w_mask_wv_nxv32i16_nxv32i16_nxv32i8(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vwadd.w_mask_wv_nxv32i16_nxv32i16_nxv32i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, zero, e8,m4,ta,mu
-; CHECK-NEXT: vle8.v v28, (a0)
+; CHECK-NEXT: vl4re8.v v28, (a0)
; CHECK-NEXT: vsetvli a0, a1, e8,m4,tu,mu
; CHECK-NEXT: vwadd.wv v8, v16, v28, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
define <vscale x 16 x i32> @intrinsic_vwadd.w_mask_wv_nxv16i32_nxv16i32_nxv16i16(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vwadd.w_mask_wv_nxv16i32_nxv16i32_nxv16i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, zero, e16,m4,ta,mu
-; CHECK-NEXT: vle16.v v28, (a0)
+; CHECK-NEXT: vl4re16.v v28, (a0)
; CHECK-NEXT: vsetvli a0, a1, e16,m4,tu,mu
; CHECK-NEXT: vwadd.wv v8, v16, v28, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
define <vscale x 8 x i64> @intrinsic_vwadd.w_mask_wv_nxv8i64_nxv8i64_nxv8i32(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vwadd.w_mask_wv_nxv8i64_nxv8i64_nxv8i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, zero, e32,m4,ta,mu
-; CHECK-NEXT: vle32.v v28, (a0)
+; CHECK-NEXT: vl4re32.v v28, (a0)
; CHECK-NEXT: vsetvli a0, a1, e32,m4,tu,mu
; CHECK-NEXT: vwadd.wv v8, v16, v28, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
define <vscale x 32 x i16> @intrinsic_vwaddu.w_mask_wv_nxv32i16_nxv32i16_nxv32i8(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vwaddu.w_mask_wv_nxv32i16_nxv32i16_nxv32i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, zero, e8,m4,ta,mu
-; CHECK-NEXT: vle8.v v28, (a0)
+; CHECK-NEXT: vl4re8.v v28, (a0)
; CHECK-NEXT: vsetvli a0, a1, e8,m4,tu,mu
; CHECK-NEXT: vwaddu.wv v8, v16, v28, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
define <vscale x 16 x i32> @intrinsic_vwaddu.w_mask_wv_nxv16i32_nxv16i32_nxv16i16(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vwaddu.w_mask_wv_nxv16i32_nxv16i32_nxv16i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, zero, e16,m4,ta,mu
-; CHECK-NEXT: vle16.v v28, (a0)
+; CHECK-NEXT: vl4re16.v v28, (a0)
; CHECK-NEXT: vsetvli a0, a1, e16,m4,tu,mu
; CHECK-NEXT: vwaddu.wv v8, v16, v28, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
define <vscale x 32 x i16> @intrinsic_vwaddu.w_mask_wv_nxv32i16_nxv32i16_nxv32i8(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vwaddu.w_mask_wv_nxv32i16_nxv32i16_nxv32i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, zero, e8,m4,ta,mu
-; CHECK-NEXT: vle8.v v28, (a0)
+; CHECK-NEXT: vl4re8.v v28, (a0)
; CHECK-NEXT: vsetvli a0, a1, e8,m4,tu,mu
; CHECK-NEXT: vwaddu.wv v8, v16, v28, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
define <vscale x 16 x i32> @intrinsic_vwaddu.w_mask_wv_nxv16i32_nxv16i32_nxv16i16(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vwaddu.w_mask_wv_nxv16i32_nxv16i32_nxv16i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, zero, e16,m4,ta,mu
-; CHECK-NEXT: vle16.v v28, (a0)
+; CHECK-NEXT: vl4re16.v v28, (a0)
; CHECK-NEXT: vsetvli a0, a1, e16,m4,tu,mu
; CHECK-NEXT: vwaddu.wv v8, v16, v28, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
define <vscale x 8 x i64> @intrinsic_vwaddu.w_mask_wv_nxv8i64_nxv8i64_nxv8i32(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vwaddu.w_mask_wv_nxv8i64_nxv8i64_nxv8i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, zero, e32,m4,ta,mu
-; CHECK-NEXT: vle32.v v28, (a0)
+; CHECK-NEXT: vl4re32.v v28, (a0)
; CHECK-NEXT: vsetvli a0, a1, e32,m4,tu,mu
; CHECK-NEXT: vwaddu.wv v8, v16, v28, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
define <vscale x 32 x i16> @intrinsic_vwsub.w_mask_wv_nxv32i16_nxv32i16_nxv32i8(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vwsub.w_mask_wv_nxv32i16_nxv32i16_nxv32i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, zero, e8,m4,ta,mu
-; CHECK-NEXT: vle8.v v28, (a0)
+; CHECK-NEXT: vl4re8.v v28, (a0)
; CHECK-NEXT: vsetvli a0, a1, e8,m4,tu,mu
; CHECK-NEXT: vwsub.wv v8, v16, v28, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
define <vscale x 16 x i32> @intrinsic_vwsub.w_mask_wv_nxv16i32_nxv16i32_nxv16i16(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vwsub.w_mask_wv_nxv16i32_nxv16i32_nxv16i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, zero, e16,m4,ta,mu
-; CHECK-NEXT: vle16.v v28, (a0)
+; CHECK-NEXT: vl4re16.v v28, (a0)
; CHECK-NEXT: vsetvli a0, a1, e16,m4,tu,mu
; CHECK-NEXT: vwsub.wv v8, v16, v28, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
define <vscale x 32 x i16> @intrinsic_vwsub.w_mask_wv_nxv32i16_nxv32i16_nxv32i8(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vwsub.w_mask_wv_nxv32i16_nxv32i16_nxv32i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, zero, e8,m4,ta,mu
-; CHECK-NEXT: vle8.v v28, (a0)
+; CHECK-NEXT: vl4re8.v v28, (a0)
; CHECK-NEXT: vsetvli a0, a1, e8,m4,tu,mu
; CHECK-NEXT: vwsub.wv v8, v16, v28, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
define <vscale x 16 x i32> @intrinsic_vwsub.w_mask_wv_nxv16i32_nxv16i32_nxv16i16(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vwsub.w_mask_wv_nxv16i32_nxv16i32_nxv16i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, zero, e16,m4,ta,mu
-; CHECK-NEXT: vle16.v v28, (a0)
+; CHECK-NEXT: vl4re16.v v28, (a0)
; CHECK-NEXT: vsetvli a0, a1, e16,m4,tu,mu
; CHECK-NEXT: vwsub.wv v8, v16, v28, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
define <vscale x 8 x i64> @intrinsic_vwsub.w_mask_wv_nxv8i64_nxv8i64_nxv8i32(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vwsub.w_mask_wv_nxv8i64_nxv8i64_nxv8i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, zero, e32,m4,ta,mu
-; CHECK-NEXT: vle32.v v28, (a0)
+; CHECK-NEXT: vl4re32.v v28, (a0)
; CHECK-NEXT: vsetvli a0, a1, e32,m4,tu,mu
; CHECK-NEXT: vwsub.wv v8, v16, v28, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
define <vscale x 32 x i16> @intrinsic_vwsubu.w_mask_wv_nxv32i16_nxv32i16_nxv32i8(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vwsubu.w_mask_wv_nxv32i16_nxv32i16_nxv32i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, zero, e8,m4,ta,mu
-; CHECK-NEXT: vle8.v v28, (a0)
+; CHECK-NEXT: vl4re8.v v28, (a0)
; CHECK-NEXT: vsetvli a0, a1, e8,m4,tu,mu
; CHECK-NEXT: vwsubu.wv v8, v16, v28, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
define <vscale x 16 x i32> @intrinsic_vwsubu.w_mask_wv_nxv16i32_nxv16i32_nxv16i16(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vwsubu.w_mask_wv_nxv16i32_nxv16i32_nxv16i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, zero, e16,m4,ta,mu
-; CHECK-NEXT: vle16.v v28, (a0)
+; CHECK-NEXT: vl4re16.v v28, (a0)
; CHECK-NEXT: vsetvli a0, a1, e16,m4,tu,mu
; CHECK-NEXT: vwsubu.wv v8, v16, v28, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
define <vscale x 32 x i16> @intrinsic_vwsubu.w_mask_wv_nxv32i16_nxv32i16_nxv32i8(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vwsubu.w_mask_wv_nxv32i16_nxv32i16_nxv32i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, zero, e8,m4,ta,mu
-; CHECK-NEXT: vle8.v v28, (a0)
+; CHECK-NEXT: vl4re8.v v28, (a0)
; CHECK-NEXT: vsetvli a0, a1, e8,m4,tu,mu
; CHECK-NEXT: vwsubu.wv v8, v16, v28, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
define <vscale x 16 x i32> @intrinsic_vwsubu.w_mask_wv_nxv16i32_nxv16i32_nxv16i16(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vwsubu.w_mask_wv_nxv16i32_nxv16i32_nxv16i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, zero, e16,m4,ta,mu
-; CHECK-NEXT: vle16.v v28, (a0)
+; CHECK-NEXT: vl4re16.v v28, (a0)
; CHECK-NEXT: vsetvli a0, a1, e16,m4,tu,mu
; CHECK-NEXT: vwsubu.wv v8, v16, v28, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
define <vscale x 8 x i64> @intrinsic_vwsubu.w_mask_wv_nxv8i64_nxv8i64_nxv8i32(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vwsubu.w_mask_wv_nxv8i64_nxv8i64_nxv8i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, zero, e32,m4,ta,mu
-; CHECK-NEXT: vle32.v v28, (a0)
+; CHECK-NEXT: vl4re32.v v28, (a0)
; CHECK-NEXT: vsetvli a0, a1, e32,m4,tu,mu
; CHECK-NEXT: vwsubu.wv v8, v16, v28, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
define <vscale x 64 x i8> @intrinsic_vxor_mask_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i8> %2, <vscale x 64 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vxor_mask_vv_nxv64i8_nxv64i8_nxv64i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, zero, e8,m8,ta,mu
-; CHECK-NEXT: vle8.v v24, (a0)
+; CHECK-NEXT: vl8re8.v v24, (a0)
; CHECK-NEXT: vsetvli a0, a1, e8,m8,tu,mu
; CHECK-NEXT: vxor.vv v8, v16, v24, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
define <vscale x 32 x i16> @intrinsic_vxor_mask_vv_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, <vscale x 32 x i16> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vxor_mask_vv_nxv32i16_nxv32i16_nxv32i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, zero, e16,m8,ta,mu
-; CHECK-NEXT: vle16.v v24, (a0)
+; CHECK-NEXT: vl8re16.v v24, (a0)
; CHECK-NEXT: vsetvli a0, a1, e16,m8,tu,mu
; CHECK-NEXT: vxor.vv v8, v16, v24, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
define <vscale x 16 x i32> @intrinsic_vxor_mask_vv_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vxor_mask_vv_nxv16i32_nxv16i32_nxv16i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, zero, e32,m8,ta,mu
-; CHECK-NEXT: vle32.v v24, (a0)
+; CHECK-NEXT: vl8re32.v v24, (a0)
; CHECK-NEXT: vsetvli a0, a1, e32,m8,tu,mu
; CHECK-NEXT: vxor.vv v8, v16, v24, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
define <vscale x 64 x i8> @intrinsic_vxor_mask_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i8> %2, <vscale x 64 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vxor_mask_vv_nxv64i8_nxv64i8_nxv64i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, zero, e8,m8,ta,mu
-; CHECK-NEXT: vle8.v v24, (a0)
+; CHECK-NEXT: vl8re8.v v24, (a0)
; CHECK-NEXT: vsetvli a0, a1, e8,m8,tu,mu
; CHECK-NEXT: vxor.vv v8, v16, v24, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
define <vscale x 32 x i16> @intrinsic_vxor_mask_vv_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, <vscale x 32 x i16> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vxor_mask_vv_nxv32i16_nxv32i16_nxv32i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, zero, e16,m8,ta,mu
-; CHECK-NEXT: vle16.v v24, (a0)
+; CHECK-NEXT: vl8re16.v v24, (a0)
; CHECK-NEXT: vsetvli a0, a1, e16,m8,tu,mu
; CHECK-NEXT: vxor.vv v8, v16, v24, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
define <vscale x 16 x i32> @intrinsic_vxor_mask_vv_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vxor_mask_vv_nxv16i32_nxv16i32_nxv16i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, zero, e32,m8,ta,mu
-; CHECK-NEXT: vle32.v v24, (a0)
+; CHECK-NEXT: vl8re32.v v24, (a0)
; CHECK-NEXT: vsetvli a0, a1, e32,m8,tu,mu
; CHECK-NEXT: vxor.vv v8, v16, v24, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
define <vscale x 8 x i64> @intrinsic_vxor_mask_vv_nxv8i64_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vxor_mask_vv_nxv8i64_nxv8i64_nxv8i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, zero, e64,m8,ta,mu
-; CHECK-NEXT: vle64.v v24, (a0)
+; CHECK-NEXT: vl8re64.v v24, (a0)
; CHECK-NEXT: vsetvli a0, a1, e64,m8,tu,mu
; CHECK-NEXT: vxor.vv v8, v16, v24, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
define i32 @foo({ {<vscale x 2 x i32>, <vscale x 2 x i32>}, i32 } %x, <vscale x 2 x i32>* %y, <vscale x 2 x i32>* %z) {
; CHECK-LABEL: foo:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a3, zero, e32,m1,ta,mu
-; CHECK-NEXT: vse32.v v8, (a1)
-; CHECK-NEXT: vse32.v v9, (a2)
+; CHECK-NEXT: vs1r.v v8, (a1)
+; CHECK-NEXT: vs1r.v v9, (a2)
; CHECK-NEXT: ret
entry:
br label %return
vfncvt.xu.f.w v0, v4, v0.t
# CHECK-ERROR: The destination vector register group cannot overlap the mask register.
# CHECK-ERROR-LABEL: vfncvt.xu.f.w v0, v4, v0.t
+
+vl2re8.v v1, (a0)
+# CHECK-ERROR: invalid operand for instruction
+
+vl4re8.v v1, (a0)
+# CHECK-ERROR: invalid operand for instruction
+
+vl4re8.v v2, (a0)
+# CHECK-ERROR: invalid operand for instruction
+
+vl4re8.v v3, (a0)
+# CHECK-ERROR: invalid operand for instruction
+
+vl8re8.v v1, (a0)
+# CHECK-ERROR: invalid operand for instruction
+
+vl8re8.v v2, (a0)
+# CHECK-ERROR: invalid operand for instruction
+
+vl8re8.v v3, (a0)
+# CHECK-ERROR: invalid operand for instruction
+
+vl8re8.v v4, (a0)
+# CHECK-ERROR: invalid operand for instruction
+
+vl8re8.v v5, (a0)
+# CHECK-ERROR: invalid operand for instruction
+
+vl8re8.v v6, (a0)
+# CHECK-ERROR: invalid operand for instruction
+
+vl8re8.v v7, (a0)
+# CHECK-ERROR: invalid operand for instruction
+
+vs2r.v v1, (a0)
+# CHECK-ERROR: invalid operand for instruction
+
+vs4r.v v1, (a0)
+# CHECK-ERROR: invalid operand for instruction
+
+vs4r.v v2, (a0)
+# CHECK-ERROR: invalid operand for instruction
+
+vs4r.v v3, (a0)
+# CHECK-ERROR: invalid operand for instruction
+
+vs8r.v v1, (a0)
+# CHECK-ERROR: invalid operand for instruction
+
+vs8r.v v2, (a0)
+# CHECK-ERROR: invalid operand for instruction
+
+vs8r.v v3, (a0)
+# CHECK-ERROR: invalid operand for instruction
+
+vs8r.v v4, (a0)
+# CHECK-ERROR: invalid operand for instruction
+
+vs8r.v v5, (a0)
+# CHECK-ERROR: invalid operand for instruction
+
+vs8r.v v6, (a0)
+# CHECK-ERROR: invalid operand for instruction
+
+vs8r.v v7, (a0)
+# CHECK-ERROR: invalid operand for instruction
+