MVT XLenVT = Subtarget->getXLenVT();
RISCVVLMUL LMUL = RISCVTargetLowering::getLMUL(VT);
SDValue SEW = CurDAG->getTargetConstant(ScalarSize, DL, XLenVT);
+
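+ // The mask copy to V0 below produces both a chain and a glue result; the two
+ // are threaded into the pseudo's operands so the copy stays tied to it.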
+ SDValue Chain = Node->getOperand(0);
+ SDValue Glue;
+
unsigned CurOp = 2;
- SmallVector<SDValue, 7> Operands;
+ SmallVector<SDValue, 8> Operands;
if (IsMasked) {
SmallVector<SDValue, 8> Regs(Node->op_begin() + CurOp,
Node->op_begin() + CurOp + NF);
Operands.push_back(Base); // Base pointer.
if (IsStrided)
Operands.push_back(Node->getOperand(CurOp++)); // Stride.
- if (IsMasked)
- Operands.push_back(Node->getOperand(CurOp++)); // Mask.
+ if (IsMasked) {
+ // Mask needs to be copied to V0.
+ SDValue Mask = Node->getOperand(CurOp++);
+ Chain = CurDAG->getCopyToReg(Chain, DL, RISCV::V0, Mask, SDValue());
+ Glue = Chain.getValue(1);
+ Operands.push_back(CurDAG->getRegister(RISCV::V0, Mask.getValueType()));
+ }
SDValue VL;
selectVLOp(Node->getOperand(CurOp++), VL);
Operands.push_back(VL);
Operands.push_back(SEW);
- Operands.push_back(Node->getOperand(0)); // Chain.
+ Operands.push_back(Chain); // Chain.
+ if (Glue)
+ Operands.push_back(Glue);
const RISCV::VLSEGPseudo *P =
RISCV::getVLSEGPseudo(NF, IsMasked, IsStrided, /*FF*/ false, ScalarSize,
static_cast<unsigned>(LMUL));
RISCVVLMUL LMUL = RISCVTargetLowering::getLMUL(VT);
SDValue SEW = CurDAG->getTargetConstant(ScalarSize, DL, XLenVT);
+ SDValue Chain = Node->getOperand(0);
+ SDValue Glue;
+
unsigned CurOp = 2;
SmallVector<SDValue, 7> Operands;
if (IsMasked) {
SDValue Base;
SelectBaseAddr(Node->getOperand(CurOp++), Base);
Operands.push_back(Base); // Base pointer.
- if (IsMasked)
- Operands.push_back(Node->getOperand(CurOp++)); // Mask.
+ if (IsMasked) {
+ // Mask needs to be copied to V0.
+ SDValue Mask = Node->getOperand(CurOp++);
+ Chain = CurDAG->getCopyToReg(Chain, DL, RISCV::V0, Mask, SDValue());
+ Glue = Chain.getValue(1);
+ Operands.push_back(CurDAG->getRegister(RISCV::V0, Mask.getValueType()));
+ }
SDValue VL;
selectVLOp(Node->getOperand(CurOp++), VL);
Operands.push_back(VL);
Operands.push_back(SEW);
- Operands.push_back(Node->getOperand(0)); // Chain.
+ Operands.push_back(Chain); // Chain.
+ if (Glue)
+ Operands.push_back(Glue);
const RISCV::VLSEGPseudo *P =
RISCV::getVLSEGPseudo(NF, IsMasked, /*Strided*/ false, /*FF*/ true,
ScalarSize, static_cast<unsigned>(LMUL));
MVT XLenVT = Subtarget->getXLenVT();
RISCVVLMUL LMUL = RISCVTargetLowering::getLMUL(VT);
SDValue SEW = CurDAG->getTargetConstant(ScalarSize, DL, XLenVT);
+
+ SDValue Chain = Node->getOperand(0);
+ SDValue Glue;
+
unsigned CurOp = 2;
- SmallVector<SDValue, 7> Operands;
+ SmallVector<SDValue, 8> Operands;
if (IsMasked) {
SmallVector<SDValue, 8> Regs(Node->op_begin() + CurOp,
Node->op_begin() + CurOp + NF);
Operands.push_back(Base); // Base pointer.
Operands.push_back(Node->getOperand(CurOp++)); // Index.
MVT IndexVT = Operands.back()->getSimpleValueType(0);
- if (IsMasked)
- Operands.push_back(Node->getOperand(CurOp++)); // Mask.
+ if (IsMasked) {
+ // Mask needs to be copied to V0.
+ SDValue Mask = Node->getOperand(CurOp++);
+ Chain = CurDAG->getCopyToReg(Chain, DL, RISCV::V0, Mask, SDValue());
+ Glue = Chain.getValue(1);
+ Operands.push_back(CurDAG->getRegister(RISCV::V0, Mask.getValueType()));
+ }
SDValue VL;
selectVLOp(Node->getOperand(CurOp++), VL);
Operands.push_back(VL);
Operands.push_back(SEW);
- Operands.push_back(Node->getOperand(0)); // Chain.
+ Operands.push_back(Chain); // Chain.
+ if (Glue)
+ Operands.push_back(Glue);
assert(VT.getVectorElementCount() == IndexVT.getVectorElementCount() &&
"Element count mismatch");
SDValue SEW = CurDAG->getTargetConstant(ScalarSize, DL, XLenVT);
SmallVector<SDValue, 8> Regs(Node->op_begin() + 2, Node->op_begin() + 2 + NF);
SDValue StoreVal = createTuple(*CurDAG, Regs, NF, LMUL);
- SmallVector<SDValue, 7> Operands;
+
+ SDValue Chain = Node->getOperand(0);
+ SDValue Glue;
+
+ SmallVector<SDValue, 8> Operands;
Operands.push_back(StoreVal);
unsigned CurOp = 2 + NF;
SDValue Base;
SelectBaseAddr(Node->getOperand(CurOp++), Base);
Operands.push_back(Base); // Base pointer.
if (IsStrided)
Operands.push_back(Node->getOperand(CurOp++)); // Stride.
- if (IsMasked)
- Operands.push_back(Node->getOperand(CurOp++)); // Mask.
+ if (IsMasked) {
+ // Mask needs to be copied to V0.
+ SDValue Mask = Node->getOperand(CurOp++);
+ Chain = CurDAG->getCopyToReg(Chain, DL, RISCV::V0, Mask, SDValue());
+ Glue = Chain.getValue(1);
+ Operands.push_back(CurDAG->getRegister(RISCV::V0, Mask.getValueType()));
+ }
SDValue VL;
selectVLOp(Node->getOperand(CurOp++), VL);
Operands.push_back(VL);
Operands.push_back(SEW);
- Operands.push_back(Node->getOperand(0)); // Chain.
+ Operands.push_back(Chain); // Chain.
+ if (Glue)
+ Operands.push_back(Glue);
const RISCV::VSSEGPseudo *P = RISCV::getVSSEGPseudo(
NF, IsMasked, IsStrided, ScalarSize, static_cast<unsigned>(LMUL));
MachineSDNode *Store =
MVT XLenVT = Subtarget->getXLenVT();
RISCVVLMUL LMUL = RISCVTargetLowering::getLMUL(VT);
SDValue SEW = CurDAG->getTargetConstant(ScalarSize, DL, XLenVT);
- SmallVector<SDValue, 7> Operands;
SmallVector<SDValue, 8> Regs(Node->op_begin() + 2, Node->op_begin() + 2 + NF);
SDValue StoreVal = createTuple(*CurDAG, Regs, NF, LMUL);
+
+ SDValue Chain = Node->getOperand(0);
+ SDValue Glue;
+
+ SmallVector<SDValue, 8> Operands;
Operands.push_back(StoreVal);
unsigned CurOp = 2 + NF;
SDValue Base;
SelectBaseAddr(Node->getOperand(CurOp++), Base);
Operands.push_back(Base); // Base pointer.
Operands.push_back(Node->getOperand(CurOp++)); // Index.
MVT IndexVT = Operands.back()->getSimpleValueType(0);
- if (IsMasked)
- Operands.push_back(Node->getOperand(CurOp++)); // Mask.
+ if (IsMasked) {
+ // Mask needs to be copied to V0.
+ SDValue Mask = Node->getOperand(CurOp++);
+ Chain = CurDAG->getCopyToReg(Chain, DL, RISCV::V0, Mask, SDValue());
+ Glue = Chain.getValue(1);
+ Operands.push_back(CurDAG->getRegister(RISCV::V0, Mask.getValueType()));
+ }
SDValue VL;
selectVLOp(Node->getOperand(CurOp++), VL);
Operands.push_back(VL);
Operands.push_back(SEW);
- Operands.push_back(Node->getOperand(0)); // Chain.
+ Operands.push_back(Chain); // Chain.
+ if (Glue)
+ Operands.push_back(Glue);
assert(VT.getVectorElementCount() == IndexVT.getVectorElementCount() &&
"Element count mismatch");
MVT XLenVT = Subtarget->getXLenVT();
SDValue SEW = CurDAG->getTargetConstant(ScalarSize, DL, XLenVT);
+ SDValue Chain = Node->getOperand(0);
+ SDValue Glue;
+
unsigned CurOp = 2;
- SmallVector<SDValue, 7> Operands;
+ SmallVector<SDValue, 8> Operands;
if (IsMasked)
Operands.push_back(Node->getOperand(CurOp++));
SDValue Base;
SelectBaseAddr(Node->getOperand(CurOp++), Base);
Operands.push_back(Base); // Base pointer.
Operands.push_back(Node->getOperand(CurOp++)); // Index.
MVT IndexVT = Operands.back()->getSimpleValueType(0);
- if (IsMasked)
- Operands.push_back(Node->getOperand(CurOp++)); // Mask.
+ if (IsMasked) {
+ // Mask needs to be copied to V0.
+ SDValue Mask = Node->getOperand(CurOp++);
+ Chain = CurDAG->getCopyToReg(Chain, DL, RISCV::V0, Mask, SDValue());
+ Glue = Chain.getValue(1);
+ Operands.push_back(CurDAG->getRegister(RISCV::V0, Mask.getValueType()));
+ }
SDValue VL;
selectVLOp(Node->getOperand(CurOp++), VL);
Operands.push_back(VL);
Operands.push_back(SEW);
- Operands.push_back(Node->getOperand(0)); // Chain.
+ Operands.push_back(Chain); // Chain.
+ if (Glue)
+ Operands.push_back(Glue);
assert(VT.getVectorElementCount() == IndexVT.getVectorElementCount() &&
"Element count mismatch");
unsigned SEWImm = (IntNo == Intrinsic::riscv_vle1) ? 8 : ScalarSize;
SDValue SEW = CurDAG->getTargetConstant(SEWImm, DL, XLenVT);
+ SDValue Chain = Node->getOperand(0);
+ SDValue Glue;
+
unsigned CurOp = 2;
- SmallVector<SDValue, 7> Operands;
+ SmallVector<SDValue, 8> Operands;
if (IsMasked)
Operands.push_back(Node->getOperand(CurOp++));
SDValue Base;
SelectBaseAddr(Node->getOperand(CurOp++), Base);
Operands.push_back(Base); // Base pointer.
if (IsStrided)
Operands.push_back(Node->getOperand(CurOp++)); // Stride.
- if (IsMasked)
- Operands.push_back(Node->getOperand(CurOp++)); // Mask.
+ if (IsMasked) {
+ // Mask needs to be copied to V0.
+ SDValue Mask = Node->getOperand(CurOp++);
+ Chain = CurDAG->getCopyToReg(Chain, DL, RISCV::V0, Mask, SDValue());
+ Glue = Chain.getValue(1);
+ Operands.push_back(CurDAG->getRegister(RISCV::V0, Mask.getValueType()));
+ }
SDValue VL;
selectVLOp(Node->getOperand(CurOp++), VL);
Operands.push_back(VL);
Operands.push_back(SEW);
- Operands.push_back(Node->getOperand(0)); // Chain.
+ Operands.push_back(Chain); // Chain.
+ if (Glue)
+ Operands.push_back(Glue);
RISCVVLMUL LMUL = RISCVTargetLowering::getLMUL(VT);
const RISCV::VLEPseudo *P =
MVT XLenVT = Subtarget->getXLenVT();
SDValue SEW = CurDAG->getTargetConstant(ScalarSize, DL, XLenVT);
+ SDValue Chain = Node->getOperand(0);
+ SDValue Glue;
+
unsigned CurOp = 2;
SmallVector<SDValue, 7> Operands;
if (IsMasked)
  Operands.push_back(Node->getOperand(CurOp++));
SDValue Base;
SelectBaseAddr(Node->getOperand(CurOp++), Base);
Operands.push_back(Base); // Base pointer.
- if (IsMasked)
- Operands.push_back(Node->getOperand(CurOp++)); // Mask.
+ if (IsMasked) {
+ // Mask needs to be copied to V0.
+ SDValue Mask = Node->getOperand(CurOp++);
+ Chain = CurDAG->getCopyToReg(Chain, DL, RISCV::V0, Mask, SDValue());
+ Glue = Chain.getValue(1);
+ Operands.push_back(CurDAG->getRegister(RISCV::V0, Mask.getValueType()));
+ }
SDValue VL;
selectVLOp(Node->getOperand(CurOp++), VL);
Operands.push_back(VL);
Operands.push_back(SEW);
- Operands.push_back(Node->getOperand(0)); // Chain.
+ Operands.push_back(Chain); // Chain.
+ if (Glue)
+ Operands.push_back(Glue);
RISCVVLMUL LMUL = RISCVTargetLowering::getLMUL(VT);
const RISCV::VLEPseudo *P =
SDValue SEW = CurDAG->getTargetConstant(ScalarSize, DL, XLenVT);
unsigned CurOp = 2;
- SmallVector<SDValue, 6> Operands;
+ SmallVector<SDValue, 7> Operands;
Operands.push_back(Node->getOperand(CurOp++)); // Store value.
SDValue Base;
SelectBaseAddr(Node->getOperand(CurOp++), Base);
SDValue SEW = CurDAG->getTargetConstant(SEWImm, DL, XLenVT);
unsigned CurOp = 2;
- SmallVector<SDValue, 6> Operands;
+ SmallVector<SDValue, 7> Operands;
Operands.push_back(Node->getOperand(CurOp++)); // Store value.
SDValue Base;
SelectBaseAddr(Node->getOperand(CurOp++), Base);
; RV32-NEXT: vle64.v v8, (a1)
; RV32-NEXT: vle64.v v16, (a3)
; RV32-NEXT: fcvt.d.w ft0, zero
-; RV32-NEXT: vmfeq.vf v0, v8, ft0
-; RV32-NEXT: vmfeq.vf v26, v16, ft0
-; RV32-NEXT: vle64.v v8, (a0), v0.t
-; RV32-NEXT: addi a0, a0, 128
-; RV32-NEXT: vmv1r.v v0, v26
+; RV32-NEXT: vmfeq.vf v25, v8, ft0
+; RV32-NEXT: vmfeq.vf v0, v16, ft0
+; RV32-NEXT: addi a1, a0, 128
+; RV32-NEXT: vle64.v v8, (a1), v0.t
+; RV32-NEXT: vmv1r.v v0, v25
; RV32-NEXT: vle64.v v16, (a0), v0.t
-; RV32-NEXT: vse64.v v8, (a2)
+; RV32-NEXT: vse64.v v16, (a2)
; RV32-NEXT: addi a0, a2, 128
-; RV32-NEXT: vse64.v v16, (a0)
+; RV32-NEXT: vse64.v v8, (a0)
; RV32-NEXT: ret
;
; RV64-LABEL: masked_load_v32f64:
; RV64-NEXT: vle64.v v8, (a1)
; RV64-NEXT: vle64.v v16, (a3)
; RV64-NEXT: fmv.d.x ft0, zero
-; RV64-NEXT: vmfeq.vf v0, v8, ft0
-; RV64-NEXT: vmfeq.vf v26, v16, ft0
-; RV64-NEXT: vle64.v v8, (a0), v0.t
-; RV64-NEXT: addi a0, a0, 128
-; RV64-NEXT: vmv1r.v v0, v26
+; RV64-NEXT: vmfeq.vf v25, v8, ft0
+; RV64-NEXT: vmfeq.vf v0, v16, ft0
+; RV64-NEXT: addi a1, a0, 128
+; RV64-NEXT: vle64.v v8, (a1), v0.t
+; RV64-NEXT: vmv1r.v v0, v25
; RV64-NEXT: vle64.v v16, (a0), v0.t
-; RV64-NEXT: vse64.v v8, (a2)
+; RV64-NEXT: vse64.v v16, (a2)
; RV64-NEXT: addi a0, a2, 128
-; RV64-NEXT: vse64.v v16, (a0)
+; RV64-NEXT: vse64.v v8, (a0)
; RV64-NEXT: ret
%m = load <32 x double>, <32 x double>* %m_ptr
%mask = fcmp oeq <32 x double> %m, zeroinitializer
; CHECK-NEXT: vle32.v v8, (a1)
; CHECK-NEXT: vle32.v v16, (a3)
; CHECK-NEXT: fmv.w.x ft0, zero
-; CHECK-NEXT: vmfeq.vf v0, v8, ft0
-; CHECK-NEXT: vmfeq.vf v26, v16, ft0
-; CHECK-NEXT: vle32.v v8, (a0), v0.t
-; CHECK-NEXT: addi a0, a0, 128
-; CHECK-NEXT: vmv1r.v v0, v26
+; CHECK-NEXT: vmfeq.vf v25, v8, ft0
+; CHECK-NEXT: vmfeq.vf v0, v16, ft0
+; CHECK-NEXT: addi a1, a0, 128
+; CHECK-NEXT: vle32.v v8, (a1), v0.t
+; CHECK-NEXT: vmv1r.v v0, v25
; CHECK-NEXT: vle32.v v16, (a0), v0.t
-; CHECK-NEXT: vse32.v v8, (a2)
+; CHECK-NEXT: vse32.v v16, (a2)
; CHECK-NEXT: addi a0, a2, 128
-; CHECK-NEXT: vse32.v v16, (a0)
+; CHECK-NEXT: vse32.v v8, (a0)
; CHECK-NEXT: ret
%m = load <64 x float>, <64 x float>* %m_ptr
%mask = fcmp oeq <64 x float> %m, zeroinitializer
; CHECK-NEXT: vle16.v v8, (a1)
; CHECK-NEXT: vle16.v v16, (a3)
; CHECK-NEXT: fmv.h.x ft0, zero
-; CHECK-NEXT: vmfeq.vf v0, v8, ft0
-; CHECK-NEXT: vmfeq.vf v26, v16, ft0
-; CHECK-NEXT: vle16.v v8, (a0), v0.t
-; CHECK-NEXT: addi a0, a0, 128
-; CHECK-NEXT: vmv1r.v v0, v26
+; CHECK-NEXT: vmfeq.vf v25, v8, ft0
+; CHECK-NEXT: vmfeq.vf v0, v16, ft0
+; CHECK-NEXT: addi a1, a0, 128
+; CHECK-NEXT: vle16.v v8, (a1), v0.t
+; CHECK-NEXT: vmv1r.v v0, v25
; CHECK-NEXT: vle16.v v16, (a0), v0.t
-; CHECK-NEXT: vse16.v v8, (a2)
+; CHECK-NEXT: vse16.v v16, (a2)
; CHECK-NEXT: addi a0, a2, 128
-; CHECK-NEXT: vse16.v v16, (a0)
+; CHECK-NEXT: vse16.v v8, (a0)
; CHECK-NEXT: ret
%m = load <128 x half>, <128 x half>* %m_ptr
%mask = fcmp oeq <128 x half> %m, zeroinitializer
define void @masked_load_v32i64(<32 x i64>* %a, <32 x i64>* %m_ptr, <32 x i64>* %res_ptr) nounwind {
; RV32-LABEL: masked_load_v32i64:
; RV32: # %bb.0:
+; RV32-NEXT: addi sp, sp, -16
+; RV32-NEXT: csrr a3, vlenb
+; RV32-NEXT: slli a3, a3, 3
+; RV32-NEXT: sub sp, sp, a3
; RV32-NEXT: addi a3, a1, 128
; RV32-NEXT: vsetivli a4, 16, e64,m8,ta,mu
; RV32-NEXT: vle64.v v8, (a3)
+; RV32-NEXT: addi a3, sp, 16
+; RV32-NEXT: vs8r.v v8, (a3) # Unknown-size Folded Spill
; RV32-NEXT: vle64.v v16, (a1)
; RV32-NEXT: addi a1, zero, 32
; RV32-NEXT: vsetvli a1, a1, e32,m8,ta,mu
-; RV32-NEXT: vmv.v.i v24, 0
+; RV32-NEXT: vmv.v.i v8, 0
; RV32-NEXT: vsetivli a1, 16, e64,m8,ta,mu
-; RV32-NEXT: vmseq.vv v0, v16, v24
-; RV32-NEXT: vmseq.vv v16, v8, v24
-; RV32-NEXT: vle64.v v8, (a0), v0.t
-; RV32-NEXT: addi a0, a0, 128
-; RV32-NEXT: vmv1r.v v0, v16
+; RV32-NEXT: vmseq.vv v25, v16, v8
+; RV32-NEXT: addi a1, sp, 16
+; RV32-NEXT: vl8re8.v v16, (a1) # Unknown-size Folded Reload
+; RV32-NEXT: vmseq.vv v0, v16, v8
+; RV32-NEXT: addi a1, a0, 128
+; RV32-NEXT: vle64.v v8, (a1), v0.t
+; RV32-NEXT: vmv1r.v v0, v25
; RV32-NEXT: vle64.v v16, (a0), v0.t
-; RV32-NEXT: vse64.v v8, (a2)
+; RV32-NEXT: vse64.v v16, (a2)
; RV32-NEXT: addi a0, a2, 128
-; RV32-NEXT: vse64.v v16, (a0)
+; RV32-NEXT: vse64.v v8, (a0)
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 3
+; RV32-NEXT: add sp, sp, a0
+; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: ret
;
; RV64-LABEL: masked_load_v32i64:
; RV64-NEXT: vsetivli a4, 16, e64,m8,ta,mu
; RV64-NEXT: vle64.v v8, (a1)
; RV64-NEXT: vle64.v v16, (a3)
-; RV64-NEXT: vmseq.vi v0, v8, 0
-; RV64-NEXT: vmseq.vi v26, v16, 0
-; RV64-NEXT: vle64.v v8, (a0), v0.t
-; RV64-NEXT: addi a0, a0, 128
-; RV64-NEXT: vmv1r.v v0, v26
+; RV64-NEXT: vmseq.vi v25, v8, 0
+; RV64-NEXT: vmseq.vi v0, v16, 0
+; RV64-NEXT: addi a1, a0, 128
+; RV64-NEXT: vle64.v v8, (a1), v0.t
+; RV64-NEXT: vmv1r.v v0, v25
; RV64-NEXT: vle64.v v16, (a0), v0.t
-; RV64-NEXT: vse64.v v8, (a2)
+; RV64-NEXT: vse64.v v16, (a2)
; RV64-NEXT: addi a0, a2, 128
-; RV64-NEXT: vse64.v v16, (a0)
+; RV64-NEXT: vse64.v v8, (a0)
; RV64-NEXT: ret
%m = load <32 x i64>, <32 x i64>* %m_ptr
%mask = icmp eq <32 x i64> %m, zeroinitializer
; CHECK-NEXT: vsetvli a4, a4, e32,m8,ta,mu
; CHECK-NEXT: vle32.v v8, (a1)
; CHECK-NEXT: vle32.v v16, (a3)
-; CHECK-NEXT: vmseq.vi v0, v8, 0
-; CHECK-NEXT: vmseq.vi v26, v16, 0
-; CHECK-NEXT: vle32.v v8, (a0), v0.t
-; CHECK-NEXT: addi a0, a0, 128
-; CHECK-NEXT: vmv1r.v v0, v26
+; CHECK-NEXT: vmseq.vi v25, v8, 0
+; CHECK-NEXT: vmseq.vi v0, v16, 0
+; CHECK-NEXT: addi a1, a0, 128
+; CHECK-NEXT: vle32.v v8, (a1), v0.t
+; CHECK-NEXT: vmv1r.v v0, v25
; CHECK-NEXT: vle32.v v16, (a0), v0.t
-; CHECK-NEXT: vse32.v v8, (a2)
+; CHECK-NEXT: vse32.v v16, (a2)
; CHECK-NEXT: addi a0, a2, 128
-; CHECK-NEXT: vse32.v v16, (a0)
+; CHECK-NEXT: vse32.v v8, (a0)
; CHECK-NEXT: ret
%m = load <64 x i32>, <64 x i32>* %m_ptr
%mask = icmp eq <64 x i32> %m, zeroinitializer
; CHECK-NEXT: vsetvli a4, a4, e8,m8,ta,mu
; CHECK-NEXT: vle8.v v8, (a1)
; CHECK-NEXT: vle8.v v16, (a3)
-; CHECK-NEXT: vmseq.vi v0, v8, 0
-; CHECK-NEXT: vmseq.vi v26, v16, 0
-; CHECK-NEXT: vle8.v v8, (a0), v0.t
-; CHECK-NEXT: addi a0, a0, 128
-; CHECK-NEXT: vmv1r.v v0, v26
+; CHECK-NEXT: vmseq.vi v25, v8, 0
+; CHECK-NEXT: vmseq.vi v0, v16, 0
+; CHECK-NEXT: addi a1, a0, 128
+; CHECK-NEXT: vle8.v v8, (a1), v0.t
+; CHECK-NEXT: vmv1r.v v0, v25
; CHECK-NEXT: vle8.v v16, (a0), v0.t
-; CHECK-NEXT: vse8.v v8, (a2)
+; CHECK-NEXT: vse8.v v16, (a2)
; CHECK-NEXT: addi a0, a2, 128
-; CHECK-NEXT: vse8.v v16, (a0)
+; CHECK-NEXT: vse8.v v8, (a0)
; CHECK-NEXT: ret
%m = load <256 x i8>, <256 x i8>* %m_ptr
%mask = icmp eq <256 x i8> %m, zeroinitializer
;
; RV64-LABEL: mgather_baseidx_nxv32i8:
; RV64: # %bb.0:
+; RV64-NEXT: vmv1r.v v25, v0
; RV64-NEXT: vsetvli a1, zero, e64,m8,ta,mu
; RV64-NEXT: vsext.vf8 v16, v8
; RV64-NEXT: vsetvli a1, zero, e8,m1,tu,mu
; RV64-NEXT: csrr a1, vlenb
; RV64-NEXT: srli a1, a1, 3
; RV64-NEXT: vsetvli a2, zero, e8,mf4,ta,mu
-; RV64-NEXT: vslidedown.vx v25, v0, a1
-; RV64-NEXT: vmv1r.v v26, v0
+; RV64-NEXT: vslidedown.vx v0, v0, a1
; RV64-NEXT: vsetvli a2, zero, e64,m8,ta,mu
; RV64-NEXT: vsext.vf8 v16, v9
; RV64-NEXT: vsetvli a2, zero, e8,m1,tu,mu
-; RV64-NEXT: vmv1r.v v0, v25
; RV64-NEXT: vloxei64.v v13, (a0), v16, v0.t
; RV64-NEXT: slli a2, a1, 1
; RV64-NEXT: vsetvli a3, zero, e8,mf2,ta,mu
-; RV64-NEXT: vslidedown.vx v26, v26, a2
+; RV64-NEXT: vslidedown.vx v25, v25, a2
; RV64-NEXT: vsetvli a2, zero, e8,mf4,ta,mu
-; RV64-NEXT: vslidedown.vx v0, v26, a1
+; RV64-NEXT: vslidedown.vx v0, v25, a1
; RV64-NEXT: vsetvli a1, zero, e64,m8,ta,mu
; RV64-NEXT: vsext.vf8 v16, v11
; RV64-NEXT: vsetvli a1, zero, e8,m1,tu,mu
; RV64-NEXT: vsetvli a1, zero, e64,m8,ta,mu
; RV64-NEXT: vsext.vf8 v16, v10
; RV64-NEXT: vsetvli a1, zero, e8,m1,tu,mu
-; RV64-NEXT: vmv1r.v v0, v26
+; RV64-NEXT: vmv1r.v v0, v25
; RV64-NEXT: vloxei64.v v14, (a0), v16, v0.t
; RV64-NEXT: vmv4r.v v8, v12
; RV64-NEXT: ret
--- /dev/null
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=riscv32 -mattr=+d,+experimental-zvlsseg,+experimental-zfh,+m \
+; RUN: -regalloc=fast -verify-machineinstrs < %s | FileCheck %s
+
+; This test previously crashed with an error "ran out of registers during register allocation"
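+; The mask is now passed in v0 (note the v0.t suffix checked below), so the
+; allocator no longer has to assign a separate vector register for it.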
+
+declare void @llvm.riscv.vsseg2.mask.nxv16i16(<vscale x 16 x i16>,<vscale x 16 x i16>, i16*, <vscale x 16 x i1>, i32)
+
+define void @test_vsseg2_mask_nxv16i16(<vscale x 16 x i16> %val, i16* %base, <vscale x 16 x i1> %mask, i32 %vl) {
+; CHECK-LABEL: test_vsseg2_mask_nxv16i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vmv4r.v v4, v8
+; CHECK-NEXT: vsetvli a1, a1, e16,m4,ta,mu
+; CHECK-NEXT: vsseg2e16.v v4, (a0), v0.t
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.vsseg2.mask.nxv16i16(<vscale x 16 x i16> %val,<vscale x 16 x i16> %val, i16* %base, <vscale x 16 x i1> %mask, i32 %vl)
+ ret void
+}