#define DEBUG_TYPE "riscv-isel"
-namespace RISCVZvlssegTable {
-struct RISCVZvlsseg {
- unsigned IntrinsicID;
- uint8_t SEW;
- uint8_t LMUL;
- uint8_t IndexLMUL;
- uint16_t Pseudo;
-};
-
-using namespace RISCV;
-
-#define GET_RISCVZvlssegTable_IMPL
+namespace llvm {
+namespace RISCV {
+// Pull in the generated implementations of the four segment load/store
+// pseudo lookup tables (declared as GenericTable records in the .td file),
+// so getVLSEGPseudo/getVLXSEGPseudo/getVSSEGPseudo/getVSXSEGPseudo are
+// defined in this translation unit.
+#define GET_RISCVVSSEGTable_IMPL
+#define GET_RISCVVLSEGTable_IMPL
+#define GET_RISCVVLXSEGTable_IMPL
+#define GET_RISCVVSXSEGTable_IMPL
#include "RISCVGenSearchableTables.inc"
-
-} // namespace RISCVZvlssegTable
+} // namespace RISCV
+} // namespace llvm
void RISCVDAGToDAGISel::PostprocessISelDAG() {
doPeepholeLoadStoreADDI();
}
}
-void RISCVDAGToDAGISel::selectVLSEG(SDNode *Node, unsigned IntNo, bool IsMasked,
+void RISCVDAGToDAGISel::selectVLSEG(SDNode *Node, bool IsMasked,
bool IsStrided) {
SDLoc DL(Node);
unsigned NF = Node->getNumValues() - 1;
Operands.push_back(Node->getOperand(CurOp++)); // VL.
Operands.push_back(SEW);
Operands.push_back(Node->getOperand(0)); // Chain.
- const RISCVZvlssegTable::RISCVZvlsseg *P = RISCVZvlssegTable::getPseudo(
- IntNo, ScalarSize, static_cast<unsigned>(LMUL),
- static_cast<unsigned>(RISCVVLMUL::LMUL_1));
+ const RISCV::VLSEGPseudo *P =
+ RISCV::getVLSEGPseudo(NF, IsMasked, IsStrided, /*FF*/ false, ScalarSize,
+ static_cast<unsigned>(LMUL));
SDNode *Load =
CurDAG->getMachineNode(P->Pseudo, DL, MVT::Untyped, MVT::Other, Operands);
SDValue SuperReg = SDValue(Load, 0);
void RISCVDAGToDAGISel::selectVLSEGFF(SDNode *Node, bool IsMasked) {
SDLoc DL(Node);
- unsigned IntNo = cast<ConstantSDNode>(Node->getOperand(1))->getZExtValue();
unsigned NF = Node->getNumValues() - 2; // Do not count VL and Chain.
MVT VT = Node->getSimpleValueType(0);
MVT XLenVT = Subtarget->getXLenVT();
Operands.push_back(Node->getOperand(CurOp++)); // VL.
Operands.push_back(SEW);
Operands.push_back(Node->getOperand(0)); // Chain.
- const RISCVZvlssegTable::RISCVZvlsseg *P = RISCVZvlssegTable::getPseudo(
- IntNo, ScalarSize, static_cast<unsigned>(LMUL),
- static_cast<unsigned>(RISCVVLMUL::LMUL_1));
+ const RISCV::VLSEGPseudo *P =
+ RISCV::getVLSEGPseudo(NF, IsMasked, /*Strided*/ false, /*FF*/ true,
+ ScalarSize, static_cast<unsigned>(LMUL));
SDNode *Load = CurDAG->getMachineNode(P->Pseudo, DL, MVT::Untyped, MVT::Other,
MVT::Glue, Operands);
SDNode *ReadVL = CurDAG->getMachineNode(RISCV::PseudoReadVL, DL, XLenVT,
CurDAG->RemoveDeadNode(Node);
}
-void RISCVDAGToDAGISel::selectVLXSEG(SDNode *Node, unsigned IntNo,
- bool IsMasked) {
+void RISCVDAGToDAGISel::selectVLXSEG(SDNode *Node, bool IsMasked,
+ bool IsOrdered) {
SDLoc DL(Node);
unsigned NF = Node->getNumValues() - 1;
MVT VT = Node->getSimpleValueType(0);
RISCVVLMUL IndexLMUL = getLMUL(IndexVT);
unsigned IndexScalarSize = IndexVT.getScalarSizeInBits();
- const RISCVZvlssegTable::RISCVZvlsseg *P = RISCVZvlssegTable::getPseudo(
- IntNo, IndexScalarSize, static_cast<unsigned>(LMUL),
+ const RISCV::VLXSEGPseudo *P = RISCV::getVLXSEGPseudo(
+ NF, IsMasked, IsOrdered, IndexScalarSize, static_cast<unsigned>(LMUL),
static_cast<unsigned>(IndexLMUL));
SDNode *Load =
CurDAG->getMachineNode(P->Pseudo, DL, MVT::Untyped, MVT::Other, Operands);
CurDAG->RemoveDeadNode(Node);
}
-void RISCVDAGToDAGISel::selectVSSEG(SDNode *Node, unsigned IntNo, bool IsMasked,
+void RISCVDAGToDAGISel::selectVSSEG(SDNode *Node, bool IsMasked,
bool IsStrided) {
SDLoc DL(Node);
unsigned NF = Node->getNumOperands() - 4;
Operands.push_back(Node->getOperand(CurOp++)); // VL.
Operands.push_back(SEW);
Operands.push_back(Node->getOperand(0)); // Chain.
- const RISCVZvlssegTable::RISCVZvlsseg *P = RISCVZvlssegTable::getPseudo(
- IntNo, ScalarSize, static_cast<unsigned>(LMUL),
- static_cast<unsigned>(RISCVVLMUL::LMUL_1));
+ const RISCV::VSSEGPseudo *P = RISCV::getVSSEGPseudo(
+ NF, IsMasked, IsStrided, ScalarSize, static_cast<unsigned>(LMUL));
SDNode *Store =
CurDAG->getMachineNode(P->Pseudo, DL, Node->getValueType(0), Operands);
ReplaceNode(Node, Store);
}
-void RISCVDAGToDAGISel::selectVSXSEG(SDNode *Node, unsigned IntNo,
- bool IsMasked) {
+void RISCVDAGToDAGISel::selectVSXSEG(SDNode *Node, bool IsMasked,
+ bool IsOrdered) {
SDLoc DL(Node);
unsigned NF = Node->getNumOperands() - 5;
if (IsMasked)
RISCVVLMUL IndexLMUL = getLMUL(IndexVT);
unsigned IndexScalarSize = IndexVT.getScalarSizeInBits();
- const RISCVZvlssegTable::RISCVZvlsseg *P = RISCVZvlssegTable::getPseudo(
- IntNo, IndexScalarSize, static_cast<unsigned>(LMUL),
+ const RISCV::VSXSEGPseudo *P = RISCV::getVSXSEGPseudo(
+ NF, IsMasked, IsOrdered, IndexScalarSize, static_cast<unsigned>(LMUL),
static_cast<unsigned>(IndexLMUL));
SDNode *Store =
CurDAG->getMachineNode(P->Pseudo, DL, Node->getValueType(0), Operands);
case Intrinsic::riscv_vlseg6:
case Intrinsic::riscv_vlseg7:
case Intrinsic::riscv_vlseg8: {
- selectVLSEG(Node, IntNo, /*IsMasked*/ false, /*IsStrided*/ false);
+ selectVLSEG(Node, /*IsMasked*/ false, /*IsStrided*/ false);
return;
}
case Intrinsic::riscv_vlseg2_mask:
case Intrinsic::riscv_vlseg6_mask:
case Intrinsic::riscv_vlseg7_mask:
case Intrinsic::riscv_vlseg8_mask: {
- selectVLSEG(Node, IntNo, /*IsMasked*/ true, /*IsStrided*/ false);
+ selectVLSEG(Node, /*IsMasked*/ true, /*IsStrided*/ false);
return;
}
case Intrinsic::riscv_vlsseg2:
case Intrinsic::riscv_vlsseg6:
case Intrinsic::riscv_vlsseg7:
case Intrinsic::riscv_vlsseg8: {
- selectVLSEG(Node, IntNo, /*IsMasked*/ false, /*IsStrided*/ true);
+ selectVLSEG(Node, /*IsMasked*/ false, /*IsStrided*/ true);
return;
}
case Intrinsic::riscv_vlsseg2_mask:
case Intrinsic::riscv_vlsseg6_mask:
case Intrinsic::riscv_vlsseg7_mask:
case Intrinsic::riscv_vlsseg8_mask: {
- selectVLSEG(Node, IntNo, /*IsMasked*/ true, /*IsStrided*/ true);
+ selectVLSEG(Node, /*IsMasked*/ true, /*IsStrided*/ true);
return;
}
case Intrinsic::riscv_vloxseg2:
case Intrinsic::riscv_vloxseg6:
case Intrinsic::riscv_vloxseg7:
case Intrinsic::riscv_vloxseg8:
+ selectVLXSEG(Node, /*IsMasked*/ false, /*IsOrdered*/ true);
+ return;
case Intrinsic::riscv_vluxseg2:
case Intrinsic::riscv_vluxseg3:
case Intrinsic::riscv_vluxseg4:
case Intrinsic::riscv_vluxseg5:
case Intrinsic::riscv_vluxseg6:
case Intrinsic::riscv_vluxseg7:
- case Intrinsic::riscv_vluxseg8: {
- selectVLXSEG(Node, IntNo, /*IsMasked*/ false);
+ case Intrinsic::riscv_vluxseg8:
+ selectVLXSEG(Node, /*IsMasked*/ false, /*IsOrdered*/ false);
return;
- }
case Intrinsic::riscv_vloxseg2_mask:
case Intrinsic::riscv_vloxseg3_mask:
case Intrinsic::riscv_vloxseg4_mask:
case Intrinsic::riscv_vloxseg6_mask:
case Intrinsic::riscv_vloxseg7_mask:
case Intrinsic::riscv_vloxseg8_mask:
+ selectVLXSEG(Node, /*IsMasked*/ true, /*IsOrdered*/ true);
+ return;
case Intrinsic::riscv_vluxseg2_mask:
case Intrinsic::riscv_vluxseg3_mask:
case Intrinsic::riscv_vluxseg4_mask:
case Intrinsic::riscv_vluxseg5_mask:
case Intrinsic::riscv_vluxseg6_mask:
case Intrinsic::riscv_vluxseg7_mask:
- case Intrinsic::riscv_vluxseg8_mask: {
- selectVLXSEG(Node, IntNo, /*IsMasked*/ true);
+ case Intrinsic::riscv_vluxseg8_mask:
+ selectVLXSEG(Node, /*IsMasked*/ true, /*IsOrdered*/ false);
return;
- }
case Intrinsic::riscv_vlseg8ff:
case Intrinsic::riscv_vlseg7ff:
case Intrinsic::riscv_vlseg6ff:
case Intrinsic::riscv_vsseg6:
case Intrinsic::riscv_vsseg7:
case Intrinsic::riscv_vsseg8: {
- selectVSSEG(Node, IntNo, /*IsMasked*/ false, /*IsStrided*/ false);
+ selectVSSEG(Node, /*IsMasked*/ false, /*IsStrided*/ false);
return;
}
case Intrinsic::riscv_vsseg2_mask:
case Intrinsic::riscv_vsseg6_mask:
case Intrinsic::riscv_vsseg7_mask:
case Intrinsic::riscv_vsseg8_mask: {
- selectVSSEG(Node, IntNo, /*IsMasked*/ true, /*IsStrided*/ false);
+ selectVSSEG(Node, /*IsMasked*/ true, /*IsStrided*/ false);
return;
}
case Intrinsic::riscv_vssseg2:
case Intrinsic::riscv_vssseg6:
case Intrinsic::riscv_vssseg7:
case Intrinsic::riscv_vssseg8: {
- selectVSSEG(Node, IntNo, /*IsMasked*/ false, /*IsStrided*/ true);
+ selectVSSEG(Node, /*IsMasked*/ false, /*IsStrided*/ true);
return;
}
case Intrinsic::riscv_vssseg2_mask:
case Intrinsic::riscv_vssseg6_mask:
case Intrinsic::riscv_vssseg7_mask:
case Intrinsic::riscv_vssseg8_mask: {
- selectVSSEG(Node, IntNo, /*IsMasked*/ true, /*IsStrided*/ true);
+ selectVSSEG(Node, /*IsMasked*/ true, /*IsStrided*/ true);
return;
}
case Intrinsic::riscv_vsoxseg2:
case Intrinsic::riscv_vsoxseg6:
case Intrinsic::riscv_vsoxseg7:
case Intrinsic::riscv_vsoxseg8:
+ selectVSXSEG(Node, /*IsMasked*/ false, /*IsOrdered*/ true);
+ return;
case Intrinsic::riscv_vsuxseg2:
case Intrinsic::riscv_vsuxseg3:
case Intrinsic::riscv_vsuxseg4:
case Intrinsic::riscv_vsuxseg5:
case Intrinsic::riscv_vsuxseg6:
case Intrinsic::riscv_vsuxseg7:
- case Intrinsic::riscv_vsuxseg8: {
- selectVSXSEG(Node, IntNo, /*IsMasked*/ false);
+ case Intrinsic::riscv_vsuxseg8:
+ selectVSXSEG(Node, /*IsMasked*/ false, /*IsOrdered*/ false);
return;
- }
case Intrinsic::riscv_vsoxseg2_mask:
case Intrinsic::riscv_vsoxseg3_mask:
case Intrinsic::riscv_vsoxseg4_mask:
case Intrinsic::riscv_vsoxseg6_mask:
case Intrinsic::riscv_vsoxseg7_mask:
case Intrinsic::riscv_vsoxseg8_mask:
+ selectVSXSEG(Node, /*IsMasked*/ true, /*IsOrdered*/ true);
+ return;
case Intrinsic::riscv_vsuxseg2_mask:
case Intrinsic::riscv_vsuxseg3_mask:
case Intrinsic::riscv_vsuxseg4_mask:
case Intrinsic::riscv_vsuxseg5_mask:
case Intrinsic::riscv_vsuxseg6_mask:
case Intrinsic::riscv_vsuxseg7_mask:
- case Intrinsic::riscv_vsuxseg8_mask: {
- selectVSXSEG(Node, IntNo, /*IsMasked*/ true);
+ case Intrinsic::riscv_vsuxseg8_mask:
+ selectVSXSEG(Node, /*IsMasked*/ true, /*IsOrdered*/ false);
return;
}
- }
break;
}
case ISD::BITCAST:
let PrimaryKeyName = "getRISCVVIntrinsicInfo";
}
-class RISCVZvlsseg<string IntrName, bits<7> S, bits<3> L, bits<3> IL = V_M1.value> {
- Intrinsic IntrinsicID = !cast<Intrinsic>(IntrName);
+// Searchable-table entry attached to each segment-load pseudo. Instead of
+// keying the lookup on the intrinsic ID (as the old RISCVZvlsseg table did),
+// the pseudo is identified by its structural properties: NF (number of
+// fields), masked/strided/FF flags, SEW and LMUL.
+class RISCVVLSEG<bits<4> N, bit M, bit Str, bit F, bits<7> S, bits<3> L> {
+  bits<4> NF = N;
+  bits<1> Masked = M;
+  bits<1> Strided = Str;
+  // FF: fault-only-first variant (presumably vlseg<nf>e<eew>ff — confirm
+  // against the FFStr naming in the load multiclasses).
+  bits<1> FF = F;
+  bits<7> SEW = S;
+  bits<3> LMUL = L;
+  Pseudo Pseudo = !cast<Pseudo>(NAME);
+}
+
+// Generates the VLSEGPseudo struct and the getVLSEGPseudo() lookup keyed on
+// all fields except the Pseudo opcode itself.
+def RISCVVLSEGTable : GenericTable {
+  let FilterClass = "RISCVVLSEG";
+  let CppTypeName = "VLSEGPseudo";
+  let Fields = ["NF", "Masked", "Strided", "FF", "SEW", "LMUL", "Pseudo"];
+  let PrimaryKey = ["NF", "Masked", "Strided", "FF", "SEW", "LMUL"];
+  let PrimaryKeyName = "getVLSEGPseudo";
+}
+
+// Searchable-table entry for indexed segment loads. Ordered distinguishes
+// vloxseg (ordered) from vluxseg (unordered); IndexLMUL is the LMUL of the
+// index operand, which may differ from the data LMUL.
+class RISCVVLXSEG<bits<4> N, bit M, bit O, bits<7> S, bits<3> L, bits<3> IL> {
+  bits<4> NF = N;
+  bits<1> Masked = M;
+  bits<1> Ordered = O;
bits<7> SEW = S;
bits<3> LMUL = L;
bits<3> IndexLMUL = IL;
Pseudo Pseudo = !cast<Pseudo>(NAME);
}
-def RISCVZvlssegTable : GenericTable {
- let FilterClass = "RISCVZvlsseg";
- let Fields = ["IntrinsicID", "SEW", "LMUL", "IndexLMUL", "Pseudo"];
- let PrimaryKey = ["IntrinsicID", "SEW", "LMUL", "IndexLMUL"];
- let PrimaryKeyName = "getPseudo";
+// Generates VLXSEGPseudo / getVLXSEGPseudo(), keyed on every field but the
+// Pseudo opcode.
+def RISCVVLXSEGTable : GenericTable {
+  let FilterClass = "RISCVVLXSEG";
+  let CppTypeName = "VLXSEGPseudo";
+  let Fields = ["NF", "Masked", "Ordered", "SEW", "LMUL", "IndexLMUL", "Pseudo"];
+  let PrimaryKey = ["NF", "Masked", "Ordered", "SEW", "LMUL", "IndexLMUL"];
+  let PrimaryKeyName = "getVLXSEGPseudo";
+}
+
+// Searchable-table entry for (unit-stride or strided) segment stores.
+// Mirrors RISCVVLSEG but stores have no fault-only-first form, so there is
+// no FF bit.
+class RISCVVSSEG<bits<4> N, bit M, bit Str, bits<7> S, bits<3> L> {
+  bits<4> NF = N;
+  bits<1> Masked = M;
+  bits<1> Strided = Str;
+  bits<7> SEW = S;
+  bits<3> LMUL = L;
+  Pseudo Pseudo = !cast<Pseudo>(NAME);
+}
+
+// Generates VSSEGPseudo / getVSSEGPseudo().
+def RISCVVSSEGTable : GenericTable {
+  let FilterClass = "RISCVVSSEG";
+  let CppTypeName = "VSSEGPseudo";
+  let Fields = ["NF", "Masked", "Strided", "SEW", "LMUL", "Pseudo"];
+  let PrimaryKey = ["NF", "Masked", "Strided", "SEW", "LMUL"];
+  let PrimaryKeyName = "getVSSEGPseudo";
+}
+
+// Searchable-table entry for indexed segment stores. Ordered distinguishes
+// vsoxseg (ordered) from vsuxseg (unordered); layout parallels RISCVVLXSEG.
+class RISCVVSXSEG<bits<4> N, bit M, bit O, bits<7> S, bits<3> L, bits<3> IL> {
+  bits<4> NF = N;
+  bits<1> Masked = M;
+  bits<1> Ordered = O;
+  bits<7> SEW = S;
+  bits<3> LMUL = L;
+  bits<3> IndexLMUL = IL;
+  Pseudo Pseudo = !cast<Pseudo>(NAME);
+}
+
+// Generates VSXSEGPseudo / getVSXSEGPseudo().
+def RISCVVSXSEGTable : GenericTable {
+  let FilterClass = "RISCVVSXSEG";
+  let CppTypeName = "VSXSEGPseudo";
+  let Fields = ["NF", "Masked", "Ordered", "SEW", "LMUL", "IndexLMUL", "Pseudo"];
+  let PrimaryKey = ["NF", "Masked", "Ordered", "SEW", "LMUL", "IndexLMUL"];
+  let PrimaryKeyName = "getVSXSEGPseudo";
+}
//===----------------------------------------------------------------------===//
!subst("Pseudo", "", PseudoInst))))))))))))))))))));
}
-class ToLowerCase<string Upper> {
- string L = !subst("FF", "ff",
- !subst("VLSEG", "vlseg",
- !subst("VLSSEG", "vlsseg",
- !subst("VSSEG", "vsseg",
- !subst("VSSSEG", "vssseg",
- !subst("VLOXSEG", "vloxseg",
- !subst("VLUXSEG", "vluxseg",
- !subst("VSOXSEG", "vsoxseg",
- !subst("VSUXSEG", "vsuxseg", Upper)))))))));
-}
-
-// Example: PseudoVLSEG2E32_V_M2 -> int_riscv_vlseg2
-// Example: PseudoVLSEG2E32_V_M2_MASK -> int_riscv_vlseg2_mask
-class PseudoToIntrinsic<string PseudoInst, bit IsMasked> {
- string Intrinsic = !strconcat("int_riscv_",
- ToLowerCase<
- !subst("E8", "",
- !subst("E16", "",
- !subst("E32", "",
- !subst("E64", "",
- !subst("EI8", "",
- !subst("EI16", "",
- !subst("EI32", "",
- !subst("EI64", "",
- !subst("_V", "", PseudoToVInst<PseudoInst>.VInst)))))))))>.L,
- !if(IsMasked, "_mask", ""));
-}
-
// The destination vector register group for a masked vector instruction cannot
// overlap the source mask register (v0), unless the destination vector register
// is being written with a mask value (e.g., comparisons) or the scalar result
defm "EI" # eew : VPseudoAMOEI<eew>;
}
-class VPseudoUSSegLoadNoMask<VReg RetClass, bits<7> EEW>:
+class VPseudoUSSegLoadNoMask<VReg RetClass, bits<7> EEW, bits<4> NF, bit isFF>:
Pseudo<(outs RetClass:$rd),
(ins GPR:$rs1, GPR:$vl, ixlenimm:$sew),[]>,
RISCVVPseudo,
- RISCVZvlsseg<PseudoToIntrinsic<NAME, false>.Intrinsic, EEW, VLMul> {
+ RISCVVLSEG<NF, /*Masked*/0, /*Strided*/0, /*FF*/isFF, EEW, VLMul> {
let mayLoad = 1;
let mayStore = 0;
let hasSideEffects = 0;
let BaseInstr = !cast<Instruction>(PseudoToVInst<NAME>.VInst);
}
-class VPseudoUSSegLoadMask<VReg RetClass, bits<7> EEW>:
+class VPseudoUSSegLoadMask<VReg RetClass, bits<7> EEW, bits<4> NF, bit isFF>:
Pseudo<(outs GetVRegNoV0<RetClass>.R:$rd),
(ins GetVRegNoV0<RetClass>.R:$merge, GPR:$rs1,
VMaskOp:$vm, GPR:$vl, ixlenimm:$sew),[]>,
RISCVVPseudo,
- RISCVZvlsseg<PseudoToIntrinsic<NAME, true>.Intrinsic, EEW, VLMul> {
+ RISCVVLSEG<NF, /*Masked*/1, /*Strided*/0, /*FF*/isFF, EEW, VLMul> {
let mayLoad = 1;
let mayStore = 0;
let hasSideEffects = 0;
let BaseInstr = !cast<Instruction>(PseudoToVInst<NAME>.VInst);
}
-class VPseudoSSegLoadNoMask<VReg RetClass, bits<7> EEW>:
+// Strided segment load: carries NF into the RISCVVLSEG search table so the
+// pseudo can be found by (NF, Masked, Strided, FF, SEW, LMUL).
+class VPseudoSSegLoadNoMask<VReg RetClass, bits<7> EEW, bits<4> NF>:
Pseudo<(outs RetClass:$rd),
(ins GPR:$rs1, GPR:$offset, GPR:$vl, ixlenimm:$sew),[]>,
RISCVVPseudo,
- RISCVZvlsseg<PseudoToIntrinsic<NAME, false>.Intrinsic, EEW, VLMul> {
+ RISCVVLSEG<NF, /*Masked*/0, /*Strided*/1, /*FF*/0, EEW, VLMul> {
let mayLoad = 1;
let mayStore = 0;
let hasSideEffects = 0;
let BaseInstr = !cast<Instruction>(PseudoToVInst<NAME>.VInst);
}
-class VPseudoSSegLoadMask<VReg RetClass, bits<7> EEW>:
+class VPseudoSSegLoadMask<VReg RetClass, bits<7> EEW, bits<4> NF>:
Pseudo<(outs GetVRegNoV0<RetClass>.R:$rd),
(ins GetVRegNoV0<RetClass>.R:$merge, GPR:$rs1,
GPR:$offset, VMaskOp:$vm, GPR:$vl, ixlenimm:$sew),[]>,
RISCVVPseudo,
- RISCVZvlsseg<PseudoToIntrinsic<NAME, true>.Intrinsic, EEW, VLMul> {
+ RISCVVLSEG<NF, /*Masked*/1, /*Strided*/1, /*FF*/0, EEW, VLMul> {
let mayLoad = 1;
let mayStore = 0;
let hasSideEffects = 0;
let BaseInstr = !cast<Instruction>(PseudoToVInst<NAME>.VInst);
}
-class VPseudoISegLoadNoMask<VReg RetClass, VReg IdxClass, bits<7> EEW, bits<3> LMUL>:
+class VPseudoISegLoadNoMask<VReg RetClass, VReg IdxClass, bits<7> EEW, bits<3> LMUL,
+ bits<4> NF, bit Ordered>:
Pseudo<(outs RetClass:$rd),
(ins GPR:$rs1, IdxClass:$offset, GPR:$vl, ixlenimm:$sew),[]>,
RISCVVPseudo,
- RISCVZvlsseg<PseudoToIntrinsic<NAME, false>.Intrinsic, EEW, VLMul, LMUL> {
+ RISCVVLXSEG<NF, /*Masked*/0, Ordered, EEW, VLMul, LMUL> {
let mayLoad = 1;
let mayStore = 0;
let hasSideEffects = 0;
let BaseInstr = !cast<Instruction>(PseudoToVInst<NAME>.VInst);
}
-class VPseudoISegLoadMask<VReg RetClass, VReg IdxClass, bits<7> EEW, bits<3> LMUL>:
+class VPseudoISegLoadMask<VReg RetClass, VReg IdxClass, bits<7> EEW, bits<3> LMUL,
+ bits<4> NF, bit Ordered>:
Pseudo<(outs GetVRegNoV0<RetClass>.R:$rd),
(ins GetVRegNoV0<RetClass>.R:$merge, GPR:$rs1,
IdxClass:$offset, VMaskOp:$vm, GPR:$vl, ixlenimm:$sew),[]>,
RISCVVPseudo,
- RISCVZvlsseg<PseudoToIntrinsic<NAME, true>.Intrinsic, EEW, VLMul, LMUL> {
+ RISCVVLXSEG<NF, /*Masked*/1, Ordered, EEW, VLMul, LMUL> {
let mayLoad = 1;
let mayStore = 0;
let hasSideEffects = 0;
let BaseInstr = !cast<Instruction>(PseudoToVInst<NAME>.VInst);
}
-class VPseudoUSSegStoreNoMask<VReg ValClass, bits<7> EEW>:
+class VPseudoUSSegStoreNoMask<VReg ValClass, bits<7> EEW, bits<4> NF>:
Pseudo<(outs),
(ins ValClass:$rd, GPR:$rs1, GPR:$vl, ixlenimm:$sew),[]>,
RISCVVPseudo,
- RISCVZvlsseg<PseudoToIntrinsic<NAME, false>.Intrinsic, EEW, VLMul> {
+ RISCVVSSEG<NF, /*Masked*/0, /*Strided*/0, EEW, VLMul> {
let mayLoad = 0;
let mayStore = 1;
let hasSideEffects = 0;
let BaseInstr = !cast<Instruction>(PseudoToVInst<NAME>.VInst);
}
-class VPseudoUSSegStoreMask<VReg ValClass, bits<7> EEW>:
+class VPseudoUSSegStoreMask<VReg ValClass, bits<7> EEW, bits<4> NF>:
Pseudo<(outs),
(ins ValClass:$rd, GPR:$rs1,
VMaskOp:$vm, GPR:$vl, ixlenimm:$sew),[]>,
RISCVVPseudo,
- RISCVZvlsseg<PseudoToIntrinsic<NAME, true>.Intrinsic, EEW, VLMul> {
+ RISCVVSSEG<NF, /*Masked*/1, /*Strided*/0, EEW, VLMul> {
let mayLoad = 0;
let mayStore = 1;
let hasSideEffects = 0;
let BaseInstr = !cast<Instruction>(PseudoToVInst<NAME>.VInst);
}
-class VPseudoSSegStoreNoMask<VReg ValClass, bits<7> EEW>:
+class VPseudoSSegStoreNoMask<VReg ValClass, bits<7> EEW, bits<4> NF>:
Pseudo<(outs),
(ins ValClass:$rd, GPR:$rs1, GPR: $offset, GPR:$vl, ixlenimm:$sew),[]>,
RISCVVPseudo,
- RISCVZvlsseg<PseudoToIntrinsic<NAME, false>.Intrinsic, EEW, VLMul> {
+ RISCVVSSEG<NF, /*Masked*/0, /*Strided*/1, EEW, VLMul> {
let mayLoad = 0;
let mayStore = 1;
let hasSideEffects = 0;
let BaseInstr = !cast<Instruction>(PseudoToVInst<NAME>.VInst);
}
-class VPseudoSSegStoreMask<VReg ValClass, bits<7> EEW>:
+class VPseudoSSegStoreMask<VReg ValClass, bits<7> EEW, bits<4> NF>:
Pseudo<(outs),
(ins ValClass:$rd, GPR:$rs1, GPR: $offset,
VMaskOp:$vm, GPR:$vl, ixlenimm:$sew),[]>,
RISCVVPseudo,
- RISCVZvlsseg<PseudoToIntrinsic<NAME, true>.Intrinsic, EEW, VLMul> {
+ RISCVVSSEG<NF, /*Masked*/1, /*Strided*/1, EEW, VLMul> {
let mayLoad = 0;
let mayStore = 1;
let hasSideEffects = 0;
let BaseInstr = !cast<Instruction>(PseudoToVInst<NAME>.VInst);
}
-class VPseudoISegStoreNoMask<VReg ValClass, VReg IdxClass, bits<7> EEW, bits<3> LMUL>:
+class VPseudoISegStoreNoMask<VReg ValClass, VReg IdxClass, bits<7> EEW, bits<3> LMUL,
+ bits<4> NF, bit Ordered>:
Pseudo<(outs),
(ins ValClass:$rd, GPR:$rs1, IdxClass: $index,
GPR:$vl, ixlenimm:$sew),[]>,
RISCVVPseudo,
- RISCVZvlsseg<PseudoToIntrinsic<NAME, false>.Intrinsic, EEW, VLMul, LMUL> {
+ RISCVVSXSEG<NF, /*Masked*/0, Ordered, EEW, VLMul, LMUL> {
let mayLoad = 0;
let mayStore = 1;
let hasSideEffects = 0;
let BaseInstr = !cast<Instruction>(PseudoToVInst<NAME>.VInst);
}
-class VPseudoISegStoreMask<VReg ValClass, VReg IdxClass, bits<7> EEW, bits<3> LMUL>:
+class VPseudoISegStoreMask<VReg ValClass, VReg IdxClass, bits<7> EEW, bits<3> LMUL,
+ bits<4> NF, bit Ordered>:
Pseudo<(outs),
(ins ValClass:$rd, GPR:$rs1, IdxClass: $index,
VMaskOp:$vm, GPR:$vl, ixlenimm:$sew),[]>,
RISCVVPseudo,
- RISCVZvlsseg<PseudoToIntrinsic<NAME, true>.Intrinsic, EEW, VLMul, LMUL> {
+ RISCVVSXSEG<NF, /*Masked*/1, Ordered, EEW, VLMul, LMUL> {
let mayLoad = 0;
let mayStore = 1;
let hasSideEffects = 0;
defvar vreg = SegRegClass<lmul, nf>.RC;
defvar FFStr = !if(isFF, "FF", "");
def nf # "E" # eew # FFStr # "_V_" # LInfo :
- VPseudoUSSegLoadNoMask<vreg, eew>;
+ VPseudoUSSegLoadNoMask<vreg, eew, nf, isFF>;
def nf # "E" # eew # FFStr # "_V_" # LInfo # "_MASK" :
- VPseudoUSSegLoadMask<vreg, eew>;
+ VPseudoUSSegLoadMask<vreg, eew, nf, isFF>;
}
}
}
let VLMul = lmul.value in {
foreach nf = NFSet<lmul>.L in {
defvar vreg = SegRegClass<lmul, nf>.RC;
- def nf # "E" # eew # "_V_" # LInfo : VPseudoSSegLoadNoMask<vreg, eew>;
- def nf # "E" # eew # "_V_" # LInfo # "_MASK" : VPseudoSSegLoadMask<vreg, eew>;
+ def nf # "E" # eew # "_V_" # LInfo : VPseudoSSegLoadNoMask<vreg, eew, nf>;
+ def nf # "E" # eew # "_V_" # LInfo # "_MASK" : VPseudoSSegLoadMask<vreg, eew, nf>;
}
}
}
}
}
-multiclass VPseudoISegLoad {
+multiclass VPseudoISegLoad<bit Ordered> {
foreach idx_eew = EEWList in { // EEW for index argument.
foreach idx_lmul = MxSet<idx_eew>.m in { // LMUL for index argument.
foreach val_lmul = MxList.m in { // LMUL for the value.
foreach nf = NFSet<val_lmul>.L in {
defvar ValVreg = SegRegClass<val_lmul, nf>.RC;
def nf # "EI" # idx_eew # "_V_" # IdxLInfo # "_" # ValLInfo :
- VPseudoISegLoadNoMask<ValVreg, IdxVreg, idx_eew, idx_lmul.value>;
+ VPseudoISegLoadNoMask<ValVreg, IdxVreg, idx_eew, idx_lmul.value, nf, Ordered>;
def nf # "EI" # idx_eew # "_V_" # IdxLInfo # "_" # ValLInfo # "_MASK" :
- VPseudoISegLoadMask<ValVreg, IdxVreg, idx_eew, idx_lmul.value>;
+ VPseudoISegLoadMask<ValVreg, IdxVreg, idx_eew, idx_lmul.value, nf, Ordered>;
}
}
}
let VLMul = lmul.value in {
foreach nf = NFSet<lmul>.L in {
defvar vreg = SegRegClass<lmul, nf>.RC;
- def nf # "E" # eew # "_V_" # LInfo : VPseudoUSSegStoreNoMask<vreg, eew>;
- def nf # "E" # eew # "_V_" # LInfo # "_MASK" : VPseudoUSSegStoreMask<vreg, eew>;
+ def nf # "E" # eew # "_V_" # LInfo : VPseudoUSSegStoreNoMask<vreg, eew, nf>;
+ def nf # "E" # eew # "_V_" # LInfo # "_MASK" : VPseudoUSSegStoreMask<vreg, eew, nf>;
}
}
}
let VLMul = lmul.value in {
foreach nf = NFSet<lmul>.L in {
defvar vreg = SegRegClass<lmul, nf>.RC;
- def nf # "E" # eew # "_V_" # LInfo : VPseudoSSegStoreNoMask<vreg, eew>;
- def nf # "E" # eew # "_V_" # LInfo # "_MASK" : VPseudoSSegStoreMask<vreg, eew>;
+ def nf # "E" # eew # "_V_" # LInfo : VPseudoSSegStoreNoMask<vreg, eew, nf>;
+ def nf # "E" # eew # "_V_" # LInfo # "_MASK" : VPseudoSSegStoreMask<vreg, eew, nf>;
}
}
}
}
}
-multiclass VPseudoISegStore {
+multiclass VPseudoISegStore<bit Ordered> {
foreach idx_eew = EEWList in { // EEW for index argument.
foreach idx_lmul = MxSet<idx_eew>.m in { // LMUL for index argument.
foreach val_lmul = MxList.m in { // LMUL for the value.
foreach nf = NFSet<val_lmul>.L in {
defvar ValVreg = SegRegClass<val_lmul, nf>.RC;
def nf # "EI" # idx_eew # "_V_" # IdxLInfo # "_" # ValLInfo :
- VPseudoISegStoreNoMask<ValVreg, IdxVreg, idx_eew, idx_lmul.value>;
+ VPseudoISegStoreNoMask<ValVreg, IdxVreg, idx_eew, idx_lmul.value, nf, Ordered>;
def nf # "EI" # idx_eew # "_V_" # IdxLInfo # "_" # ValLInfo # "_MASK" :
- VPseudoISegStoreMask<ValVreg, IdxVreg, idx_eew, idx_lmul.value>;
+ VPseudoISegStoreMask<ValVreg, IdxVreg, idx_eew, idx_lmul.value, nf, Ordered>;
}
}
}
//===----------------------------------------------------------------------===//
defm PseudoVLSEG : VPseudoUSSegLoad</*isFF=*/false>;
defm PseudoVLSSEG : VPseudoSSegLoad;
-defm PseudoVLOXSEG : VPseudoISegLoad;
-defm PseudoVLUXSEG : VPseudoISegLoad;
+defm PseudoVLOXSEG : VPseudoISegLoad</*Ordered=*/true>;
+defm PseudoVLUXSEG : VPseudoISegLoad</*Ordered=*/false>;
defm PseudoVSSEG : VPseudoUSSegStore;
defm PseudoVSSSEG : VPseudoSSegStore;
-defm PseudoVSOXSEG : VPseudoISegStore;
-defm PseudoVSUXSEG : VPseudoISegStore;
+defm PseudoVSOXSEG : VPseudoISegStore</*Ordered=*/true>;
+defm PseudoVSUXSEG : VPseudoISegStore</*Ordered=*/false>;
// vlseg<nf>e<eew>ff.v may update VL register
let hasSideEffects = 1, Defs = [VL] in