#define GET_RISCVVLSEGTable_IMPL
#define GET_RISCVVLXSEGTable_IMPL
#define GET_RISCVVSXSEGTable_IMPL
+#define GET_RISCVVLXTable_IMPL
+#define GET_RISCVVSXTable_IMPL
#include "RISCVGenSearchableTables.inc"
} // namespace RISCV
} // namespace llvm
selectVLSEGFF(Node, /*IsMasked*/ true);
return;
}
+ // Custom-select the non-segment indexed vector load intrinsics
+ // (vloxei/vluxei and their masked forms) to the matching VLX pseudo.
+ case Intrinsic::riscv_vloxei:
+ case Intrinsic::riscv_vloxei_mask:
+ case Intrinsic::riscv_vluxei:
+ case Intrinsic::riscv_vluxei_mask: {
+ bool IsMasked = IntNo == Intrinsic::riscv_vloxei_mask ||
+ IntNo == Intrinsic::riscv_vluxei_mask;
+ // vloxei is the ordered-indexed form; vluxei is the unordered one.
+ bool IsOrdered = IntNo == Intrinsic::riscv_vloxei ||
+ IntNo == Intrinsic::riscv_vloxei_mask;
+
+ SDLoc DL(Node);
+ MVT VT = Node->getSimpleValueType(0);
+ // SEW operand of the pseudo is the data element width in bits.
+ unsigned ScalarSize = VT.getScalarSizeInBits();
+ MVT XLenVT = Subtarget->getXLenVT();
+ SDValue SEW = CurDAG->getTargetConstant(ScalarSize, DL, XLenVT);
+
+ // Operand 0 is the chain; the intrinsic's own arguments start at index 2.
+ unsigned CurOp = 2;
+ SmallVector<SDValue, 7> Operands;
+ // The masked pseudo takes a merge (tied passthru) operand first.
+ if (IsMasked)
+ Operands.push_back(Node->getOperand(CurOp++));
+ Operands.push_back(Node->getOperand(CurOp++)); // Base pointer.
+ Operands.push_back(Node->getOperand(CurOp++)); // Index.
+ MVT IndexVT = Operands.back()->getSimpleValueType(0);
+ if (IsMasked)
+ Operands.push_back(Node->getOperand(CurOp++)); // Mask.
+ SDValue VL;
+ selectVLOp(Node->getOperand(CurOp++), VL);
+ Operands.push_back(VL);
+ Operands.push_back(SEW);
+ Operands.push_back(Node->getOperand(0)); // Chain.
+
+ // Data and index vectors must have the same number of elements.
+ assert(VT.getVectorElementCount() == IndexVT.getVectorElementCount() &&
+ "Element count mismatch");
+
+ // Table lookup is keyed on the *index* element width plus both LMULs.
+ RISCVVLMUL LMUL = getLMUL(VT);
+ RISCVVLMUL IndexLMUL = getLMUL(IndexVT);
+ unsigned IndexScalarSize = IndexVT.getScalarSizeInBits();
+ const RISCV::VLX_VSXPseudo *P = RISCV::getVLXPseudo(
+ IsMasked, IsOrdered, IndexScalarSize, static_cast<unsigned>(LMUL),
+ static_cast<unsigned>(IndexLMUL));
+ SDNode *Load =
+ CurDAG->getMachineNode(P->Pseudo, DL, Node->getVTList(), Operands);
+ ReplaceNode(Node, Load);
+ return;
+ }
}
break;
}
case Intrinsic::riscv_vsuxseg8_mask:
selectVSXSEG(Node, /*IsMasked*/ true, /*IsOrdered*/ false);
return;
+ // Custom-select the non-segment indexed vector store intrinsics
+ // (vsoxei/vsuxei and their masked forms) to the matching VSX pseudo.
+ case Intrinsic::riscv_vsoxei:
+ case Intrinsic::riscv_vsoxei_mask:
+ case Intrinsic::riscv_vsuxei:
+ case Intrinsic::riscv_vsuxei_mask: {
+ bool IsMasked = IntNo == Intrinsic::riscv_vsoxei_mask ||
+ IntNo == Intrinsic::riscv_vsuxei_mask;
+ // vsoxei is the ordered-indexed form; vsuxei is the unordered one.
+ bool IsOrdered = IntNo == Intrinsic::riscv_vsoxei ||
+ IntNo == Intrinsic::riscv_vsoxei_mask;
+
+ SDLoc DL(Node);
+ // Operand 2 is the value being stored; its type drives SEW and LMUL.
+ MVT VT = Node->getOperand(2)->getSimpleValueType(0);
+ unsigned ScalarSize = VT.getScalarSizeInBits();
+ MVT XLenVT = Subtarget->getXLenVT();
+ SDValue SEW = CurDAG->getTargetConstant(ScalarSize, DL, XLenVT);
+
+ // Operand 0 is the chain; the intrinsic's own arguments start at index 2.
+ unsigned CurOp = 2;
+ SmallVector<SDValue, 6> Operands;
+ Operands.push_back(Node->getOperand(CurOp++)); // Store value.
+ Operands.push_back(Node->getOperand(CurOp++)); // Base pointer.
+ Operands.push_back(Node->getOperand(CurOp++)); // Index.
+ MVT IndexVT = Operands.back()->getSimpleValueType(0);
+ if (IsMasked)
+ Operands.push_back(Node->getOperand(CurOp++)); // Mask.
+ SDValue VL;
+ selectVLOp(Node->getOperand(CurOp++), VL);
+ Operands.push_back(VL);
+ Operands.push_back(SEW);
+ Operands.push_back(Node->getOperand(0)); // Chain.
+
+ // Data and index vectors must have the same number of elements.
+ assert(VT.getVectorElementCount() == IndexVT.getVectorElementCount() &&
+ "Element count mismatch");
+
+ // Table lookup is keyed on the *index* element width plus both LMULs.
+ RISCVVLMUL LMUL = getLMUL(VT);
+ RISCVVLMUL IndexLMUL = getLMUL(IndexVT);
+ unsigned IndexScalarSize = IndexVT.getScalarSizeInBits();
+ const RISCV::VLX_VSXPseudo *P = RISCV::getVSXPseudo(
+ IsMasked, IsOrdered, IndexScalarSize, static_cast<unsigned>(LMUL),
+ static_cast<unsigned>(IndexLMUL));
+ SDNode *Store =
+ CurDAG->getMachineNode(P->Pseudo, DL, Node->getVTList(), Operands);
+ ReplaceNode(Node, Store);
+ return;
+ }
}
break;
}
let PrimaryKeyName = "getRISCVVIntrinsicInfo";
}
+// Record mixed into each indexed-load pseudo so it lands in the searchable
+// VLX table. S is the index EEW; L is the data LMUL; IL is the index LMUL.
+class RISCVVLX<bit M, bit O, bits<7> S, bits<3> L, bits<3> IL> {
+ bits<1> Masked = M;
+ bits<1> Ordered = O;
+ bits<7> SEW = S;
+ bits<3> LMUL = L;
+ bits<3> IndexLMUL = IL;
+ // NAME is the enclosing pseudo being defined.
+ Pseudo Pseudo = !cast<Pseudo>(NAME);
+}
+
+// Generated table queried from C++ isel via RISCV::getVLXPseudo().
+def RISCVVLXTable : GenericTable {
+ let FilterClass = "RISCVVLX";
+ let CppTypeName = "VLX_VSXPseudo";
+ let Fields = ["Masked", "Ordered", "SEW", "LMUL", "IndexLMUL", "Pseudo"];
+ let PrimaryKey = ["Masked", "Ordered", "SEW", "LMUL", "IndexLMUL"];
+ let PrimaryKeyName = "getVLXPseudo";
+}
+
+// Record mixed into each indexed-store pseudo so it lands in the searchable
+// VSX table. Field layout is identical to RISCVVLX (shared VLX_VSXPseudo).
+class RISCVVSX<bit M, bit O, bits<7> S, bits<3> L, bits<3> IL> {
+ bits<1> Masked = M;
+ bits<1> Ordered = O;
+ bits<7> SEW = S;
+ bits<3> LMUL = L;
+ bits<3> IndexLMUL = IL;
+ // NAME is the enclosing pseudo being defined.
+ Pseudo Pseudo = !cast<Pseudo>(NAME);
+}
+
+// Generated table queried from C++ isel via RISCV::getVSXPseudo().
+def RISCVVSXTable : GenericTable {
+ let FilterClass = "RISCVVSX";
+ let CppTypeName = "VLX_VSXPseudo";
+ let Fields = ["Masked", "Ordered", "SEW", "LMUL", "IndexLMUL", "Pseudo"];
+ let PrimaryKey = ["Masked", "Ordered", "SEW", "LMUL", "IndexLMUL"];
+ let PrimaryKeyName = "getVSXPseudo";
+}
+
class RISCVVLSEG<bits<4> N, bit M, bit Str, bit F, bits<7> S, bits<3> L> {
bits<4> NF = N;
bits<1> Masked = M;
let BaseInstr = !cast<Instruction>(PseudoToVInst<NAME>.VInst);
}
-class VPseudoILoadNoMask<VReg RetClass, VReg IdxClass>:
+// New EEW/LMUL/Ordered parameters describe the index operand and ordering;
+// they only feed the RISCVVLX table record (the data LMUL comes from VLMul).
+class VPseudoILoadNoMask<VReg RetClass, VReg IdxClass, bits<7> EEW, bits<3> LMUL,
+ bit Ordered>:
Pseudo<(outs RetClass:$rd),
(ins GPR:$rs1, IdxClass:$rs2, GPR:$vl, ixlenimm:$sew),[]>,
- RISCVVPseudo {
+ RISCVVPseudo,
+ RISCVVLX</*Masked*/0, Ordered, EEW, VLMul, LMUL> {
let mayLoad = 1;
let mayStore = 0;
let hasSideEffects = 0;
let BaseInstr = !cast<Instruction>(PseudoToVInst<NAME>.VInst);
}
-class VPseudoILoadMask<VReg RetClass, VReg IdxClass>:
+// Masked variant: carries a $merge passthru and $vm mask operand, and
+// registers itself in the RISCVVLX table with Masked=1.
+class VPseudoILoadMask<VReg RetClass, VReg IdxClass, bits<7> EEW, bits<3> LMUL,
+ bit Ordered>:
Pseudo<(outs GetVRegNoV0<RetClass>.R:$rd),
(ins GetVRegNoV0<RetClass>.R:$merge,
GPR:$rs1, IdxClass:$rs2,
VMaskOp:$vm, GPR:$vl, ixlenimm:$sew),[]>,
- RISCVVPseudo {
+ RISCVVPseudo,
+ RISCVVLX</*Masked*/1, Ordered, EEW, VLMul, LMUL> {
let mayLoad = 1;
let mayStore = 0;
let hasSideEffects = 0;
let BaseInstr = !cast<Instruction>(PseudoToVInst<NAME>.VInst);
}
-class VPseudoIStoreNoMask<VReg StClass, VReg IdxClass>:
+// Store counterpart of VPseudoILoadNoMask; registers in the RISCVVSX table.
+class VPseudoIStoreNoMask<VReg StClass, VReg IdxClass, bits<7> EEW, bits<3> LMUL,
+ bit Ordered>:
Pseudo<(outs),
(ins StClass:$rd, GPR:$rs1, IdxClass:$rs2, GPR:$vl, ixlenimm:$sew),[]>,
- RISCVVPseudo {
+ RISCVVPseudo,
+ RISCVVSX</*Masked*/0, Ordered, EEW, VLMul, LMUL> {
let mayLoad = 0;
let mayStore = 1;
let hasSideEffects = 0;
let BaseInstr = !cast<Instruction>(PseudoToVInst<NAME>.VInst);
}
-class VPseudoIStoreMask<VReg StClass, VReg IdxClass>:
+// Masked store counterpart; registers in the RISCVVSX table with Masked=1.
+class VPseudoIStoreMask<VReg StClass, VReg IdxClass, bits<7> EEW, bits<3> LMUL,
+ bit Ordered>:
Pseudo<(outs),
(ins StClass:$rd, GPR:$rs1, IdxClass:$rs2, VMaskOp:$vm, GPR:$vl, ixlenimm:$sew),[]>,
- RISCVVPseudo {
+ RISCVVPseudo,
+ RISCVVSX</*Masked*/1, Ordered, EEW, VLMul, LMUL> {
let mayLoad = 0;
let mayStore = 1;
let hasSideEffects = 0;
}
}
-multiclass VPseudoILoad {
+// Ordered distinguishes the PseudoVLOX (ordered) from PseudoVLUX (unordered)
+// instantiations below; it flows into each pseudo's RISCVVLX table record.
+multiclass VPseudoILoad<bit Ordered> {
foreach eew = EEWList in {
foreach sew = EEWList in {
foreach lmul = MxSet<sew>.m in {
defvar Vreg = lmul.vrclass;
defvar IdxVreg = idx_lmul.vrclass;
let VLMul = lmul.value in {
- def "EI" # eew # "_V_" # IdxLInfo # "_" # LInfo : VPseudoILoadNoMask<Vreg, IdxVreg>;
- def "EI" # eew # "_V_" # IdxLInfo # "_" # LInfo # "_MASK" : VPseudoILoadMask<Vreg, IdxVreg>;
+ // eew/idx_lmul describe the index operand's element width and LMUL.
+ def "EI" # eew # "_V_" # IdxLInfo # "_" # LInfo :
+ VPseudoILoadNoMask<Vreg, IdxVreg, eew, idx_lmul.value, Ordered>;
+ def "EI" # eew # "_V_" # IdxLInfo # "_" # LInfo # "_MASK" :
+ VPseudoILoadMask<Vreg, IdxVreg, eew, idx_lmul.value, Ordered>;
}
}
}
}
}
-multiclass VPseudoIStore {
+// Ordered distinguishes the PseudoVSOX (ordered) from PseudoVSUX (unordered)
+// instantiations below; it flows into each pseudo's RISCVVSX table record.
+multiclass VPseudoIStore<bit Ordered> {
foreach eew = EEWList in {
foreach sew = EEWList in {
foreach lmul = MxSet<sew>.m in {
defvar IdxVreg = idx_lmul.vrclass;
let VLMul = lmul.value in {
def "EI" # eew # "_V_" # IdxLInfo # "_" # LInfo :
- VPseudoIStoreNoMask<Vreg, IdxVreg>;
+ VPseudoIStoreNoMask<Vreg, IdxVreg, eew, idx_lmul.value, Ordered>;
def "EI" # eew # "_V_" # IdxLInfo # "_" # LInfo # "_MASK" :
- VPseudoIStoreMask<Vreg, IdxVreg>;
+ VPseudoIStoreMask<Vreg, IdxVreg, eew, idx_lmul.value, Ordered>;
}
}
}
//===----------------------------------------------------------------------===//
// Vector Indexed Loads and Stores
-defm PseudoVLUX : VPseudoILoad;
-defm PseudoVLOX : VPseudoILoad;
-defm PseudoVSOX : VPseudoIStore;
-defm PseudoVSUX : VPseudoIStore;
+// "U" variants are unordered-indexed, "O" variants ordered-indexed; the flag
+// is recorded in the VLX/VSX search tables used by custom isel.
+defm PseudoVLUX : VPseudoILoad</*Ordered=*/false>;
+defm PseudoVLOX : VPseudoILoad</*Ordered=*/true>;
+defm PseudoVSOX : VPseudoIStore</*Ordered=*/true>;
+defm PseudoVSUX : VPseudoIStore</*Ordered=*/false>;
//===----------------------------------------------------------------------===//
// 7.7. Unit-stride Fault-Only-First Loads
vti.Vector, vti.Mask, vti.SEW, vti.LMul, vti.RegClass>;
}
-//===----------------------------------------------------------------------===//
-// 7.6 Vector Indexed Instructions
-//===----------------------------------------------------------------------===//
-
-foreach vti = AllVectors in
-foreach eew = EEWList in {
- defvar vlmul = vti.LMul;
- defvar octuple_lmul = octuple_from_str<vti.LMul.MX>.ret;
- defvar log_sew = shift_amount<vti.SEW>.val;
- // The data vector register group has EEW=SEW, EMUL=LMUL, while the offset
- // vector register group has EEW encoding in the instruction and EMUL=(EEW/SEW)*LMUL.
- // calculate octuple elmul which is (eew * octuple_lmul) >> log_sew
- defvar octuple_elmul = !srl(!mul(eew, octuple_lmul), log_sew);
- // legal octuple elmul should be more than 0 and less than equal 64
- if !gt(octuple_elmul, 0) then {
- if !le(octuple_elmul, 64) then {
- defvar elmul_str = octuple_to_str<octuple_elmul>.ret;
- defvar elmul =!cast<LMULInfo>("V_" # elmul_str);
- defvar idx_vti = !cast<VTypeInfo>("VI" # eew # elmul_str);
-
- defm : VPatILoad<"int_riscv_vluxei",
- "PseudoVLUXEI"#eew,
- vti.Vector, idx_vti.Vector, vti.Mask, vti.SEW,
- vlmul, elmul, vti.RegClass, idx_vti.RegClass>;
- defm : VPatILoad<"int_riscv_vloxei",
- "PseudoVLOXEI"#eew,
- vti.Vector, idx_vti.Vector, vti.Mask, vti.SEW,
- vlmul, elmul, vti.RegClass, idx_vti.RegClass>;
- defm : VPatIStore<"int_riscv_vsoxei",
- "PseudoVSOXEI"#eew,
- vti.Vector, idx_vti.Vector, vti.Mask, vti.SEW,
- vlmul, elmul, vti.RegClass, idx_vti.RegClass>;
- defm : VPatIStore<"int_riscv_vsuxei",
- "PseudoVSUXEI"#eew,
- vti.Vector, idx_vti.Vector, vti.Mask, vti.SEW,
- vlmul, elmul, vti.RegClass, idx_vti.RegClass>;
- }
- }
-}
} // Predicates = [HasStdExtV]
//===----------------------------------------------------------------------===//