}
void RISCVDAGToDAGISel::addVectorLoadStoreOperands(
- SDNode *Node, unsigned SEWImm, const SDLoc &DL, unsigned CurOp,
- bool IsMasked, bool IsStridedOrIndexed, SmallVectorImpl<SDValue> &Operands,
- MVT *IndexVT) {
+ SDNode *Node, unsigned SEW, const SDLoc &DL, unsigned CurOp, bool IsMasked,
+ bool IsStridedOrIndexed, SmallVectorImpl<SDValue> &Operands, MVT *IndexVT) {
SDValue Chain = Node->getOperand(0);
SDValue Glue;
Operands.push_back(VL);
MVT XLenVT = Subtarget->getXLenVT();
- SDValue SEW = CurDAG->getTargetConstant(SEWImm, DL, XLenVT);
- Operands.push_back(SEW);
+ SDValue SEWOp = CurDAG->getTargetConstant(Log2_32(SEW), DL, XLenVT);
+ Operands.push_back(SEWOp);
Operands.push_back(Chain); // Chain.
if (Glue)
VMNANDOpcode = RISCV::PseudoVMNAND_MM_M8;
break;
}
- SDValue SEW =
- CurDAG->getTargetConstant(Src1VT.getScalarSizeInBits(), DL, XLenVT);
+ SDValue SEW = CurDAG->getTargetConstant(
+ Log2_32(Src1VT.getScalarSizeInBits()), DL, XLenVT);
SDValue VL;
selectVLOp(Node->getOperand(3), VL);
VMANDNOTOpcode = RISCV::PseudoVMANDNOT_MM_M8;
break;
}
- SDValue SEW =
- CurDAG->getTargetConstant(Src1VT.getScalarSizeInBits(), DL, XLenVT);
+ SDValue SEW = CurDAG->getTargetConstant(
+ Log2_32(Src1VT.getScalarSizeInBits()), DL, XLenVT);
SDValue VL;
selectVLOp(Node->getOperand(5), VL);
SDValue MaskedOff = Node->getOperand(1);
MVT VT = Node->getSimpleValueType(0);
unsigned ScalarSize = VT.getScalarSizeInBits();
// VLE1 uses an SEW of 8.
- unsigned SEWImm = (IntNo == Intrinsic::riscv_vle1) ? 8 : ScalarSize;
+ unsigned SEW = (IntNo == Intrinsic::riscv_vle1) ? 8 : ScalarSize;
unsigned CurOp = 2;
SmallVector<SDValue, 8> Operands;
if (IsMasked)
Operands.push_back(Node->getOperand(CurOp++));
- addVectorLoadStoreOperands(Node, SEWImm, DL, CurOp, IsMasked, IsStrided,
+ addVectorLoadStoreOperands(Node, SEW, DL, CurOp, IsMasked, IsStrided,
Operands);
RISCVVLMUL LMUL = RISCVTargetLowering::getLMUL(VT);
MVT VT = Node->getOperand(2)->getSimpleValueType(0);
unsigned ScalarSize = VT.getScalarSizeInBits();
// VSE1 uses an SEW of 8.
- unsigned SEWImm = (IntNo == Intrinsic::riscv_vse1) ? 8 : ScalarSize;
+ unsigned SEW = (IntNo == Intrinsic::riscv_vse1) ? 8 : ScalarSize;
unsigned CurOp = 2;
SmallVector<SDValue, 8> Operands;
Operands.push_back(Node->getOperand(CurOp++)); // Store value.
- addVectorLoadStoreOperands(Node, SEWImm, DL, CurOp, IsMasked, IsStrided,
+ addVectorLoadStoreOperands(Node, SEW, DL, CurOp, IsMasked, IsStrided,
Operands);
RISCVVLMUL LMUL = RISCVTargetLowering::getLMUL(VT);
selectVLOp(Node->getOperand(1), VL);
unsigned ScalarSize = VT.getScalarSizeInBits();
- SDValue SEW = CurDAG->getTargetConstant(ScalarSize, DL, XLenVT);
+ SDValue SEW = CurDAG->getTargetConstant(Log2_32(ScalarSize), DL, XLenVT);
SDValue Operands[] = {Ld->getBasePtr(),
CurDAG->getRegister(RISCV::X0, XLenVT), VL, SEW,
DebugLoc DL = MI.getDebugLoc();
const TargetInstrInfo &TII = *MF.getSubtarget().getInstrInfo();
- unsigned SEW = MI.getOperand(SEWIndex).getImm();
- assert(RISCVVType::isValidSEW(SEW) && "Unexpected SEW");
- RISCVVSEW ElementWidth = static_cast<RISCVVSEW>(Log2_32(SEW / 8));
+ unsigned Log2SEW = MI.getOperand(SEWIndex).getImm();
+ assert(RISCVVType::isValidSEW(1 << Log2SEW) && "Unexpected SEW");
+ RISCVVSEW ElementWidth = static_cast<RISCVVSEW>(Log2SEW - 3);
MachineRegisterInfo &MRI = MF.getRegInfo();
ValueType Vector = Vec;
ValueType Mask = Mas;
int SEW = Sew;
+ int Log2SEW = shift_amount<Sew>.val;
VReg RegClass = Reg;
LMULInfo LMul = M;
ValueType Scalar = Scal;
// {SEW, VLMul} values set a valid VType to deal with this mask type.
// we assume SEW=8 and set corresponding LMUL.
int SEW = 8;
+ int Log2SEW = 3;
LMULInfo LMul = M;
string BX = Bx; // Appendix of mask operations.
// The pattern fragment which produces the AVL operand, representing the
VLOpFrag)),
(!cast<Instruction>(inst#"_M_"#mti.BX)
(mti.Mask VR:$rs2),
- GPR:$vl, mti.SEW)>;
+ GPR:$vl, mti.Log2SEW)>;
class VPatMaskUnaryMask<string intrinsic_name,
string inst,
(!cast<Instruction>(inst#"_M_"#mti.BX#"_MASK")
(mti.Mask VR:$merge),
(mti.Mask VR:$rs2),
- (mti.Mask V0), GPR:$vl, mti.SEW)>;
+ (mti.Mask V0), GPR:$vl, mti.Log2SEW)>;
class VPatUnaryAnyMask<string intrinsic,
string inst,
def : Pat<(XLenVT (!cast<Intrinsic>(intrinsic_name)
(mti.Mask VR:$rs1), VLOpFrag)),
(!cast<Instruction>(inst#"_M_"#mti.BX) $rs1,
- GPR:$vl, mti.SEW)>;
+ GPR:$vl, mti.Log2SEW)>;
def : Pat<(XLenVT (!cast<Intrinsic>(intrinsic_name # "_mask")
(mti.Mask VR:$rs1), (mti.Mask V0), VLOpFrag)),
(!cast<Instruction>(inst#"_M_"#mti.BX#"_MASK") $rs1,
- (mti.Mask V0), GPR:$vl, mti.SEW)>;
+ (mti.Mask V0), GPR:$vl, mti.Log2SEW)>;
}
}
foreach vti = vtilist in {
def : VPatUnaryAnyMask<intrinsic, instruction, "VM",
vti.Vector, vti.Vector, vti.Mask,
- vti.SEW, vti.LMul, vti.RegClass,
+ vti.Log2SEW, vti.LMul, vti.RegClass,
vti.RegClass>;
}
}
{
foreach vti = AllIntegerVectors in {
def : VPatUnaryNoMask<intrinsic, instruction, "M", vti.Vector, vti.Mask,
- vti.SEW, vti.LMul, VR>;
+ vti.Log2SEW, vti.LMul, VR>;
def : VPatUnaryMask<intrinsic, instruction, "M", vti.Vector, vti.Mask,
- vti.Mask, vti.SEW, vti.LMul, vti.RegClass, VR>;
+ vti.Mask, vti.Log2SEW, vti.LMul, vti.RegClass, VR>;
}
}
defvar fti = vtiTofti.Fti;
def : VPatUnaryNoMask<intrinsic, instruction, suffix,
vti.Vector, fti.Vector,
- vti.SEW, vti.LMul, fti.RegClass>;
+ vti.Log2SEW, vti.LMul, fti.RegClass>;
def : VPatUnaryMask<intrinsic, instruction, suffix,
vti.Vector, fti.Vector, vti.Mask,
- vti.SEW, vti.LMul, vti.RegClass, fti.RegClass>;
+ vti.Log2SEW, vti.LMul, vti.RegClass, fti.RegClass>;
}
}
foreach vti = vtilist in {
def : VPatUnaryNoMask<intrinsic, instruction, "V",
vti.Vector, vti.Vector,
- vti.SEW, vti.LMul, vti.RegClass>;
+ vti.Log2SEW, vti.LMul, vti.RegClass>;
def : VPatUnaryMask<intrinsic, instruction, "V",
vti.Vector, vti.Vector, vti.Mask,
- vti.SEW, vti.LMul, vti.RegClass, vti.RegClass>;
+ vti.Log2SEW, vti.LMul, vti.RegClass, vti.RegClass>;
}
}
def : Pat<(vti.Vector (!cast<Intrinsic>(intrinsic)
VLOpFrag)),
(!cast<Instruction>(instruction#"_V_" # vti.LMul.MX)
- GPR:$vl, vti.SEW)>;
+ GPR:$vl, vti.Log2SEW)>;
def : Pat<(vti.Vector (!cast<Intrinsic>(intrinsic # "_mask")
(vti.Vector vti.RegClass:$merge),
(vti.Mask V0), VLOpFrag)),
(!cast<Instruction>(instruction#"_V_" # vti.LMul.MX # "_MASK")
vti.RegClass:$merge, (vti.Mask V0),
- GPR:$vl, vti.SEW)>;
+ GPR:$vl, vti.Log2SEW)>;
}
}
def : Pat<(mti.Mask (!cast<Intrinsic>(intrinsic)
(XLenVT (VLOp (XLenVT (XLenVT GPR:$vl)))))),
(!cast<Instruction>(inst#"_M_"#mti.BX)
- GPR:$vl, mti.SEW)>;
+ GPR:$vl, mti.Log2SEW)>;
}
multiclass VPatBinary<string intrinsic,
foreach vti = vtilist in
defm : VPatBinary<intrinsic, instruction # "_VV_" # vti.LMul.MX,
vti.Vector, vti.Vector, vti.Vector,vti.Mask,
- vti.SEW, vti.RegClass,
+ vti.Log2SEW, vti.RegClass,
vti.RegClass, vti.RegClass>;
}
defvar ivti = GetIntVTypeInfo<vti>.Vti;
defm : VPatBinary<intrinsic, instruction # "_VV_" # vti.LMul.MX,
vti.Vector, vti.Vector, ivti.Vector, vti.Mask,
- vti.SEW, vti.RegClass,
+ vti.Log2SEW, vti.RegClass,
vti.RegClass, vti.RegClass>;
}
}
// emul = lmul * eew / sew
defvar vlmul = vti.LMul;
defvar octuple_lmul = octuple_from_str<vlmul.MX>.ret;
- defvar octuple_emul = !srl(!mul(octuple_lmul, eew), shift_amount<vti.SEW>.val);
+ defvar octuple_emul = !srl(!mul(octuple_lmul, eew), vti.Log2SEW);
if !and(!ge(octuple_emul, 1), !le(octuple_emul, 64)) then {
defvar emul_str = octuple_to_str<octuple_emul>.ret;
defvar ivti = !cast<VTypeInfo>("VI" # eew # emul_str);
defvar inst = instruction # "_VV_" # vti.LMul.MX # "_" # emul_str;
defm : VPatBinary<intrinsic, inst,
vti.Vector, vti.Vector, ivti.Vector, vti.Mask,
- vti.SEW, vti.RegClass,
+ vti.Log2SEW, vti.RegClass,
vti.RegClass, ivti.RegClass>;
}
}
defvar kind = "V"#vti.ScalarSuffix;
defm : VPatBinary<intrinsic, instruction#"_"#kind#"_"#vti.LMul.MX,
vti.Vector, vti.Vector, vti.Scalar, vti.Mask,
- vti.SEW, vti.RegClass,
+ vti.Log2SEW, vti.RegClass,
vti.RegClass, vti.ScalarRegClass>;
}
}
foreach vti = vtilist in
defm : VPatBinary<intrinsic, instruction # "_VX_" # vti.LMul.MX,
vti.Vector, vti.Vector, XLenVT, vti.Mask,
- vti.SEW, vti.RegClass,
+ vti.Log2SEW, vti.RegClass,
vti.RegClass, GPR>;
}
foreach vti = vtilist in
defm : VPatBinary<intrinsic, instruction # "_VI_" # vti.LMul.MX,
vti.Vector, vti.Vector, XLenVT, vti.Mask,
- vti.SEW, vti.RegClass,
+ vti.Log2SEW, vti.RegClass,
vti.RegClass, imm_type>;
}
foreach mti = AllMasks in
def : VPatBinaryNoMask<intrinsic, instruction # "_MM_" # mti.LMul.MX,
mti.Mask, mti.Mask, mti.Mask,
- mti.SEW, VR, VR>;
+ mti.Log2SEW, VR, VR>;
}
multiclass VPatBinaryW_VV<string intrinsic, string instruction,
defvar Wti = VtiToWti.Wti;
defm : VPatBinary<intrinsic, instruction # "_VV_" # Vti.LMul.MX,
Wti.Vector, Vti.Vector, Vti.Vector, Vti.Mask,
- Vti.SEW, Wti.RegClass,
+ Vti.Log2SEW, Wti.RegClass,
Vti.RegClass, Vti.RegClass>;
}
}
defvar kind = "V"#Vti.ScalarSuffix;
defm : VPatBinary<intrinsic, instruction#"_"#kind#"_"#Vti.LMul.MX,
Wti.Vector, Vti.Vector, Vti.Scalar, Vti.Mask,
- Vti.SEW, Wti.RegClass,
+ Vti.Log2SEW, Wti.RegClass,
Vti.RegClass, Vti.ScalarRegClass>;
}
}
defvar Wti = VtiToWti.Wti;
defm : VPatBinary<intrinsic, instruction # "_WV_" # Vti.LMul.MX,
Wti.Vector, Wti.Vector, Vti.Vector, Vti.Mask,
- Vti.SEW, Wti.RegClass,
+ Vti.Log2SEW, Wti.RegClass,
Wti.RegClass, Vti.RegClass>;
}
}
defvar kind = "W"#Vti.ScalarSuffix;
defm : VPatBinary<intrinsic, instruction#"_"#kind#"_"#Vti.LMul.MX,
Wti.Vector, Wti.Vector, Vti.Scalar, Vti.Mask,
- Vti.SEW, Wti.RegClass,
+ Vti.Log2SEW, Wti.RegClass,
Wti.RegClass, Vti.ScalarRegClass>;
}
}
defvar Wti = VtiToWti.Wti;
defm : VPatBinary<intrinsic, instruction # "_WV_" # Vti.LMul.MX,
Vti.Vector, Wti.Vector, Vti.Vector, Vti.Mask,
- Vti.SEW, Vti.RegClass,
+ Vti.Log2SEW, Vti.RegClass,
Wti.RegClass, Vti.RegClass>;
}
}
defvar kind = "W"#Vti.ScalarSuffix;
defm : VPatBinary<intrinsic, instruction#"_"#kind#"_"#Vti.LMul.MX,
Vti.Vector, Wti.Vector, Vti.Scalar, Vti.Mask,
- Vti.SEW, Vti.RegClass,
+ Vti.Log2SEW, Vti.RegClass,
Wti.RegClass, Vti.ScalarRegClass>;
}
}
defvar Wti = VtiToWti.Wti;
defm : VPatBinary<intrinsic, instruction # "_WI_" # Vti.LMul.MX,
Vti.Vector, Wti.Vector, XLenVT, Vti.Mask,
- Vti.SEW, Vti.RegClass,
+ Vti.Log2SEW, Vti.RegClass,
Wti.RegClass, uimm5>;
}
}
defm : VPatBinaryCarryIn<intrinsic, instruction, "VVM",
!if(CarryOut, vti.Mask, vti.Vector),
vti.Vector, vti.Vector, vti.Mask,
- vti.SEW, vti.LMul,
+ vti.Log2SEW, vti.LMul,
vti.RegClass, vti.RegClass>;
}
"V"#vti.ScalarSuffix#"M",
!if(CarryOut, vti.Mask, vti.Vector),
vti.Vector, vti.Scalar, vti.Mask,
- vti.SEW, vti.LMul,
+ vti.Log2SEW, vti.LMul,
vti.RegClass, vti.ScalarRegClass>;
}
defm : VPatBinaryCarryIn<intrinsic, instruction, "VIM",
!if(CarryOut, vti.Mask, vti.Vector),
vti.Vector, XLenVT, vti.Mask,
- vti.SEW, vti.LMul,
+ vti.Log2SEW, vti.LMul,
vti.RegClass, simm5>;
}
foreach vti = AllIntegerVectors in
defm : VPatBinaryMaskOut<intrinsic, instruction, "VV",
vti.Mask, vti.Vector, vti.Vector,
- vti.SEW, vti.LMul,
+ vti.Log2SEW, vti.LMul,
vti.RegClass, vti.RegClass>;
}
foreach vti = AllIntegerVectors in
defm : VPatBinaryMaskOut<intrinsic, instruction, "VX",
vti.Mask, vti.Vector, XLenVT,
- vti.SEW, vti.LMul,
+ vti.Log2SEW, vti.LMul,
vti.RegClass, GPR>;
}
foreach vti = AllIntegerVectors in
defm : VPatBinaryMaskOut<intrinsic, instruction, "VI",
vti.Mask, vti.Vector, XLenVT,
- vti.SEW, vti.LMul,
+ vti.Log2SEW, vti.LMul,
vti.RegClass, simm5>;
}
foreach vti = vtilist in
defm : VPatBinary<intrinsic, instruction # "_VV_" # vti.LMul.MX,
vti.Mask, vti.Vector, vti.Vector, vti.Mask,
- vti.SEW, VR,
+ vti.Log2SEW, VR,
vti.RegClass, vti.RegClass>;
}
foreach vti = vtilist in
defm : VPatBinarySwapped<intrinsic, instruction # "_VV_" # vti.LMul.MX,
vti.Mask, vti.Vector, vti.Vector, vti.Mask,
- vti.SEW, VR,
+ vti.Log2SEW, VR,
vti.RegClass, vti.RegClass>;
}
defvar kind = "V"#vti.ScalarSuffix;
defm : VPatBinary<intrinsic, instruction#"_"#kind#"_"#vti.LMul.MX,
vti.Mask, vti.Vector, vti.Scalar, vti.Mask,
- vti.SEW, VR,
+ vti.Log2SEW, VR,
vti.RegClass, vti.ScalarRegClass>;
}
}
foreach vti = vtilist in
defm : VPatBinary<intrinsic, instruction # "_VI_" # vti.LMul.MX,
vti.Mask, vti.Vector, XLenVT, vti.Mask,
- vti.SEW, VR,
+ vti.Log2SEW, VR,
vti.RegClass, simm5>;
}
foreach vti = vtilist in
defm : VPatTernary<intrinsic, instruction, "VV",
vti.Vector, vti.Vector, vti.Vector, vti.Mask,
- vti.SEW, vti.LMul, vti.RegClass,
+ vti.Log2SEW, vti.LMul, vti.RegClass,
vti.RegClass, vti.RegClass>;
}
foreach vti = vtilist in
defm : VPatTernary<intrinsic, instruction, "VX",
vti.Vector, vti.Vector, XLenVT, vti.Mask,
- vti.SEW, vti.LMul, vti.RegClass,
+ vti.Log2SEW, vti.LMul, vti.RegClass,
vti.RegClass, GPR>;
}
defm : VPatTernary<intrinsic, instruction,
"V"#vti.ScalarSuffix,
vti.Vector, vti.Scalar, vti.Vector, vti.Mask,
- vti.SEW, vti.LMul, vti.RegClass,
+ vti.Log2SEW, vti.LMul, vti.RegClass,
vti.ScalarRegClass, vti.RegClass>;
}
foreach vti = vtilist in
defm : VPatTernary<intrinsic, instruction, "VI",
vti.Vector, vti.Vector, XLenVT, vti.Mask,
- vti.SEW, vti.LMul, vti.RegClass,
+ vti.Log2SEW, vti.LMul, vti.RegClass,
vti.RegClass, Imm_type>;
}
defvar wti = vtiToWti.Wti;
defm : VPatTernary<intrinsic, instruction, "VV",
wti.Vector, vti.Vector, vti.Vector,
- vti.Mask, vti.SEW, vti.LMul,
+ vti.Mask, vti.Log2SEW, vti.LMul,
wti.RegClass, vti.RegClass, vti.RegClass>;
}
}
defm : VPatTernary<intrinsic, instruction,
"V"#vti.ScalarSuffix,
wti.Vector, vti.Scalar, vti.Vector,
- vti.Mask, vti.SEW, vti.LMul,
+ vti.Mask, vti.Log2SEW, vti.LMul,
wti.RegClass, vti.ScalarRegClass, vti.RegClass>;
}
}
defm : VPatTernary<intrinsic, instruction, "VS",
vectorM1.Vector, vti.Vector,
vectorM1.Vector, vti.Mask,
- vti.SEW, vti.LMul,
+ vti.Log2SEW, vti.LMul,
VR, vti.RegClass, VR>;
}
foreach gvti = !if(IsFloat, GroupFloatVectors, GroupIntegerVectors) in
defm : VPatTernary<intrinsic, instruction, "VS",
gvti.VectorM1, gvti.Vector,
gvti.VectorM1, gvti.Mask,
- gvti.SEW, gvti.LMul,
+ gvti.Log2SEW, gvti.LMul,
VR, gvti.RegClass, VR>;
}
}
defm : VPatTernary<intrinsic, instruction, "VS",
wtiM1.Vector, vti.Vector,
wtiM1.Vector, vti.Mask,
- vti.SEW, vti.LMul,
+ vti.Log2SEW, vti.LMul,
wtiM1.RegClass, vti.RegClass,
wtiM1.RegClass>;
}
defvar ivti = GetIntVTypeInfo<fvti>.Vti;
defm : VPatConversion<intrinsic, instruction, "V",
- ivti.Vector, fvti.Vector, ivti.Mask, fvti.SEW,
+ ivti.Vector, fvti.Vector, ivti.Mask, fvti.Log2SEW,
fvti.LMul, ivti.RegClass, fvti.RegClass>;
}
}
defvar ivti = GetIntVTypeInfo<fvti>.Vti;
defm : VPatConversion<intrinsic, instruction, "V",
- fvti.Vector, ivti.Vector, fvti.Mask, ivti.SEW,
+ fvti.Vector, ivti.Vector, fvti.Mask, ivti.Log2SEW,
ivti.LMul, fvti.RegClass, ivti.RegClass>;
}
}
defvar iwti = GetIntVTypeInfo<fvtiToFWti.Wti>.Vti;
defm : VPatConversion<intrinsic, instruction, "V",
- iwti.Vector, fvti.Vector, iwti.Mask, fvti.SEW,
+ iwti.Vector, fvti.Vector, iwti.Mask, fvti.Log2SEW,
fvti.LMul, iwti.RegClass, fvti.RegClass>;
}
}
defvar fwti = vtiToWti.Wti;
defm : VPatConversion<intrinsic, instruction, "V",
- fwti.Vector, vti.Vector, fwti.Mask, vti.SEW,
+ fwti.Vector, vti.Vector, fwti.Mask, vti.Log2SEW,
vti.LMul, fwti.RegClass, vti.RegClass>;
}
}
defvar fwti = fvtiToFWti.Wti;
defm : VPatConversion<intrinsic, instruction, "V",
- fwti.Vector, fvti.Vector, fwti.Mask, fvti.SEW,
+ fwti.Vector, fvti.Vector, fwti.Mask, fvti.Log2SEW,
fvti.LMul, fwti.RegClass, fvti.RegClass>;
}
}
defvar fwti = vtiToWti.Wti;
defm : VPatConversion<intrinsic, instruction, "W",
- vti.Vector, fwti.Vector, vti.Mask, vti.SEW,
+ vti.Vector, fwti.Vector, vti.Mask, vti.Log2SEW,
vti.LMul, vti.RegClass, fwti.RegClass>;
}
}
defvar iwti = GetIntVTypeInfo<fvtiToFWti.Wti>.Vti;
defm : VPatConversion<intrinsic, instruction, "W",
- fvti.Vector, iwti.Vector, fvti.Mask, fvti.SEW,
+ fvti.Vector, iwti.Vector, fvti.Mask, fvti.Log2SEW,
fvti.LMul, fvti.RegClass, iwti.RegClass>;
}
}
defvar fwti = fvtiToFWti.Wti;
defm : VPatConversion<intrinsic, instruction, "W",
- fvti.Vector, fwti.Vector, fvti.Mask, fvti.SEW,
+ fvti.Vector, fwti.Vector, fvti.Mask, fvti.Log2SEW,
fvti.LMul, fvti.RegClass, fwti.RegClass>;
}
}
if !or(!eq(vti.SEW, 32), !eq(vti.SEW, 64)) then {
defvar octuple_lmul = octuple_from_str<vti.LMul.MX>.ret;
// Calculate emul = eew * lmul / sew
- defvar octuple_emul = !srl(!mul(eew, octuple_lmul), shift_amount<vti.SEW>.val);
+ defvar octuple_emul = !srl(!mul(eew, octuple_lmul), vti.Log2SEW);
if !and(!ge(octuple_emul, 1), !le(octuple_emul, 64)) then {
defvar emulMX = octuple_to_str<octuple_emul>.ret;
defvar offsetVti = !cast<VTypeInfo>("VI" # eew # emulMX);
defvar inst_ei = inst # "EI" # eew;
defm : VPatAMOWD<intrinsic, inst_ei,
vti.Vector, offsetVti.Vector,
- vti.Mask, vti.SEW, vti.LMul, offsetVti.LMul, offsetVti.RegClass>;
+ vti.Mask, vti.Log2SEW, vti.LMul, offsetVti.LMul, offsetVti.RegClass>;
}
}
}
(!cast<Instruction>("PseudoVSUB_VV_"#vti.LMul.MX) vti.RegClass:$rs1,
vti.RegClass:$rs2,
GPR:$vl,
- vti.SEW)>;
+ vti.Log2SEW)>;
def : Pat<(vti.Vector (int_riscv_vrsub_mask (vti.Vector vti.RegClass:$merge),
(vti.Vector vti.RegClass:$rs2),
(vti.Vector vti.RegClass:$rs1),
vti.RegClass:$rs2,
(vti.Mask V0),
GPR:$vl,
- vti.SEW)>;
+ vti.Log2SEW)>;
// Match VSUB with a small immediate to vadd.vi by negating the immediate.
def : Pat<(vti.Vector (int_riscv_vsub (vti.Vector vti.RegClass:$rs1),
(!cast<Instruction>("PseudoVADD_VI_"#vti.LMul.MX) vti.RegClass:$rs1,
(NegImm simm5_plus1:$rs2),
GPR:$vl,
- vti.SEW)>;
+ vti.Log2SEW)>;
def : Pat<(vti.Vector (int_riscv_vsub_mask (vti.Vector vti.RegClass:$merge),
(vti.Vector vti.RegClass:$rs1),
(vti.Scalar simm5_plus1:$rs2),
(NegImm simm5_plus1:$rs2),
(vti.Mask V0),
GPR:$vl,
- vti.SEW)>;
+ vti.Log2SEW)>;
}
//===----------------------------------------------------------------------===//
(!cast<Instruction>("PseudoVMSLE_VI_"#vti.LMul.MX) vti.RegClass:$rs1,
(DecImm simm5_plus1:$rs2),
GPR:$vl,
- vti.SEW)>;
+ vti.Log2SEW)>;
def : Pat<(vti.Mask (int_riscv_vmslt_mask (vti.Mask VR:$merge),
(vti.Vector vti.RegClass:$rs1),
(vti.Scalar simm5_plus1:$rs2),
(DecImm simm5_plus1:$rs2),
(vti.Mask V0),
GPR:$vl,
- vti.SEW)>;
+ vti.Log2SEW)>;
def : Pat<(vti.Mask (int_riscv_vmsltu (vti.Vector vti.RegClass:$rs1),
(vti.Scalar simm5_plus1:$rs2),
(!cast<Instruction>("PseudoVMSLEU_VI_"#vti.LMul.MX) vti.RegClass:$rs1,
(DecImm simm5_plus1:$rs2),
GPR:$vl,
- vti.SEW)>;
+ vti.Log2SEW)>;
def : Pat<(vti.Mask (int_riscv_vmsltu_mask (vti.Mask VR:$merge),
(vti.Vector vti.RegClass:$rs1),
(vti.Scalar simm5_plus1:$rs2),
(DecImm simm5_plus1:$rs2),
(vti.Mask V0),
GPR:$vl,
- vti.SEW)>;
+ vti.Log2SEW)>;
// Special cases to avoid matching vmsltu.vi 0 (always false) to
// vmsleu.vi -1 (always true). Instead match to vmsne.vv.
(!cast<Instruction>("PseudoVMSNE_VV_"#vti.LMul.MX) vti.RegClass:$rs1,
vti.RegClass:$rs1,
GPR:$vl,
- vti.SEW)>;
+ vti.Log2SEW)>;
def : Pat<(vti.Mask (int_riscv_vmsltu_mask (vti.Mask VR:$merge),
(vti.Vector vti.RegClass:$rs1),
(vti.Scalar 0),
vti.RegClass:$rs1,
(vti.Mask V0),
GPR:$vl,
- vti.SEW)>;
+ vti.Log2SEW)>;
def : Pat<(vti.Mask (int_riscv_vmsge (vti.Vector vti.RegClass:$rs1),
(vti.Scalar simm5_plus1:$rs2),
(!cast<Instruction>("PseudoVMSGT_VI_"#vti.LMul.MX) vti.RegClass:$rs1,
(DecImm simm5_plus1:$rs2),
GPR:$vl,
- vti.SEW)>;
+ vti.Log2SEW)>;
def : Pat<(vti.Mask (int_riscv_vmsge_mask (vti.Mask VR:$merge),
(vti.Vector vti.RegClass:$rs1),
(vti.Scalar simm5_plus1:$rs2),
(DecImm simm5_plus1:$rs2),
(vti.Mask V0),
GPR:$vl,
- vti.SEW)>;
+ vti.Log2SEW)>;
def : Pat<(vti.Mask (int_riscv_vmsgeu (vti.Vector vti.RegClass:$rs1),
(vti.Scalar simm5_plus1:$rs2),
(!cast<Instruction>("PseudoVMSGTU_VI_"#vti.LMul.MX) vti.RegClass:$rs1,
(DecImm simm5_plus1:$rs2),
GPR:$vl,
- vti.SEW)>;
+ vti.Log2SEW)>;
def : Pat<(vti.Mask (int_riscv_vmsgeu_mask (vti.Mask VR:$merge),
(vti.Vector vti.RegClass:$rs1),
(vti.Scalar simm5_plus1:$rs2),
(DecImm simm5_plus1:$rs2),
(vti.Mask V0),
GPR:$vl,
- vti.SEW)>;
+ vti.Log2SEW)>;
// Special cases to avoid matching vmsgeu.vi 0 (always true) to
// vmsgtu.vi -1 (always false). Instead match to vmsne.vv.
(!cast<Instruction>("PseudoVMSEQ_VV_"#vti.LMul.MX) vti.RegClass:$rs1,
vti.RegClass:$rs1,
GPR:$vl,
- vti.SEW)>;
+ vti.Log2SEW)>;
def : Pat<(vti.Mask (int_riscv_vmsgeu_mask (vti.Mask VR:$merge),
(vti.Vector vti.RegClass:$rs1),
(vti.Scalar 0),
vti.RegClass:$rs1,
(vti.Mask V0),
GPR:$vl,
- vti.SEW)>;
+ vti.Log2SEW)>;
}
//===----------------------------------------------------------------------===//
def : Pat<(vti.Vector (int_riscv_vmv_v_v (vti.Vector vti.RegClass:$rs1),
VLOpFrag)),
(!cast<Instruction>("PseudoVMV_V_V_"#vti.LMul.MX)
- $rs1, GPR:$vl, vti.SEW)>;
+ $rs1, GPR:$vl, vti.Log2SEW)>;
// vmv.v.x/vmv.v.i are handled in RISCInstrVInstrInfoVVLPatterns.td
}
def : Pat<(fvti.Vector (int_riscv_vfmerge (fvti.Vector fvti.RegClass:$rs2),
(fvti.Scalar (fpimm0)),
(fvti.Mask V0), VLOpFrag)),
- (instr fvti.RegClass:$rs2, 0, (fvti.Mask V0), GPR:$vl, fvti.SEW)>;
+ (instr fvti.RegClass:$rs2, 0, (fvti.Mask V0), GPR:$vl, fvti.Log2SEW)>;
}
//===----------------------------------------------------------------------===//
let Predicates = [HasStdExtV] in {
foreach vti = AllIntegerVectors in {
def : Pat<(riscv_vmv_x_s (vti.Vector vti.RegClass:$rs2)),
- (!cast<Instruction>("PseudoVMV_X_S_" # vti.LMul.MX) $rs2, vti.SEW)>;
+ (!cast<Instruction>("PseudoVMV_X_S_" # vti.LMul.MX) $rs2, vti.Log2SEW)>;
// vmv.s.x is handled with a custom node in RISCVInstrInfoVVLPatterns.td
}
} // Predicates = [HasStdExtV]
defvar instr = !cast<Instruction>("PseudoVFMV_"#fvti.ScalarSuffix#"_S_" #
fvti.LMul.MX);
def : Pat<(fvti.Scalar (int_riscv_vfmv_f_s (fvti.Vector fvti.RegClass:$rs2))),
- (instr $rs2, fvti.SEW)>;
+ (instr $rs2, fvti.Log2SEW)>;
def : Pat<(fvti.Vector (int_riscv_vfmv_s_f (fvti.Vector fvti.RegClass:$rs1),
(fvti.Scalar fvti.ScalarRegClass:$rs2), VLOpFrag)),
fvti.LMul.MX)
(fvti.Vector $rs1),
(fvti.Scalar fvti.ScalarRegClass:$rs2),
- GPR:$vl, fvti.SEW)>;
+ GPR:$vl, fvti.Log2SEW)>;
}
} // Predicates = [HasStdExtV, HasStdExtF]
}
multiclass VPatUSLoadStoreSDNode<ValueType type,
- int sew,
+ int log2sew,
LMULInfo vlmul,
OutPatFrag avl,
- VReg reg_class>
+ VReg reg_class,
+ // Defaulted parameter: recover the element width in bits from its
+ // log2 so the PseudoVLE<sew>/PseudoVSE<sew> instruction-name
+ // concatenations below still resolve. Callers pass only log2sew.
+ int sew = !shl(1, log2sew)>
{
defvar load_instr = !cast<Instruction>("PseudoVLE"#sew#"_V_"#vlmul.MX);
defvar store_instr = !cast<Instruction>("PseudoVSE"#sew#"_V_"#vlmul.MX);
// Load
def : Pat<(type (load BaseAddr:$rs1)),
- (load_instr BaseAddr:$rs1, avl, sew)>;
+ // Pseudo's SEW operand now carries log2(SEW), matching the
+ // Log2SEW convention used throughout this change.
+ (load_instr BaseAddr:$rs1, avl, log2sew)>;
// Store
def : Pat<(store type:$rs2, BaseAddr:$rs1),
- (store_instr reg_class:$rs2, BaseAddr:$rs1, avl, sew)>;
+ (store_instr reg_class:$rs2, BaseAddr:$rs1, avl, log2sew)>;
}
multiclass VPatUSLoadStoreWholeVRSDNode<ValueType type,
- int sew,
+ int log2sew,
LMULInfo vlmul,
- VReg reg_class>
+ VReg reg_class,
+ int sew = !shl(1, log2sew)>
{
defvar load_instr =
!cast<Instruction>("VL"#!substr(vlmul.MX, 1)#"RE"#sew#"_V");
defvar store_instr = !cast<Instruction>("PseudoVSE1_V_"#m.BX);
// Load
def : Pat<(m.Mask (load BaseAddr:$rs1)),
- (load_instr BaseAddr:$rs1, m.AVL, m.SEW)>;
+ (load_instr BaseAddr:$rs1, m.AVL, m.Log2SEW)>;
// Store
def : Pat<(store m.Mask:$rs2, BaseAddr:$rs1),
- (store_instr VR:$rs2, BaseAddr:$rs1, m.AVL, m.SEW)>;
+ (store_instr VR:$rs2, BaseAddr:$rs1, m.AVL, m.Log2SEW)>;
}
class VPatBinarySDNode_VV<SDNode vop,
{
foreach vti = AllIntegerVectors in {
def : VPatBinarySDNode_VV<vop, instruction_name,
- vti.Vector, vti.Vector, vti.Mask, vti.SEW,
+ vti.Vector, vti.Vector, vti.Mask, vti.Log2SEW,
vti.LMul, vti.AVL, vti.RegClass, vti.RegClass>;
def : VPatBinarySDNode_XI<vop, instruction_name, "VX",
- vti.Vector, vti.Vector, vti.Mask, vti.SEW,
+ vti.Vector, vti.Vector, vti.Mask, vti.Log2SEW,
vti.LMul, vti.AVL, vti.RegClass, vti.RegClass,
SplatPat, GPR>;
}
{
foreach vti = AllIntegerVectors in {
def : VPatBinarySDNode_VV<vop, instruction_name,
- vti.Vector, vti.Vector, vti.Mask, vti.SEW,
+ vti.Vector, vti.Vector, vti.Mask, vti.Log2SEW,
vti.LMul, vti.AVL, vti.RegClass, vti.RegClass>;
def : VPatBinarySDNode_XI<vop, instruction_name, "VX",
- vti.Vector, vti.Vector, vti.Mask, vti.SEW,
+ vti.Vector, vti.Vector, vti.Mask, vti.Log2SEW,
vti.LMul, vti.AVL, vti.RegClass, vti.RegClass,
SplatPat, GPR>;
def : VPatBinarySDNode_XI<vop, instruction_name, "VI",
- vti.Vector, vti.Vector, vti.Mask, vti.SEW,
+ vti.Vector, vti.Vector, vti.Mask, vti.Log2SEW,
vti.LMul, vti.AVL, vti.RegClass, vti.RegClass,
!cast<ComplexPattern>(SplatPat#_#ImmType),
ImmType>;
multiclass VPatBinaryFPSDNode_VV_VF<SDNode vop, string instruction_name> {
foreach vti = AllFloatVectors in {
def : VPatBinarySDNode_VV<vop, instruction_name,
- vti.Vector, vti.Vector, vti.Mask, vti.SEW,
+ vti.Vector, vti.Vector, vti.Mask, vti.Log2SEW,
vti.LMul, vti.AVL, vti.RegClass, vti.RegClass>;
def : VPatBinarySDNode_VF<vop, instruction_name#"_V"#vti.ScalarSuffix,
vti.Vector, vti.Vector, vti.Scalar, vti.Mask,
- vti.SEW, vti.LMul, vti.AVL, vti.RegClass, vti.RegClass,
+ vti.Log2SEW, vti.LMul, vti.AVL, vti.RegClass, vti.RegClass,
vti.ScalarRegClass>;
}
}
(!cast<Instruction>(instruction_name#"_V"#fvti.ScalarSuffix#"_"#fvti.LMul.MX)
fvti.RegClass:$rs1,
(fvti.Scalar fvti.ScalarRegClass:$rs2),
- fvti.AVL, fvti.SEW)>;
+ fvti.AVL, fvti.Log2SEW)>;
}
multiclass VPatIntegerSetCCSDNode_VV<CondCode cc,
SwapHelper<(instruction),
(instruction vti.RegClass:$rs1),
(instruction vti.RegClass:$rs2),
- (instruction vti.AVL, vti.SEW),
+ (instruction vti.AVL, vti.Log2SEW),
swap>.Value>;
}
}
SwapHelper<(instruction),
(instruction vti.RegClass:$rs1),
(instruction xop_kind:$rs2),
- (instruction vti.AVL, vti.SEW),
+ (instruction vti.AVL, vti.Log2SEW),
swap>.Value>;
}
}
(vti.Vector (splatpat_kind simm5:$rs2)),
cc)),
(instruction vti.RegClass:$rs1, (DecImm simm5:$rs2),
- vti.AVL, vti.SEW)>;
+ vti.AVL, vti.Log2SEW)>;
}
}
(fvti.Vector fvti.RegClass:$rs2),
cc)),
(!cast<Instruction>(inst_name#"_VV_"#fvti.LMul.MX)
- fvti.RegClass:$rs1, fvti.RegClass:$rs2, fvti.AVL, fvti.SEW)>;
+ fvti.RegClass:$rs1, fvti.RegClass:$rs2, fvti.AVL, fvti.Log2SEW)>;
def : Pat<(fvti.Mask (setcc (fvti.Vector fvti.RegClass:$rs1),
(splat_vector fvti.ScalarRegClass:$rs2),
cc)),
(!cast<Instruction>(inst_name#"_V"#fvti.ScalarSuffix#"_"#fvti.LMul.MX)
fvti.RegClass:$rs1, fvti.ScalarRegClass:$rs2,
- fvti.AVL, fvti.SEW)>;
+ fvti.AVL, fvti.Log2SEW)>;
def : Pat<(fvti.Mask (setcc (splat_vector fvti.ScalarRegClass:$rs2),
(fvti.Vector fvti.RegClass:$rs1),
cc)),
(!cast<Instruction>(swapped_op_inst_name#"_V"#fvti.ScalarSuffix#"_"#fvti.LMul.MX)
fvti.RegClass:$rs1, fvti.ScalarRegClass:$rs2,
- fvti.AVL, fvti.SEW)>;
+ fvti.AVL, fvti.Log2SEW)>;
}
}
foreach op = ops in
def : Pat<(vti.Vector (op (fti.Vector fti.RegClass:$rs2))),
(!cast<Instruction>(inst_name#"_"#suffix#"_"#vti.LMul.MX)
- fti.RegClass:$rs2, fti.AVL, vti.SEW)>;
+ fti.RegClass:$rs2, fti.AVL, vti.Log2SEW)>;
}
}
defvar ivti = GetIntVTypeInfo<fvti>.Vti;
def : Pat<(fvti.Vector (vop (ivti.Vector ivti.RegClass:$rs1))),
(!cast<Instruction>(instruction_name#"_"#fvti.LMul.MX)
- ivti.RegClass:$rs1, fvti.AVL, fvti.SEW)>;
+ ivti.RegClass:$rs1, fvti.AVL, fvti.Log2SEW)>;
}
}
defvar ivti = GetIntVTypeInfo<fvti>.Vti;
def : Pat<(ivti.Vector (vop (fvti.Vector fvti.RegClass:$rs1))),
(!cast<Instruction>(instruction_name#"_"#ivti.LMul.MX)
- fvti.RegClass:$rs1, ivti.AVL, ivti.SEW)>;
+ fvti.RegClass:$rs1, ivti.AVL, ivti.Log2SEW)>;
}
}
defvar fwti = vtiToWti.Wti;
def : Pat<(fwti.Vector (vop (ivti.Vector ivti.RegClass:$rs1))),
(!cast<Instruction>(instruction_name#"_"#ivti.LMul.MX)
- ivti.RegClass:$rs1, ivti.AVL, ivti.SEW)>;
+ ivti.RegClass:$rs1, ivti.AVL, ivti.Log2SEW)>;
}
}
defvar iwti = GetIntVTypeInfo<fvtiToFWti.Wti>.Vti;
def : Pat<(iwti.Vector (vop (fvti.Vector fvti.RegClass:$rs1))),
(!cast<Instruction>(instruction_name#"_"#fvti.LMul.MX)
- fvti.RegClass:$rs1, fvti.AVL, fvti.SEW)>;
+ fvti.RegClass:$rs1, fvti.AVL, fvti.Log2SEW)>;
}
}
defvar iwti = GetIntVTypeInfo<fvtiToFWti.Wti>.Vti;
def : Pat<(fvti.Vector (vop (iwti.Vector iwti.RegClass:$rs1))),
(!cast<Instruction>(instruction_name#"_"#fvti.LMul.MX)
- iwti.RegClass:$rs1, fvti.AVL, fvti.SEW)>;
+ iwti.RegClass:$rs1, fvti.AVL, fvti.Log2SEW)>;
}
}
defvar fwti = vtiToWti.Wti;
def : Pat<(vti.Vector (vop (fwti.Vector fwti.RegClass:$rs1))),
(!cast<Instruction>(instruction_name#"_"#vti.LMul.MX)
- fwti.RegClass:$rs1, vti.AVL, vti.SEW)>;
+ fwti.RegClass:$rs1, vti.AVL, vti.Log2SEW)>;
}
}
// 7.4. Vector Unit-Stride Instructions
foreach vti = !listconcat(FractionalGroupIntegerVectors,
FractionalGroupFloatVectors) in
- defm : VPatUSLoadStoreSDNode<vti.Vector, vti.SEW, vti.LMul,
- vti.AVL, vti.RegClass>;
+ defm : VPatUSLoadStoreSDNode<vti.Vector, vti.Log2SEW, vti.LMul,
+ vti.AVL, vti.RegClass>;
foreach vti = [VI8M1, VI16M1, VI32M1, VI64M1, VF16M1, VF32M1, VF64M1] in
- defm : VPatUSLoadStoreWholeVRSDNode<vti.Vector, vti.SEW, vti.LMul,
- vti.RegClass>;
+ defm : VPatUSLoadStoreWholeVRSDNode<vti.Vector, vti.Log2SEW, vti.LMul,
+ vti.RegClass>;
foreach vti = !listconcat(GroupIntegerVectors, GroupFloatVectors) in
- defm : VPatUSLoadStoreWholeVRSDNode<vti.Vector, vti.SEW, vti.LMul,
- vti.RegClass>;
+ defm : VPatUSLoadStoreWholeVRSDNode<vti.Vector, vti.Log2SEW, vti.LMul,
+ vti.RegClass>;
foreach mti = AllMasks in
defm : VPatUSLoadStoreMaskSDNode<mti>;
def : Pat<(sub (vti.Vector (SplatPat GPR:$rs2)),
(vti.Vector vti.RegClass:$rs1)),
(!cast<Instruction>("PseudoVRSUB_VX_"# vti.LMul.MX)
- vti.RegClass:$rs1, GPR:$rs2, vti.AVL, vti.SEW)>;
+ vti.RegClass:$rs1, GPR:$rs2, vti.AVL, vti.Log2SEW)>;
def : Pat<(sub (vti.Vector (SplatPat_simm5 simm5:$rs2)),
(vti.Vector vti.RegClass:$rs1)),
(!cast<Instruction>("PseudoVRSUB_VI_"# vti.LMul.MX)
- vti.RegClass:$rs1, simm5:$rs2, vti.AVL, vti.SEW)>;
+ vti.RegClass:$rs1, simm5:$rs2, vti.AVL, vti.Log2SEW)>;
}
// 12.3. Vector Integer Extension
vti.RegClass:$rs2)),
(!cast<Instruction>("PseudoVMERGE_VVM_"#vti.LMul.MX)
vti.RegClass:$rs2, vti.RegClass:$rs1, VMV0:$vm,
- vti.AVL, vti.SEW)>;
+ vti.AVL, vti.Log2SEW)>;
def : Pat<(vti.Vector (vselect (vti.Mask VMV0:$vm), (SplatPat XLenVT:$rs1),
vti.RegClass:$rs2)),
(!cast<Instruction>("PseudoVMERGE_VXM_"#vti.LMul.MX)
- vti.RegClass:$rs2, GPR:$rs1, VMV0:$vm, vti.AVL, vti.SEW)>;
+ vti.RegClass:$rs2, GPR:$rs1, VMV0:$vm, vti.AVL, vti.Log2SEW)>;
def : Pat<(vti.Vector (vselect (vti.Mask VMV0:$vm), (SplatPat_simm5 simm5:$rs1),
vti.RegClass:$rs2)),
(!cast<Instruction>("PseudoVMERGE_VIM_"#vti.LMul.MX)
- vti.RegClass:$rs2, simm5:$rs1, VMV0:$vm, vti.AVL, vti.SEW)>;
+ vti.RegClass:$rs2, simm5:$rs1, VMV0:$vm, vti.AVL, vti.Log2SEW)>;
}
// 16.1. Vector Mask-Register Logical Instructions
foreach mti = AllMasks in {
def : Pat<(mti.Mask (and VR:$rs1, VR:$rs2)),
(!cast<Instruction>("PseudoVMAND_MM_"#mti.LMul.MX)
- VR:$rs1, VR:$rs2, mti.AVL, mti.SEW)>;
+ VR:$rs1, VR:$rs2, mti.AVL, mti.Log2SEW)>;
def : Pat<(mti.Mask (or VR:$rs1, VR:$rs2)),
(!cast<Instruction>("PseudoVMOR_MM_"#mti.LMul.MX)
- VR:$rs1, VR:$rs2, mti.AVL, mti.SEW)>;
+ VR:$rs1, VR:$rs2, mti.AVL, mti.Log2SEW)>;
def : Pat<(mti.Mask (xor VR:$rs1, VR:$rs2)),
(!cast<Instruction>("PseudoVMXOR_MM_"#mti.LMul.MX)
- VR:$rs1, VR:$rs2, mti.AVL, mti.SEW)>;
+ VR:$rs1, VR:$rs2, mti.AVL, mti.Log2SEW)>;
def : Pat<(mti.Mask (vnot (and VR:$rs1, VR:$rs2))),
(!cast<Instruction>("PseudoVMNAND_MM_"#mti.LMul.MX)
- VR:$rs1, VR:$rs2, mti.AVL, mti.SEW)>;
+ VR:$rs1, VR:$rs2, mti.AVL, mti.Log2SEW)>;
def : Pat<(mti.Mask (vnot (or VR:$rs1, VR:$rs2))),
(!cast<Instruction>("PseudoVMNOR_MM_"#mti.LMul.MX)
- VR:$rs1, VR:$rs2, mti.AVL, mti.SEW)>;
+ VR:$rs1, VR:$rs2, mti.AVL, mti.Log2SEW)>;
def : Pat<(mti.Mask (vnot (xor VR:$rs1, VR:$rs2))),
(!cast<Instruction>("PseudoVMXNOR_MM_"#mti.LMul.MX)
- VR:$rs1, VR:$rs2, mti.AVL, mti.SEW)>;
+ VR:$rs1, VR:$rs2, mti.AVL, mti.Log2SEW)>;
def : Pat<(mti.Mask (and VR:$rs1, (vnot VR:$rs2))),
(!cast<Instruction>("PseudoVMANDNOT_MM_"#mti.LMul.MX)
- VR:$rs1, VR:$rs2, mti.AVL, mti.SEW)>;
+ VR:$rs1, VR:$rs2, mti.AVL, mti.Log2SEW)>;
def : Pat<(mti.Mask (or VR:$rs1, (vnot VR:$rs2))),
(!cast<Instruction>("PseudoVMORNOT_MM_"#mti.LMul.MX)
- VR:$rs1, VR:$rs2, mti.AVL, mti.SEW)>;
+ VR:$rs1, VR:$rs2, mti.AVL, mti.Log2SEW)>;
// Handle vnot the same as the vnot.mm pseudoinstruction.
def : Pat<(mti.Mask (vnot VR:$rs)),
(!cast<Instruction>("PseudoVMNAND_MM_"#mti.LMul.MX)
- VR:$rs, VR:$rs, mti.AVL, mti.SEW)>;
+ VR:$rs, VR:$rs, mti.AVL, mti.Log2SEW)>;
}
} // Predicates = [HasStdExtV]
fvti.RegClass:$rs2)),
(!cast<Instruction>("PseudoVFMADD_VV_"# suffix)
fvti.RegClass:$rd, fvti.RegClass:$rs1, fvti.RegClass:$rs2,
- fvti.AVL, fvti.SEW)>;
+ fvti.AVL, fvti.Log2SEW)>;
def : Pat<(fvti.Vector (fma fvti.RegClass:$rs1, fvti.RegClass:$rd,
(fneg fvti.RegClass:$rs2))),
(!cast<Instruction>("PseudoVFMSUB_VV_"# suffix)
fvti.RegClass:$rd, fvti.RegClass:$rs1, fvti.RegClass:$rs2,
- fvti.AVL, fvti.SEW)>;
+ fvti.AVL, fvti.Log2SEW)>;
def : Pat<(fvti.Vector (fma (fneg fvti.RegClass:$rs1), fvti.RegClass:$rd,
(fneg fvti.RegClass:$rs2))),
(!cast<Instruction>("PseudoVFNMADD_VV_"# suffix)
fvti.RegClass:$rd, fvti.RegClass:$rs1, fvti.RegClass:$rs2,
- fvti.AVL, fvti.SEW)>;
+ fvti.AVL, fvti.Log2SEW)>;
def : Pat<(fvti.Vector (fma (fneg fvti.RegClass:$rs1), fvti.RegClass:$rd,
fvti.RegClass:$rs2)),
(!cast<Instruction>("PseudoVFNMSUB_VV_"# suffix)
fvti.RegClass:$rd, fvti.RegClass:$rs1, fvti.RegClass:$rs2,
- fvti.AVL, fvti.SEW)>;
+ fvti.AVL, fvti.Log2SEW)>;
// The choice of VFMADD here is arbitrary, vfmadd.vf and vfmacc.vf are equally
// commutable.
fvti.RegClass:$rd, fvti.RegClass:$rs2)),
(!cast<Instruction>("PseudoVFMADD_V" # fvti.ScalarSuffix # "_" # suffix)
fvti.RegClass:$rd, fvti.ScalarRegClass:$rs1, fvti.RegClass:$rs2,
- fvti.AVL, fvti.SEW)>;
+ fvti.AVL, fvti.Log2SEW)>;
def : Pat<(fvti.Vector (fma (splat_vector fvti.ScalarRegClass:$rs1),
fvti.RegClass:$rd, (fneg fvti.RegClass:$rs2))),
(!cast<Instruction>("PseudoVFMSUB_V" # fvti.ScalarSuffix # "_" # suffix)
fvti.RegClass:$rd, fvti.ScalarRegClass:$rs1, fvti.RegClass:$rs2,
- fvti.AVL, fvti.SEW)>;
+ fvti.AVL, fvti.Log2SEW)>;
def : Pat<(fvti.Vector (fma (splat_vector fvti.ScalarRegClass:$rs1),
(fneg fvti.RegClass:$rd), (fneg fvti.RegClass:$rs2))),
(!cast<Instruction>("PseudoVFNMADD_V" # fvti.ScalarSuffix # "_" # suffix)
fvti.RegClass:$rd, fvti.ScalarRegClass:$rs1, fvti.RegClass:$rs2,
- fvti.AVL, fvti.SEW)>;
+ fvti.AVL, fvti.Log2SEW)>;
def : Pat<(fvti.Vector (fma (splat_vector fvti.ScalarRegClass:$rs1),
(fneg fvti.RegClass:$rd), fvti.RegClass:$rs2)),
(!cast<Instruction>("PseudoVFNMSUB_V" # fvti.ScalarSuffix # "_" # suffix)
fvti.RegClass:$rd, fvti.ScalarRegClass:$rs1, fvti.RegClass:$rs2,
- fvti.AVL, fvti.SEW)>;
+ fvti.AVL, fvti.Log2SEW)>;
// The splat might be negated.
def : Pat<(fvti.Vector (fma (fneg (splat_vector fvti.ScalarRegClass:$rs1)),
fvti.RegClass:$rd, (fneg fvti.RegClass:$rs2))),
(!cast<Instruction>("PseudoVFNMADD_V" # fvti.ScalarSuffix # "_" # suffix)
fvti.RegClass:$rd, fvti.ScalarRegClass:$rs1, fvti.RegClass:$rs2,
- fvti.AVL, fvti.SEW)>;
+ fvti.AVL, fvti.Log2SEW)>;
def : Pat<(fvti.Vector (fma (fneg (splat_vector fvti.ScalarRegClass:$rs1)),
fvti.RegClass:$rd, fvti.RegClass:$rs2)),
(!cast<Instruction>("PseudoVFNMSUB_V" # fvti.ScalarSuffix # "_" # suffix)
fvti.RegClass:$rd, fvti.ScalarRegClass:$rs1, fvti.RegClass:$rs2,
- fvti.AVL, fvti.SEW)>;
+ fvti.AVL, fvti.Log2SEW)>;
}
foreach vti = AllFloatVectors in {
// 14.8. Vector Floating-Point Square-Root Instruction
def : Pat<(fsqrt (vti.Vector vti.RegClass:$rs2)),
(!cast<Instruction>("PseudoVFSQRT_V_"# vti.LMul.MX)
- vti.RegClass:$rs2, vti.AVL, vti.SEW)>;
+ vti.RegClass:$rs2, vti.AVL, vti.Log2SEW)>;
// 14.12. Vector Floating-Point Sign-Injection Instructions
def : Pat<(fabs (vti.Vector vti.RegClass:$rs)),
(!cast<Instruction>("PseudoVFSGNJX_VV_"# vti.LMul.MX)
- vti.RegClass:$rs, vti.RegClass:$rs, vti.AVL, vti.SEW)>;
+ vti.RegClass:$rs, vti.RegClass:$rs, vti.AVL, vti.Log2SEW)>;
// Handle fneg with VFSGNJN using the same input for both operands.
def : Pat<(fneg (vti.Vector vti.RegClass:$rs)),
(!cast<Instruction>("PseudoVFSGNJN_VV_"# vti.LMul.MX)
- vti.RegClass:$rs, vti.RegClass:$rs, vti.AVL, vti.SEW)>;
+ vti.RegClass:$rs, vti.RegClass:$rs, vti.AVL, vti.Log2SEW)>;
def : Pat<(vti.Vector (fcopysign (vti.Vector vti.RegClass:$rs1),
(vti.Vector vti.RegClass:$rs2))),
(!cast<Instruction>("PseudoVFSGNJ_VV_"# vti.LMul.MX)
- vti.RegClass:$rs1, vti.RegClass:$rs2, vti.AVL, vti.SEW)>;
+ vti.RegClass:$rs1, vti.RegClass:$rs2, vti.AVL, vti.Log2SEW)>;
def : Pat<(vti.Vector (fcopysign (vti.Vector vti.RegClass:$rs1),
(vti.Vector (splat_vector vti.ScalarRegClass:$rs2)))),
(!cast<Instruction>("PseudoVFSGNJ_V"#vti.ScalarSuffix#"_"#vti.LMul.MX)
- vti.RegClass:$rs1, vti.ScalarRegClass:$rs2, vti.AVL, vti.SEW)>;
+ vti.RegClass:$rs1, vti.ScalarRegClass:$rs2, vti.AVL, vti.Log2SEW)>;
def : Pat<(vti.Vector (fcopysign (vti.Vector vti.RegClass:$rs1),
(vti.Vector (fneg vti.RegClass:$rs2)))),
(!cast<Instruction>("PseudoVFSGNJN_VV_"# vti.LMul.MX)
- vti.RegClass:$rs1, vti.RegClass:$rs2, vti.AVL, vti.SEW)>;
+ vti.RegClass:$rs1, vti.RegClass:$rs2, vti.AVL, vti.Log2SEW)>;
def : Pat<(vti.Vector (fcopysign (vti.Vector vti.RegClass:$rs1),
(vti.Vector (fneg (splat_vector vti.ScalarRegClass:$rs2))))),
(!cast<Instruction>("PseudoVFSGNJN_V"#vti.ScalarSuffix#"_"#vti.LMul.MX)
- vti.RegClass:$rs1, vti.ScalarRegClass:$rs2, vti.AVL, vti.SEW)>;
+ vti.RegClass:$rs1, vti.ScalarRegClass:$rs2, vti.AVL, vti.Log2SEW)>;
}
// 14.11. Vector Floating-Point MIN/MAX Instructions
fvti.RegClass:$rs2)),
(!cast<Instruction>("PseudoVMERGE_VVM_"#fvti.LMul.MX)
fvti.RegClass:$rs2, fvti.RegClass:$rs1, VMV0:$vm,
- fvti.AVL, fvti.SEW)>;
+ fvti.AVL, fvti.Log2SEW)>;
def : Pat<(fvti.Vector (vselect (fvti.Mask VMV0:$vm),
(splat_vector fvti.ScalarRegClass:$rs1),
(!cast<Instruction>("PseudoVFMERGE_V"#fvti.ScalarSuffix#"M_"#fvti.LMul.MX)
fvti.RegClass:$rs2,
(fvti.Scalar fvti.ScalarRegClass:$rs1),
- VMV0:$vm, fvti.AVL, fvti.SEW)>;
+ VMV0:$vm, fvti.AVL, fvti.Log2SEW)>;
def : Pat<(fvti.Vector (vselect (fvti.Mask VMV0:$vm),
(splat_vector (fvti.Scalar fpimm0)),
fvti.RegClass:$rs2)),
(!cast<Instruction>("PseudoVMERGE_VIM_"#fvti.LMul.MX)
- fvti.RegClass:$rs2, 0, VMV0:$vm, fvti.AVL, fvti.SEW)>;
+ fvti.RegClass:$rs2, 0, VMV0:$vm, fvti.AVL, fvti.Log2SEW)>;
}
// 14.17. Vector Single-Width Floating-Point/Integer Type-Convert Instructions
defvar fwti = fvtiToFWti.Wti;
def : Pat<(fwti.Vector (fpextend (fvti.Vector fvti.RegClass:$rs1))),
(!cast<Instruction>("PseudoVFWCVT_F_F_V_"#fvti.LMul.MX)
- fvti.RegClass:$rs1, fvti.AVL, fvti.SEW)>;
+ fvti.RegClass:$rs1, fvti.AVL, fvti.Log2SEW)>;
}
// 14.19. Narrowing Floating-Point/Integer Type-Convert Instructions
defvar fwti = fvtiToFWti.Wti;
def : Pat<(fvti.Vector (fpround (fwti.Vector fwti.RegClass:$rs1))),
(!cast<Instruction>("PseudoVFNCVT_F_F_W_"#fvti.LMul.MX)
- fwti.RegClass:$rs1, fvti.AVL, fvti.SEW)>;
+ fwti.RegClass:$rs1, fvti.AVL, fvti.Log2SEW)>;
}
} // Predicates = [HasStdExtV, HasStdExtF]
foreach vti = AllIntegerVectors in {
def : Pat<(vti.Vector (SplatPat GPR:$rs1)),
(!cast<Instruction>("PseudoVMV_V_X_" # vti.LMul.MX)
- GPR:$rs1, vti.AVL, vti.SEW)>;
+ GPR:$rs1, vti.AVL, vti.Log2SEW)>;
def : Pat<(vti.Vector (SplatPat_simm5 simm5:$rs1)),
(!cast<Instruction>("PseudoVMV_V_I_" # vti.LMul.MX)
- simm5:$rs1, vti.AVL, vti.SEW)>;
+ simm5:$rs1, vti.AVL, vti.Log2SEW)>;
}
foreach mti = AllMasks in {
def : Pat<(mti.Mask immAllOnesV),
- (!cast<Instruction>("PseudoVMSET_M_"#mti.BX) mti.AVL, mti.SEW)>;
+ (!cast<Instruction>("PseudoVMSET_M_"#mti.BX) mti.AVL, mti.Log2SEW)>;
def : Pat<(mti.Mask immAllZerosV),
- (!cast<Instruction>("PseudoVMCLR_M_"#mti.BX) mti.AVL, mti.SEW)>;
+ (!cast<Instruction>("PseudoVMCLR_M_"#mti.BX) mti.AVL, mti.Log2SEW)>;
}
} // Predicates = [HasStdExtV]
def : Pat<(fvti.Vector (splat_vector fvti.ScalarRegClass:$rs1)),
(!cast<Instruction>("PseudoVFMV_V_"#fvti.ScalarSuffix#"_"#fvti.LMul.MX)
(fvti.Scalar fvti.ScalarRegClass:$rs1),
- fvti.AVL, fvti.SEW)>;
+ fvti.AVL, fvti.Log2SEW)>;
def : Pat<(fvti.Vector (splat_vector (fvti.Scalar fpimm0))),
(!cast<Instruction>("PseudoVMV_V_I_"#fvti.LMul.MX)
- 0, fvti.AVL, fvti.SEW)>;
+ 0, fvti.AVL, fvti.Log2SEW)>;
}
} // Predicates = [HasStdExtV, HasStdExtF]
// other index will have been custom-lowered to slide the vector correctly
// into place.
def : Pat<(vti.Scalar (extractelt (vti.Vector vti.RegClass:$rs2), 0)),
- (vmv_f_s_inst vti.RegClass:$rs2, vti.SEW)>;
+ (vmv_f_s_inst vti.RegClass:$rs2, vti.Log2SEW)>;
}
multiclass VPatBinaryVL_VV_VX<SDNode vop, string instruction_name> {
foreach vti = AllIntegerVectors in {
def : VPatBinaryVL_VV<vop, instruction_name,
- vti.Vector, vti.Vector, vti.Mask, vti.SEW,
+ vti.Vector, vti.Vector, vti.Mask, vti.Log2SEW,
vti.LMul, vti.RegClass, vti.RegClass>;
def : VPatBinaryVL_XI<vop, instruction_name, "VX",
- vti.Vector, vti.Vector, vti.Mask, vti.SEW,
+ vti.Vector, vti.Vector, vti.Mask, vti.Log2SEW,
vti.LMul, vti.RegClass, vti.RegClass,
SplatPat, GPR>;
}
Operand ImmType = simm5> {
foreach vti = AllIntegerVectors in {
def : VPatBinaryVL_VV<vop, instruction_name,
- vti.Vector, vti.Vector, vti.Mask, vti.SEW,
+ vti.Vector, vti.Vector, vti.Mask, vti.Log2SEW,
vti.LMul, vti.RegClass, vti.RegClass>;
def : VPatBinaryVL_XI<vop, instruction_name, "VX",
- vti.Vector, vti.Vector, vti.Mask, vti.SEW,
+ vti.Vector, vti.Vector, vti.Mask, vti.Log2SEW,
vti.LMul, vti.RegClass, vti.RegClass,
SplatPat, GPR>;
def : VPatBinaryVL_XI<vop, instruction_name, "VI",
- vti.Vector, vti.Vector, vti.Mask, vti.SEW,
+ vti.Vector, vti.Vector, vti.Mask, vti.Log2SEW,
vti.LMul, vti.RegClass, vti.RegClass,
!cast<ComplexPattern>(SplatPat#_#ImmType),
ImmType>;
multiclass VPatBinaryFPVL_VV_VF<SDNode vop, string instruction_name> {
foreach vti = AllFloatVectors in {
def : VPatBinaryVL_VV<vop, instruction_name,
- vti.Vector, vti.Vector, vti.Mask, vti.SEW,
+ vti.Vector, vti.Vector, vti.Mask, vti.Log2SEW,
vti.LMul, vti.RegClass, vti.RegClass>;
def : VPatBinaryVL_VF<vop, instruction_name#"_V"#vti.ScalarSuffix,
- vti.Vector, vti.Vector, vti.Mask, vti.SEW,
+ vti.Vector, vti.Vector, vti.Mask, vti.Log2SEW,
vti.LMul, vti.RegClass, vti.RegClass,
vti.ScalarRegClass>;
}
VLOpFrag)),
(!cast<Instruction>(instruction_name#"_V"#fvti.ScalarSuffix#"_"#fvti.LMul.MX)
fvti.RegClass:$rs1, fvti.ScalarRegClass:$rs2,
- GPR:$vl, fvti.SEW)>;
+ GPR:$vl, fvti.Log2SEW)>;
}
multiclass VPatIntegerSetCCVL_VV<VTypeInfo vti, string instruction_name,
VLOpFrag)),
(!cast<Instruction>(instruction_name#"_VV_"#vti.LMul.MX)
vti.RegClass:$rs1, vti.RegClass:$rs2, GPR:$vl,
- vti.SEW)>;
+ vti.Log2SEW)>;
}
// Inherits from VPatIntegerSetCCVL_VV and adds a pattern with operands swapped.
VLOpFrag)),
(!cast<Instruction>(instruction_name#"_VV_"#vti.LMul.MX)
vti.RegClass:$rs1, vti.RegClass:$rs2, GPR:$vl,
- vti.SEW)>;
+ vti.Log2SEW)>;
}
multiclass VPatIntegerSetCCVL_VX_Swappable<VTypeInfo vti, string instruction_name,
(SplatPat (XLenVT GPR:$rs2)), cc,
(vti.Mask true_mask),
VLOpFrag)),
- (instruction vti.RegClass:$rs1, GPR:$rs2, GPR:$vl, vti.SEW)>;
+ (instruction vti.RegClass:$rs1, GPR:$rs2, GPR:$vl, vti.Log2SEW)>;
def : Pat<(vti.Mask (riscv_setcc_vl (SplatPat (XLenVT GPR:$rs2)),
(vti.Vector vti.RegClass:$rs1), invcc,
(vti.Mask true_mask),
VLOpFrag)),
- (instruction vti.RegClass:$rs1, GPR:$rs2, GPR:$vl, vti.SEW)>;
+ (instruction vti.RegClass:$rs1, GPR:$rs2, GPR:$vl, vti.Log2SEW)>;
}
multiclass VPatIntegerSetCCVL_VI_Swappable<VTypeInfo vti, string instruction_name,
(SplatPat_simm5 simm5:$rs2), cc,
(vti.Mask true_mask),
VLOpFrag)),
- (instruction vti.RegClass:$rs1, XLenVT:$rs2, GPR:$vl, vti.SEW)>;
+ (instruction vti.RegClass:$rs1, XLenVT:$rs2, GPR:$vl, vti.Log2SEW)>;
def : Pat<(vti.Mask (riscv_setcc_vl (SplatPat_simm5 simm5:$rs2),
(vti.Vector vti.RegClass:$rs1), invcc,
(vti.Mask true_mask),
VLOpFrag)),
- (instruction vti.RegClass:$rs1, simm5:$rs2, GPR:$vl, vti.SEW)>;
+ (instruction vti.RegClass:$rs1, simm5:$rs2, GPR:$vl, vti.Log2SEW)>;
}
multiclass VPatIntegerSetCCVL_VIPlus1<VTypeInfo vti, string instruction_name,
(vti.Mask true_mask),
VLOpFrag)),
(instruction vti.RegClass:$rs1, (DecImm simm5:$rs2),
- GPR:$vl, vti.SEW)>;
+ GPR:$vl, vti.Log2SEW)>;
}
multiclass VPatFPSetCCVL_VV_VF_FV<CondCode cc,
(fvti.Mask true_mask),
VLOpFrag)),
(!cast<Instruction>(inst_name#"_VV_"#fvti.LMul.MX)
- fvti.RegClass:$rs1, fvti.RegClass:$rs2, GPR:$vl, fvti.SEW)>;
+ fvti.RegClass:$rs1, fvti.RegClass:$rs2, GPR:$vl, fvti.Log2SEW)>;
def : Pat<(fvti.Mask (riscv_setcc_vl (fvti.Vector fvti.RegClass:$rs1),
(SplatFPOp fvti.ScalarRegClass:$rs2),
cc,
VLOpFrag)),
(!cast<Instruction>(inst_name#"_V"#fvti.ScalarSuffix#"_"#fvti.LMul.MX)
fvti.RegClass:$rs1, fvti.ScalarRegClass:$rs2,
- GPR:$vl, fvti.SEW)>;
+ GPR:$vl, fvti.Log2SEW)>;
def : Pat<(fvti.Mask (riscv_setcc_vl (SplatFPOp fvti.ScalarRegClass:$rs2),
(fvti.Vector fvti.RegClass:$rs1),
cc,
VLOpFrag)),
(!cast<Instruction>(swapped_op_inst_name#"_V"#fvti.ScalarSuffix#"_"#fvti.LMul.MX)
fvti.RegClass:$rs1, fvti.ScalarRegClass:$rs2,
- GPR:$vl, fvti.SEW)>;
+ GPR:$vl, fvti.Log2SEW)>;
}
}
def : Pat<(vti.Vector (vop (fti.Vector fti.RegClass:$rs2),
true_mask, VLOpFrag)),
(!cast<Instruction>(inst_name#"_"#suffix#"_"#vti.LMul.MX)
- fti.RegClass:$rs2, GPR:$vl, vti.SEW)>;
+ fti.RegClass:$rs2, GPR:$vl, vti.Log2SEW)>;
}
}
(fvti.Mask true_mask),
VLOpFrag)),
(!cast<Instruction>(instruction_name#"_"#ivti.LMul.MX)
- fvti.RegClass:$rs1, GPR:$vl, ivti.SEW)>;
+ fvti.RegClass:$rs1, GPR:$vl, ivti.Log2SEW)>;
}
}
(ivti.Mask true_mask),
VLOpFrag)),
(!cast<Instruction>(instruction_name#"_"#fvti.LMul.MX)
- ivti.RegClass:$rs1, GPR:$vl, fvti.SEW)>;
+ ivti.RegClass:$rs1, GPR:$vl, fvti.Log2SEW)>;
}
}
(fvti.Mask true_mask),
VLOpFrag)),
(!cast<Instruction>(instruction_name#"_"#fvti.LMul.MX)
- fvti.RegClass:$rs1, GPR:$vl, fvti.SEW)>;
+ fvti.RegClass:$rs1, GPR:$vl, fvti.Log2SEW)>;
}
}
(ivti.Mask true_mask),
VLOpFrag)),
(!cast<Instruction>(instruction_name#"_"#ivti.LMul.MX)
- ivti.RegClass:$rs1, GPR:$vl, ivti.SEW)>;
+ ivti.RegClass:$rs1, GPR:$vl, ivti.Log2SEW)>;
}
}
(fwti.Mask true_mask),
VLOpFrag)),
(!cast<Instruction>(instruction_name#"_"#vti.LMul.MX)
- fwti.RegClass:$rs1, GPR:$vl, vti.SEW)>;
+ fwti.RegClass:$rs1, GPR:$vl, vti.Log2SEW)>;
}
}
(iwti.Mask true_mask),
VLOpFrag)),
(!cast<Instruction>(instruction_name#"_"#fvti.LMul.MX)
- iwti.RegClass:$rs1, GPR:$vl, fvti.SEW)>;
+ iwti.RegClass:$rs1, GPR:$vl, fvti.Log2SEW)>;
}
}
(vti_m1.Vector (IMPLICIT_DEF)),
(vti.Vector vti.RegClass:$rs1),
(vti_m1.Vector VR:$rs2),
- GPR:$vl, vti.SEW)>;
+ GPR:$vl, vti.Log2SEW)>;
}
}
defvar store_instr = !cast<Instruction>("PseudoVSE"#vti.SEW#"_V_"#vti.LMul.MX);
// Load
def : Pat<(vti.Vector (riscv_vle_vl BaseAddr:$rs1, VLOpFrag)),
- (load_instr BaseAddr:$rs1, GPR:$vl, vti.SEW)>;
+ (load_instr BaseAddr:$rs1, GPR:$vl, vti.Log2SEW)>;
// Store
def : Pat<(riscv_vse_vl (vti.Vector vti.RegClass:$rs2), BaseAddr:$rs1,
VLOpFrag),
- (store_instr vti.RegClass:$rs2, BaseAddr:$rs1, GPR:$vl, vti.SEW)>;
+ (store_instr vti.RegClass:$rs2, BaseAddr:$rs1, GPR:$vl, vti.Log2SEW)>;
}
foreach mti = AllMasks in {
defvar load_instr = !cast<Instruction>("PseudoVLE1_V_"#mti.BX);
defvar store_instr = !cast<Instruction>("PseudoVSE1_V_"#mti.BX);
def : Pat<(mti.Mask (riscv_vle_vl BaseAddr:$rs1, VLOpFrag)),
- (load_instr BaseAddr:$rs1, GPR:$vl, mti.SEW)>;
+ (load_instr BaseAddr:$rs1, GPR:$vl, mti.Log2SEW)>;
def : Pat<(riscv_vse_vl (mti.Mask VR:$rs2), BaseAddr:$rs1,
VLOpFrag),
- (store_instr VR:$rs2, BaseAddr:$rs1, GPR:$vl, mti.SEW)>;
+ (store_instr VR:$rs2, BaseAddr:$rs1, GPR:$vl, mti.Log2SEW)>;
}
// 12.1. Vector Single-Width Integer Add and Subtract
(vti.Vector vti.RegClass:$rs1), (vti.Mask true_mask),
VLOpFrag),
(!cast<Instruction>("PseudoVRSUB_VX_"# vti.LMul.MX)
- vti.RegClass:$rs1, GPR:$rs2, GPR:$vl, vti.SEW)>;
+ vti.RegClass:$rs1, GPR:$rs2, GPR:$vl, vti.Log2SEW)>;
def : Pat<(riscv_sub_vl (vti.Vector (SplatPat_simm5 simm5:$rs2)),
(vti.Vector vti.RegClass:$rs1), (vti.Mask true_mask),
VLOpFrag),
(!cast<Instruction>("PseudoVRSUB_VI_"# vti.LMul.MX)
- vti.RegClass:$rs1, simm5:$rs2, GPR:$vl, vti.SEW)>;
+ vti.RegClass:$rs1, simm5:$rs2, GPR:$vl, vti.Log2SEW)>;
}
// 12.3. Vector Integer Extension
(vti.Mask true_mask),
VLOpFrag)),
(!cast<Instruction>("PseudoVNSRL_WI_"#fti.LMul.MX)
- vti.RegClass:$rs1, 0, GPR:$vl, fti.SEW)>;
+ vti.RegClass:$rs1, 0, GPR:$vl, fti.Log2SEW)>;
}
// 12.8. Vector Integer Comparison Instructions
VLOpFrag)),
(!cast<Instruction>("PseudoVMERGE_VVM_"#vti.LMul.MX)
vti.RegClass:$rs2, vti.RegClass:$rs1, VMV0:$vm,
- GPR:$vl, vti.SEW)>;
+ GPR:$vl, vti.Log2SEW)>;
def : Pat<(vti.Vector (riscv_vselect_vl (vti.Mask VMV0:$vm),
(SplatPat XLenVT:$rs1),
vti.RegClass:$rs2,
VLOpFrag)),
(!cast<Instruction>("PseudoVMERGE_VXM_"#vti.LMul.MX)
- vti.RegClass:$rs2, GPR:$rs1, VMV0:$vm, GPR:$vl, vti.SEW)>;
+ vti.RegClass:$rs2, GPR:$rs1, VMV0:$vm, GPR:$vl, vti.Log2SEW)>;
def : Pat<(vti.Vector (riscv_vselect_vl (vti.Mask VMV0:$vm),
(SplatPat_simm5 simm5:$rs1),
vti.RegClass:$rs2,
VLOpFrag)),
(!cast<Instruction>("PseudoVMERGE_VIM_"#vti.LMul.MX)
- vti.RegClass:$rs2, simm5:$rs1, VMV0:$vm, GPR:$vl, vti.SEW)>;
+ vti.RegClass:$rs2, simm5:$rs1, VMV0:$vm, GPR:$vl, vti.Log2SEW)>;
}
// 12.16. Vector Integer Move Instructions
foreach vti = AllIntegerVectors in {
def : Pat<(vti.Vector (riscv_vmv_v_x_vl GPR:$rs2, VLOpFrag)),
(!cast<Instruction>("PseudoVMV_V_X_"#vti.LMul.MX)
- $rs2, GPR:$vl, vti.SEW)>;
+ $rs2, GPR:$vl, vti.Log2SEW)>;
defvar ImmPat = !cast<ComplexPattern>("sew"#vti.SEW#"simm5");
def : Pat<(vti.Vector (riscv_vmv_v_x_vl (ImmPat XLenVT:$imm5),
VLOpFrag)),
(!cast<Instruction>("PseudoVMV_V_I_"#vti.LMul.MX)
- XLenVT:$imm5, GPR:$vl, vti.SEW)>;
+ XLenVT:$imm5, GPR:$vl, vti.Log2SEW)>;
}
} // Predicates = [HasStdExtV]
VLOpFrag)),
(!cast<Instruction>("PseudoVFMADD_VV_"# suffix)
vti.RegClass:$rd, vti.RegClass:$rs1, vti.RegClass:$rs2,
- GPR:$vl, vti.SEW)>;
+ GPR:$vl, vti.Log2SEW)>;
def : Pat<(vti.Vector (riscv_fma_vl vti.RegClass:$rs1, vti.RegClass:$rd,
(riscv_fneg_vl vti.RegClass:$rs2,
(vti.Mask true_mask),
VLOpFrag)),
(!cast<Instruction>("PseudoVFMSUB_VV_"# suffix)
vti.RegClass:$rd, vti.RegClass:$rs1, vti.RegClass:$rs2,
- GPR:$vl, vti.SEW)>;
+ GPR:$vl, vti.Log2SEW)>;
def : Pat<(vti.Vector (riscv_fma_vl (riscv_fneg_vl vti.RegClass:$rs1,
(vti.Mask true_mask),
VLOpFrag),
VLOpFrag)),
(!cast<Instruction>("PseudoVFNMADD_VV_"# suffix)
vti.RegClass:$rd, vti.RegClass:$rs1, vti.RegClass:$rs2,
- GPR:$vl, vti.SEW)>;
+ GPR:$vl, vti.Log2SEW)>;
def : Pat<(vti.Vector (riscv_fma_vl (riscv_fneg_vl vti.RegClass:$rs1,
(vti.Mask true_mask),
VLOpFrag),
VLOpFrag)),
(!cast<Instruction>("PseudoVFNMSUB_VV_"# suffix)
vti.RegClass:$rd, vti.RegClass:$rs1, vti.RegClass:$rs2,
- GPR:$vl, vti.SEW)>;
+ GPR:$vl, vti.Log2SEW)>;
// The choice of VFMADD here is arbitrary, vfmadd.vf and vfmacc.vf are equally
// commutable.
VLOpFrag)),
(!cast<Instruction>("PseudoVFMADD_V" # vti.ScalarSuffix # "_" # suffix)
vti.RegClass:$rd, vti.ScalarRegClass:$rs1, vti.RegClass:$rs2,
- GPR:$vl, vti.SEW)>;
+ GPR:$vl, vti.Log2SEW)>;
def : Pat<(vti.Vector (riscv_fma_vl (SplatFPOp vti.ScalarRegClass:$rs1),
vti.RegClass:$rd,
(riscv_fneg_vl vti.RegClass:$rs2,
VLOpFrag)),
(!cast<Instruction>("PseudoVFMSUB_V" # vti.ScalarSuffix # "_" # suffix)
vti.RegClass:$rd, vti.ScalarRegClass:$rs1, vti.RegClass:$rs2,
- GPR:$vl, vti.SEW)>;
+ GPR:$vl, vti.Log2SEW)>;
def : Pat<(vti.Vector (riscv_fma_vl (SplatFPOp vti.ScalarRegClass:$rs1),
(riscv_fneg_vl vti.RegClass:$rd,
(vti.Mask true_mask),
VLOpFrag)),
(!cast<Instruction>("PseudoVFNMADD_V" # vti.ScalarSuffix # "_" # suffix)
vti.RegClass:$rd, vti.ScalarRegClass:$rs1, vti.RegClass:$rs2,
- GPR:$vl, vti.SEW)>;
+ GPR:$vl, vti.Log2SEW)>;
def : Pat<(vti.Vector (riscv_fma_vl (SplatFPOp vti.ScalarRegClass:$rs1),
(riscv_fneg_vl vti.RegClass:$rd,
(vti.Mask true_mask),
VLOpFrag)),
(!cast<Instruction>("PseudoVFNMSUB_V" # vti.ScalarSuffix # "_" # suffix)
vti.RegClass:$rd, vti.ScalarRegClass:$rs1, vti.RegClass:$rs2,
- GPR:$vl, vti.SEW)>;
+ GPR:$vl, vti.Log2SEW)>;
// The splat might be negated.
def : Pat<(vti.Vector (riscv_fma_vl (riscv_fneg_vl (SplatFPOp vti.ScalarRegClass:$rs1),
VLOpFrag)),
(!cast<Instruction>("PseudoVFNMADD_V" # vti.ScalarSuffix # "_" # suffix)
vti.RegClass:$rd, vti.ScalarRegClass:$rs1, vti.RegClass:$rs2,
- GPR:$vl, vti.SEW)>;
+ GPR:$vl, vti.Log2SEW)>;
def : Pat<(vti.Vector (riscv_fma_vl (riscv_fneg_vl (SplatFPOp vti.ScalarRegClass:$rs1),
(vti.Mask true_mask),
VLOpFrag),
VLOpFrag)),
(!cast<Instruction>("PseudoVFNMSUB_V" # vti.ScalarSuffix # "_" # suffix)
vti.RegClass:$rd, vti.ScalarRegClass:$rs1, vti.RegClass:$rs2,
- GPR:$vl, vti.SEW)>;
+ GPR:$vl, vti.Log2SEW)>;
}
// 14.11. Vector Floating-Point MIN/MAX Instructions
def : Pat<(riscv_fsqrt_vl (vti.Vector vti.RegClass:$rs2), (vti.Mask true_mask),
VLOpFrag),
(!cast<Instruction>("PseudoVFSQRT_V_"# vti.LMul.MX)
- vti.RegClass:$rs2, GPR:$vl, vti.SEW)>;
+ vti.RegClass:$rs2, GPR:$vl, vti.Log2SEW)>;
// 14.12. Vector Floating-Point Sign-Injection Instructions
def : Pat<(riscv_fabs_vl (vti.Vector vti.RegClass:$rs), (vti.Mask true_mask),
VLOpFrag),
(!cast<Instruction>("PseudoVFSGNJX_VV_"# vti.LMul.MX)
- vti.RegClass:$rs, vti.RegClass:$rs, GPR:$vl, vti.SEW)>;
+ vti.RegClass:$rs, vti.RegClass:$rs, GPR:$vl, vti.Log2SEW)>;
// Handle fneg with VFSGNJN using the same input for both operands.
def : Pat<(riscv_fneg_vl (vti.Vector vti.RegClass:$rs), (vti.Mask true_mask),
VLOpFrag),
(!cast<Instruction>("PseudoVFSGNJN_VV_"# vti.LMul.MX)
- vti.RegClass:$rs, vti.RegClass:$rs, GPR:$vl, vti.SEW)>;
+ vti.RegClass:$rs, vti.RegClass:$rs, GPR:$vl, vti.Log2SEW)>;
def : Pat<(riscv_fcopysign_vl (vti.Vector vti.RegClass:$rs1),
(vti.Vector vti.RegClass:$rs2),
(vti.Mask true_mask),
VLOpFrag),
(!cast<Instruction>("PseudoVFSGNJ_VV_"# vti.LMul.MX)
- vti.RegClass:$rs1, vti.RegClass:$rs2, GPR:$vl, vti.SEW)>;
+ vti.RegClass:$rs1, vti.RegClass:$rs2, GPR:$vl, vti.Log2SEW)>;
def : Pat<(riscv_fcopysign_vl (vti.Vector vti.RegClass:$rs1),
(riscv_fneg_vl vti.RegClass:$rs2,
(vti.Mask true_mask),
(vti.Mask true_mask),
VLOpFrag),
(!cast<Instruction>("PseudoVFSGNJN_VV_"# vti.LMul.MX)
- vti.RegClass:$rs1, vti.RegClass:$rs2, GPR:$vl, vti.SEW)>;
+ vti.RegClass:$rs1, vti.RegClass:$rs2, GPR:$vl, vti.Log2SEW)>;
def : Pat<(riscv_fcopysign_vl (vti.Vector vti.RegClass:$rs1),
(SplatFPOp vti.ScalarRegClass:$rs2),
(vti.Mask true_mask),
VLOpFrag),
(!cast<Instruction>("PseudoVFSGNJ_V"#vti.ScalarSuffix#"_"# vti.LMul.MX)
- vti.RegClass:$rs1, vti.ScalarRegClass:$rs2, GPR:$vl, vti.SEW)>;
+ vti.RegClass:$rs1, vti.ScalarRegClass:$rs2, GPR:$vl, vti.Log2SEW)>;
}
foreach fvti = AllFloatVectors in {
VLOpFrag)),
(!cast<Instruction>("PseudoVMERGE_VVM_"#fvti.LMul.MX)
fvti.RegClass:$rs2, fvti.RegClass:$rs1, VMV0:$vm,
- GPR:$vl, fvti.SEW)>;
+ GPR:$vl, fvti.Log2SEW)>;
def : Pat<(fvti.Vector (riscv_vselect_vl (fvti.Mask VMV0:$vm),
(SplatFPOp fvti.ScalarRegClass:$rs1),
(!cast<Instruction>("PseudoVFMERGE_V"#fvti.ScalarSuffix#"M_"#fvti.LMul.MX)
fvti.RegClass:$rs2,
(fvti.Scalar fvti.ScalarRegClass:$rs1),
- VMV0:$vm, GPR:$vl, fvti.SEW)>;
+ VMV0:$vm, GPR:$vl, fvti.Log2SEW)>;
def : Pat<(fvti.Vector (riscv_vselect_vl (fvti.Mask VMV0:$vm),
(SplatFPOp (fvti.Scalar fpimm0)),
fvti.RegClass:$rs2,
VLOpFrag)),
(!cast<Instruction>("PseudoVMERGE_VIM_"#fvti.LMul.MX)
- fvti.RegClass:$rs2, 0, VMV0:$vm, GPR:$vl, fvti.SEW)>;
+ fvti.RegClass:$rs2, 0, VMV0:$vm, GPR:$vl, fvti.Log2SEW)>;
// 14.16. Vector Floating-Point Move Instruction
// If we're splatting fpimm0, use vmv.v.x vd, x0.
def : Pat<(fvti.Vector (riscv_vfmv_v_f_vl
(fvti.Scalar (fpimm0)), VLOpFrag)),
(!cast<Instruction>("PseudoVMV_V_I_"#fvti.LMul.MX)
- 0, GPR:$vl, fvti.SEW)>;
+ 0, GPR:$vl, fvti.Log2SEW)>;
def : Pat<(fvti.Vector (riscv_vfmv_v_f_vl
(fvti.Scalar fvti.ScalarRegClass:$rs2), VLOpFrag)),
(!cast<Instruction>("PseudoVFMV_V_" # fvti.ScalarSuffix # "_" #
fvti.LMul.MX)
(fvti.Scalar fvti.ScalarRegClass:$rs2),
- GPR:$vl, fvti.SEW)>;
+ GPR:$vl, fvti.Log2SEW)>;
// 14.17. Vector Single-Width Floating-Point/Integer Type-Convert Instructions
defm : VPatConvertFP2ISDNode_V_VL<riscv_fp_to_sint_vl, "PseudoVFCVT_RTZ_X_F_V">;
(fvti.Mask true_mask),
VLOpFrag)),
(!cast<Instruction>("PseudoVFWCVT_F_F_V_"#fvti.LMul.MX)
- fvti.RegClass:$rs1, GPR:$vl, fvti.SEW)>;
+ fvti.RegClass:$rs1, GPR:$vl, fvti.Log2SEW)>;
}
// 14.19 Narrowing Floating-Point/Integer Type-Convert Instructions
(fwti.Mask true_mask),
VLOpFrag)),
(!cast<Instruction>("PseudoVFNCVT_F_F_W_"#fvti.LMul.MX)
- fwti.RegClass:$rs1, GPR:$vl, fvti.SEW)>;
+ fwti.RegClass:$rs1, GPR:$vl, fvti.Log2SEW)>;
def : Pat<(fvti.Vector (riscv_fncvt_rod_vl (fwti.Vector fwti.RegClass:$rs1),
(fwti.Mask true_mask),
VLOpFrag)),
(!cast<Instruction>("PseudoVFNCVT_ROD_F_F_W_"#fvti.LMul.MX)
- fwti.RegClass:$rs1, GPR:$vl, fvti.SEW)>;
+ fwti.RegClass:$rs1, GPR:$vl, fvti.Log2SEW)>;
}
}
foreach mti = AllMasks in {
// 16.1 Vector Mask-Register Logical Instructions
def : Pat<(mti.Mask (riscv_vmset_vl VLOpFrag)),
- (!cast<Instruction>("PseudoVMSET_M_" # mti.BX) GPR:$vl, mti.SEW)>;
+ (!cast<Instruction>("PseudoVMSET_M_" # mti.BX) GPR:$vl, mti.Log2SEW)>;
def : Pat<(mti.Mask (riscv_vmclr_vl VLOpFrag)),
- (!cast<Instruction>("PseudoVMCLR_M_" # mti.BX) GPR:$vl, mti.SEW)>;
+ (!cast<Instruction>("PseudoVMCLR_M_" # mti.BX) GPR:$vl, mti.Log2SEW)>;
def : Pat<(mti.Mask (riscv_vmand_vl VR:$rs1, VR:$rs2, VLOpFrag)),
(!cast<Instruction>("PseudoVMAND_MM_" # mti.LMul.MX)
- VR:$rs1, VR:$rs2, GPR:$vl, mti.SEW)>;
+ VR:$rs1, VR:$rs2, GPR:$vl, mti.Log2SEW)>;
def : Pat<(mti.Mask (riscv_vmor_vl VR:$rs1, VR:$rs2, VLOpFrag)),
(!cast<Instruction>("PseudoVMOR_MM_" # mti.LMul.MX)
- VR:$rs1, VR:$rs2, GPR:$vl, mti.SEW)>;
+ VR:$rs1, VR:$rs2, GPR:$vl, mti.Log2SEW)>;
def : Pat<(mti.Mask (riscv_vmxor_vl VR:$rs1, VR:$rs2, VLOpFrag)),
(!cast<Instruction>("PseudoVMXOR_MM_" # mti.LMul.MX)
- VR:$rs1, VR:$rs2, GPR:$vl, mti.SEW)>;
+ VR:$rs1, VR:$rs2, GPR:$vl, mti.Log2SEW)>;
def : Pat<(mti.Mask (riscv_vmand_vl (riscv_vmnot_vl VR:$rs1,
VLOpFrag),
VR:$rs2, VLOpFrag)),
(!cast<Instruction>("PseudoVMANDNOT_MM_" # mti.LMul.MX)
- VR:$rs1, VR:$rs2, GPR:$vl, mti.SEW)>;
+ VR:$rs1, VR:$rs2, GPR:$vl, mti.Log2SEW)>;
def : Pat<(mti.Mask (riscv_vmor_vl (riscv_vmnot_vl VR:$rs1,
VLOpFrag),
VR:$rs2, VLOpFrag)),
(!cast<Instruction>("PseudoVMORNOT_MM_" # mti.LMul.MX)
- VR:$rs1, VR:$rs2, GPR:$vl, mti.SEW)>;
+ VR:$rs1, VR:$rs2, GPR:$vl, mti.Log2SEW)>;
// XOR is associative so we need 2 patterns for VMXNOR.
def : Pat<(mti.Mask (riscv_vmxor_vl (riscv_vmnot_vl VR:$rs1,
VLOpFrag),
VR:$rs2, VLOpFrag)),
(!cast<Instruction>("PseudoVMXNOR_MM_" # mti.LMul.MX)
- VR:$rs1, VR:$rs2, GPR:$vl, mti.SEW)>;
+ VR:$rs1, VR:$rs2, GPR:$vl, mti.Log2SEW)>;
def : Pat<(mti.Mask (riscv_vmnot_vl (riscv_vmand_vl VR:$rs1, VR:$rs2,
VLOpFrag),
VLOpFrag)),
(!cast<Instruction>("PseudoVMNAND_MM_" # mti.LMul.MX)
- VR:$rs1, VR:$rs2, GPR:$vl, mti.SEW)>;
+ VR:$rs1, VR:$rs2, GPR:$vl, mti.Log2SEW)>;
def : Pat<(mti.Mask (riscv_vmnot_vl (riscv_vmor_vl VR:$rs1, VR:$rs2,
VLOpFrag),
VLOpFrag)),
(!cast<Instruction>("PseudoVMNOR_MM_" # mti.LMul.MX)
- VR:$rs1, VR:$rs2, GPR:$vl, mti.SEW)>;
+ VR:$rs1, VR:$rs2, GPR:$vl, mti.Log2SEW)>;
def : Pat<(mti.Mask (riscv_vmnot_vl (riscv_vmxor_vl VR:$rs1, VR:$rs2,
VLOpFrag),
VLOpFrag)),
(!cast<Instruction>("PseudoVMXNOR_MM_" # mti.LMul.MX)
- VR:$rs1, VR:$rs2, GPR:$vl, mti.SEW)>;
+ VR:$rs1, VR:$rs2, GPR:$vl, mti.Log2SEW)>;
// Match the not idiom to the vnot.mm pseudo.
def : Pat<(mti.Mask (riscv_vmnot_vl VR:$rs, VLOpFrag)),
(!cast<Instruction>("PseudoVMNAND_MM_" # mti.LMul.MX)
- VR:$rs, VR:$rs, GPR:$vl, mti.SEW)>;
+ VR:$rs, VR:$rs, GPR:$vl, mti.Log2SEW)>;
// 16.2 Vector Mask Population Count vpopc
def : Pat<(XLenVT (riscv_vpopc_vl (mti.Mask VR:$rs2), (mti.Mask true_mask),
VLOpFrag)),
(!cast<Instruction>("PseudoVPOPC_M_" # mti.BX)
- VR:$rs2, GPR:$vl, mti.SEW)>;
+ VR:$rs2, GPR:$vl, mti.Log2SEW)>;
}
} // Predicates = [HasStdExtV]
VLOpFrag)),
(!cast<Instruction>("PseudoVMV_S_X_"#vti.LMul.MX)
vti.RegClass:$merge,
- (vti.Scalar vti.ScalarRegClass:$rs1), GPR:$vl, vti.SEW)>;
+ (vti.Scalar vti.ScalarRegClass:$rs1), GPR:$vl, vti.Log2SEW)>;
def : Pat<(vti.Vector (riscv_vrgather_vv_vl vti.RegClass:$rs2,
(vti.Vector vti.RegClass:$rs1),
(vti.Mask true_mask),
VLOpFrag)),
(!cast<Instruction>("PseudoVRGATHER_VV_"# vti.LMul.MX)
- vti.RegClass:$rs2, vti.RegClass:$rs1, GPR:$vl, vti.SEW)>;
+ vti.RegClass:$rs2, vti.RegClass:$rs1, GPR:$vl, vti.Log2SEW)>;
def : Pat<(vti.Vector (riscv_vrgather_vx_vl vti.RegClass:$rs2, GPR:$rs1,
(vti.Mask true_mask),
VLOpFrag)),
(!cast<Instruction>("PseudoVRGATHER_VX_"# vti.LMul.MX)
- vti.RegClass:$rs2, GPR:$rs1, GPR:$vl, vti.SEW)>;
+ vti.RegClass:$rs2, GPR:$rs1, GPR:$vl, vti.Log2SEW)>;
def : Pat<(vti.Vector (riscv_vrgather_vx_vl vti.RegClass:$rs2, uimm5:$imm,
(vti.Mask true_mask),
VLOpFrag)),
(!cast<Instruction>("PseudoVRGATHER_VI_"# vti.LMul.MX)
- vti.RegClass:$rs2, uimm5:$imm, GPR:$vl, vti.SEW)>;
+ vti.RegClass:$rs2, uimm5:$imm, GPR:$vl, vti.Log2SEW)>;
def : Pat<(vti.Vector (riscv_vselect_vl (vti.Mask VMV0:$vm),
(riscv_vrgather_vv_vl
VLOpFrag)),
(!cast<Instruction>("PseudoVRGATHER_VV_"# vti.LMul.MX#"_MASK")
vti.RegClass:$merge, vti.RegClass:$rs2, vti.RegClass:$rs1,
- vti.Mask:$vm, GPR:$vl, vti.SEW)>;
+ vti.Mask:$vm, GPR:$vl, vti.Log2SEW)>;
// emul = lmul * 16 / sew
defvar vlmul = vti.LMul;
defvar octuple_lmul = octuple_from_str<vlmul.MX>.ret;
- defvar octuple_emul = !srl(!mul(octuple_lmul, 16), shift_amount<vti.SEW>.val);
+ defvar octuple_emul = !srl(!mul(octuple_lmul, 16), vti.Log2SEW);
if !and(!ge(octuple_emul, 1), !le(octuple_emul, 64)) then {
defvar emul_str = octuple_to_str<octuple_emul>.ret;
defvar ivti = !cast<VTypeInfo>("VI16" # emul_str);
(vti.Mask true_mask),
VLOpFrag)),
(!cast<Instruction>(inst)
- vti.RegClass:$rs2, ivti.RegClass:$rs1, GPR:$vl, vti.SEW)>;
+ vti.RegClass:$rs2, ivti.RegClass:$rs1, GPR:$vl, vti.Log2SEW)>;
def : Pat<(vti.Vector (riscv_vselect_vl (vti.Mask VMV0:$vm),
(riscv_vrgatherei16_vv_vl
VLOpFrag)),
(!cast<Instruction>(inst#"_MASK")
vti.RegClass:$merge, vti.RegClass:$rs2, ivti.RegClass:$rs1,
- vti.Mask:$vm, GPR:$vl, vti.SEW)>;
+ vti.Mask:$vm, GPR:$vl, vti.Log2SEW)>;
}
}
VLOpFrag)),
(!cast<Instruction>("PseudoVFMV_S_"#vti.ScalarSuffix#"_"#vti.LMul.MX)
vti.RegClass:$merge,
- (vti.Scalar vti.ScalarRegClass:$rs1), GPR:$vl, vti.SEW)>;
+ (vti.Scalar vti.ScalarRegClass:$rs1), GPR:$vl, vti.Log2SEW)>;
defvar ivti = GetIntVTypeInfo<vti>.Vti;
def : Pat<(vti.Vector (riscv_vrgather_vv_vl vti.RegClass:$rs2,
(ivti.Vector vti.RegClass:$rs1),
(vti.Mask true_mask),
VLOpFrag)),
(!cast<Instruction>("PseudoVRGATHER_VV_"# vti.LMul.MX)
- vti.RegClass:$rs2, vti.RegClass:$rs1, GPR:$vl, vti.SEW)>;
+ vti.RegClass:$rs2, vti.RegClass:$rs1, GPR:$vl, vti.Log2SEW)>;
def : Pat<(vti.Vector (riscv_vrgather_vx_vl vti.RegClass:$rs2, GPR:$rs1,
(vti.Mask true_mask),
VLOpFrag)),
(!cast<Instruction>("PseudoVRGATHER_VX_"# vti.LMul.MX)
- vti.RegClass:$rs2, GPR:$rs1, GPR:$vl, vti.SEW)>;
+ vti.RegClass:$rs2, GPR:$rs1, GPR:$vl, vti.Log2SEW)>;
def : Pat<(vti.Vector (riscv_vrgather_vx_vl vti.RegClass:$rs2, uimm5:$imm,
(vti.Mask true_mask),
VLOpFrag)),
(!cast<Instruction>("PseudoVRGATHER_VI_"# vti.LMul.MX)
- vti.RegClass:$rs2, uimm5:$imm, GPR:$vl, vti.SEW)>;
+ vti.RegClass:$rs2, uimm5:$imm, GPR:$vl, vti.Log2SEW)>;
def : Pat<(vti.Vector (riscv_vselect_vl (vti.Mask VMV0:$vm),
(riscv_vrgather_vv_vl
VLOpFrag)),
(!cast<Instruction>("PseudoVRGATHER_VV_"# vti.LMul.MX#"_MASK")
vti.RegClass:$merge, vti.RegClass:$rs2, vti.RegClass:$rs1,
- vti.Mask:$vm, GPR:$vl, vti.SEW)>;
+ vti.Mask:$vm, GPR:$vl, vti.Log2SEW)>;
defvar vlmul = vti.LMul;
defvar octuple_lmul = octuple_from_str<vlmul.MX>.ret;
- defvar octuple_emul = !srl(!mul(octuple_lmul, 16), shift_amount<vti.SEW>.val);
+ defvar octuple_emul = !srl(!mul(octuple_lmul, 16), vti.Log2SEW);
if !and(!ge(octuple_emul, 1), !le(octuple_emul, 64)) then {
defvar emul_str = octuple_to_str<octuple_emul>.ret;
defvar ivti = !cast<VTypeInfo>("VI16" # emul_str);
(vti.Mask true_mask),
VLOpFrag)),
(!cast<Instruction>(inst)
- vti.RegClass:$rs2, ivti.RegClass:$rs1, GPR:$vl, vti.SEW)>;
+ vti.RegClass:$rs2, ivti.RegClass:$rs1, GPR:$vl, vti.Log2SEW)>;
def : Pat<(vti.Vector (riscv_vselect_vl (vti.Mask VMV0:$vm),
(riscv_vrgatherei16_vv_vl
VLOpFrag)),
(!cast<Instruction>(inst#"_MASK")
vti.RegClass:$merge, vti.RegClass:$rs2, ivti.RegClass:$rs1,
- vti.Mask:$vm, GPR:$vl, vti.SEW)>;
+ vti.Mask:$vm, GPR:$vl, vti.Log2SEW)>;
}
}
foreach vti = AllIntegerVectors in {
def : Pat<(vti.Vector (riscv_vid_vl (vti.Mask true_mask),
VLOpFrag)),
- (!cast<Instruction>("PseudoVID_V_"#vti.LMul.MX) GPR:$vl, vti.SEW)>;
+ (!cast<Instruction>("PseudoVID_V_"#vti.LMul.MX) GPR:$vl, vti.Log2SEW)>;
def : Pat<(vti.Vector (riscv_slide1up_vl (vti.Vector vti.RegClass:$rs1),
GPR:$rs2, (vti.Mask true_mask),
VLOpFrag)),
(!cast<Instruction>("PseudoVSLIDE1UP_VX_"#vti.LMul.MX)
- vti.RegClass:$rs1, GPR:$rs2, GPR:$vl, vti.SEW)>;
+ vti.RegClass:$rs1, GPR:$rs2, GPR:$vl, vti.Log2SEW)>;
def : Pat<(vti.Vector (riscv_slide1down_vl (vti.Vector vti.RegClass:$rs1),
GPR:$rs2, (vti.Mask true_mask),
VLOpFrag)),
(!cast<Instruction>("PseudoVSLIDE1DOWN_VX_"#vti.LMul.MX)
- vti.RegClass:$rs1, GPR:$rs2, GPR:$vl, vti.SEW)>;
+ vti.RegClass:$rs1, GPR:$rs2, GPR:$vl, vti.Log2SEW)>;
}
foreach vti = !listconcat(AllIntegerVectors, AllFloatVectors) in {
VLOpFrag)),
(!cast<Instruction>("PseudoVSLIDEUP_VI_"#vti.LMul.MX)
vti.RegClass:$rs3, vti.RegClass:$rs1, uimm5:$rs2,
- GPR:$vl, vti.SEW)>;
+ GPR:$vl, vti.Log2SEW)>;
def : Pat<(vti.Vector (riscv_slideup_vl (vti.Vector vti.RegClass:$rs3),
(vti.Vector vti.RegClass:$rs1),
VLOpFrag)),
(!cast<Instruction>("PseudoVSLIDEUP_VX_"#vti.LMul.MX)
vti.RegClass:$rs3, vti.RegClass:$rs1, GPR:$rs2,
- GPR:$vl, vti.SEW)>;
+ GPR:$vl, vti.Log2SEW)>;
def : Pat<(vti.Vector (riscv_slidedown_vl (vti.Vector vti.RegClass:$rs3),
(vti.Vector vti.RegClass:$rs1),
VLOpFrag)),
(!cast<Instruction>("PseudoVSLIDEDOWN_VI_"#vti.LMul.MX)
vti.RegClass:$rs3, vti.RegClass:$rs1, uimm5:$rs2,
- GPR:$vl, vti.SEW)>;
+ GPR:$vl, vti.Log2SEW)>;
def : Pat<(vti.Vector (riscv_slidedown_vl (vti.Vector vti.RegClass:$rs3),
(vti.Vector vti.RegClass:$rs1),
VLOpFrag)),
(!cast<Instruction>("PseudoVSLIDEDOWN_VX_"#vti.LMul.MX)
vti.RegClass:$rs3, vti.RegClass:$rs1, GPR:$rs2,
- GPR:$vl, vti.SEW)>;
+ GPR:$vl, vti.Log2SEW)>;
}
} // Predicates = [HasStdExtV]
%2:gpr = COPY $x12
%1:gpr = COPY $x11
%0:gpr = COPY $x10
- %4:vr = PseudoVLE64_V_M1 %1, %3, 64, implicit $vl, implicit $vtype :: (load unknown-size from %ir.pa, align 8)
- %5:vr = PseudoVLE64_V_M1 %2, %3, 64, implicit $vl, implicit $vtype :: (load unknown-size from %ir.pb, align 8)
- %6:vr = PseudoVADD_VV_M1 killed %4, killed %5, %3, 64, implicit $vl, implicit $vtype
- PseudoVSE64_V_M1 killed %6, %0, %3, 64, implicit $vl, implicit $vtype :: (store unknown-size into %ir.pc, align 8)
+ %4:vr = PseudoVLE64_V_M1 %1, %3, 6, implicit $vl, implicit $vtype :: (load unknown-size from %ir.pa, align 8)
+ %5:vr = PseudoVLE64_V_M1 %2, %3, 6, implicit $vl, implicit $vtype :: (load unknown-size from %ir.pb, align 8)
+ %6:vr = PseudoVADD_VV_M1 killed %4, killed %5, %3, 6, implicit $vl, implicit $vtype
+ PseudoVSE64_V_M1 killed %6, %0, %3, 6, implicit $vl, implicit $vtype :: (store unknown-size into %ir.pc, align 8)
PseudoRET
...
# POST-INSERTER: %2:gpr = COPY $x11
# POST-INSERTER: %3:gpr = COPY $x10
# POST-INSERTER: dead %7:gpr = PseudoVSETVLI %0, 88, implicit-def $vl, implicit-def $vtype
-# POST-INSERTER: %4:vr = PseudoVLE64_V_M1 %2, $noreg, 64, implicit $vl, implicit $vtype :: (load unknown-size from %ir.pa, align 8)
+# POST-INSERTER: %4:vr = PseudoVLE64_V_M1 %2, $noreg, 6, implicit $vl, implicit $vtype :: (load unknown-size from %ir.pa, align 8)
# POST-INSERTER: dead %8:gpr = PseudoVSETVLI %0, 88, implicit-def $vl, implicit-def $vtype
-# POST-INSERTER: %5:vr = PseudoVLE64_V_M1 %1, $noreg, 64, implicit $vl, implicit $vtype :: (load unknown-size from %ir.pb, align 8)
+# POST-INSERTER: %5:vr = PseudoVLE64_V_M1 %1, $noreg, 6, implicit $vl, implicit $vtype :: (load unknown-size from %ir.pb, align 8)
# POST-INSERTER: dead %9:gpr = PseudoVSETVLI %0, 88, implicit-def $vl, implicit-def $vtype
-# POST-INSERTER: %6:vr = PseudoVADD_VV_M1 killed %4, killed %5, $noreg, 64, implicit $vl, implicit $vtype
+# POST-INSERTER: %6:vr = PseudoVADD_VV_M1 killed %4, killed %5, $noreg, 6, implicit $vl, implicit $vtype
# POST-INSERTER: dead %10:gpr = PseudoVSETVLI %0, 88, implicit-def $vl, implicit-def $vtype
-# POST-INSERTER: PseudoVSE64_V_M1 killed %6, %3, $noreg, 64, implicit $vl, implicit $vtype :: (store unknown-size into %ir.pc, align 8)
+# POST-INSERTER: PseudoVSE64_V_M1 killed %6, %3, $noreg, 6, implicit $vl, implicit $vtype :: (store unknown-size into %ir.pc, align 8)
# CODEGEN: vsetvli a3, a3, e64,m1,ta,mu
# CODEGEN-NEXT: vle64.v v25, (a1)
; PRE-INSERTER: %3:vr = VL1RE64_V %1 :: (load unknown-size from %ir.pa, align 8)
; PRE-INSERTER: %4:vr = VL1RE64_V %2 :: (load unknown-size from %ir.pb, align 8)
-; PRE-INSERTER: %5:vr = PseudoVADD_VV_M1 killed %3, killed %4, $x0, 64, implicit $vl, implicit $vtype
+; PRE-INSERTER: %5:vr = PseudoVADD_VV_M1 killed %3, killed %4, $x0, 6, implicit $vl, implicit $vtype
; PRE-INSERTER: VS1R_V killed %5, %0 :: (store unknown-size into %ir.pc, align 8)
; POST-INSERTER: %3:vr = VL1RE64_V %1 :: (load unknown-size from %ir.pa, align 8)
; POST-INSERTER: %4:vr = VL1RE64_V %2 :: (load unknown-size from %ir.pb, align 8)
; POST-INSERTER: dead %6:gpr = PseudoVSETVLI $x0, 88, implicit-def $vl, implicit-def $vtype
-; POST-INSERTER: %5:vr = PseudoVADD_VV_M1 killed %3, killed %4, $noreg, 64, implicit $vl, implicit $vtype
+; POST-INSERTER: %5:vr = PseudoVADD_VV_M1 killed %3, killed %4, $noreg, 6, implicit $vl, implicit $vtype
; POST-INSERTER: VS1R_V killed %5, %0 :: (store unknown-size into %ir.pc, align 8)
; CHECK: $x12 = PseudoReadVLENB
; CHECK: $x2 = SUB $x2, killed $x12
; CHECK: dead renamable $x11 = PseudoVSETVLI killed renamable $x11, 88, implicit-def $vl, implicit-def $vtype
- ; CHECK: renamable $v25 = PseudoVLE64_V_M1 killed renamable $x10, $noreg, 64, implicit $vl, implicit $vtype :: (load unknown-size from %ir.pa, align 8)
+ ; CHECK: renamable $v25 = PseudoVLE64_V_M1 killed renamable $x10, $noreg, 6, implicit $vl, implicit $vtype :: (load unknown-size from %ir.pa, align 8)
; CHECK: $x11 = PseudoReadVLENB
; CHECK: $x10 = LUI 1048575
; CHECK: $x10 = ADDIW killed $x10, 1824
; CHECK: PseudoRET
%1:gpr = COPY $x11
%0:gpr = COPY $x10
- %2:vr = PseudoVLE64_V_M1 %0, %1, 64, implicit $vl, implicit $vtype :: (load unknown-size from %ir.pa, align 8)
+ %2:vr = PseudoVLE64_V_M1 %0, %1, 6, implicit $vl, implicit $vtype :: (load unknown-size from %ir.pa, align 8)
%3:gpr = ADDI %stack.2, 0
VS1R_V killed %2:vr, %3:gpr
PseudoRET
; CHECK: liveins: $v8
; CHECK: [[COPY:%[0-9]+]]:vr = COPY $v8
; CHECK: dead %2:gpr = PseudoVSETIVLI 1, 88, implicit-def $vl, implicit-def $vtype
- ; CHECK: PseudoVSE64_V_M1 [[COPY]], %stack.0.a, 1, 64, implicit $vl, implicit $vtype
+ ; CHECK: PseudoVSE64_V_M1 [[COPY]], %stack.0.a, 1, 6, implicit $vl, implicit $vtype
; CHECK: [[LD:%[0-9]+]]:gpr = LD %stack.0.a, 0 :: (dereferenceable load 8 from %ir.a)
; CHECK: $x10 = COPY [[LD]]
; CHECK: PseudoRET implicit $x10
; CHECK: [[DEF:%[0-9]+]]:vrm8 = IMPLICIT_DEF
; CHECK: [[COPY2:%[0-9]+]]:vrm8nov0 = COPY [[DEF]]
; CHECK: dead %5:gpr = PseudoVSETVLI $x0, 91, implicit-def $vl, implicit-def $vtype
- ; CHECK: [[PseudoVLE64_V_M8_MASK:%[0-9]+]]:vrm8nov0 = PseudoVLE64_V_M8_MASK [[COPY2]], [[COPY1]], $v0, $noreg, 64, implicit $vl, implicit $vtype :: (load 64 from %ir.a, align 8)
+ ; CHECK: [[PseudoVLE64_V_M8_MASK:%[0-9]+]]:vrm8nov0 = PseudoVLE64_V_M8_MASK [[COPY2]], [[COPY1]], $v0, $noreg, 6, implicit $vl, implicit $vtype :: (load 64 from %ir.a, align 8)
; CHECK: $v8m8 = COPY [[PseudoVLE64_V_M8_MASK]]
; CHECK: PseudoRET implicit $v8m8
%1:vr = COPY $v0
$v0 = COPY %1
%3:vrm8 = IMPLICIT_DEF
%4:vrm8nov0 = COPY %3
- %2:vrm8nov0 = PseudoVLE64_V_M8_MASK %4, %0, $v0, $x0, 64, implicit $vl, implicit $vtype :: (load 64 from %ir.a, align 8)
+ %2:vrm8nov0 = PseudoVLE64_V_M8_MASK %4, %0, $v0, $x0, 6, implicit $vl, implicit $vtype :: (load 64 from %ir.a, align 8)
$v8m8 = COPY %2
PseudoRET implicit $v8m8