This change adds a policy operand to the helper class which is used for binary ops like vadd, but also, possibly surprisingly, some of the vslide variants. This allows us to represent the tail agnostic state with this pseudo family; previously, we could only represent tail undefined and tail undisturbed. (Since these pseudos don't have a mask, they're always mask undefined.)
This is NFC (no functional change) because no current producer uses the tail agnostic state. This will change in an upcoming change to doPeepholeMaskedRVV.
Differential Revision: https://reviews.llvm.org/D153067
defvar TAIL_UNDISTURBED_MASK_UNDISTURBED = 0;
defvar TAIL_AGNOSTIC = 1;
+defvar TU_MU = 0;
defvar TA_MA = 3;
//===----------------------------------------------------------------------===//
DAGOperand Op2Class,
string Constraint> :
Pseudo<(outs RetClass:$rd),
- (ins RetClass:$merge, Op1Class:$rs2, Op2Class:$rs1, AVL:$vl, ixlenimm:$sew), []>,
+ (ins RetClass:$merge, Op1Class:$rs2, Op2Class:$rs1, AVL:$vl,
+ ixlenimm:$sew, ixlenimm:$policy), []>,
RISCVVPseudo {
let mayLoad = 0;
let mayStore = 0;
let Constraints = Join<[Constraint, "$rd = $merge"], ",">.ret;
let HasVLOp = 1;
let HasSEWOp = 1;
+ let HasVecPolicyOp = 1;
}
// Special version of VPseudoBinaryNoMask where we pretend the first source is
(result_type result_reg_class:$merge),
(op1_type op1_reg_class:$rs1),
(op2_type op2_kind:$rs2),
- GPR:$vl, sew)>;
+ GPR:$vl, sew, TU_MU)>;
// Same as above but source operands are swapped.
class VPatBinaryNoMaskSwapped<string intrinsic_name,
vti.RegClass:$rs1,
vti.RegClass:$rs2,
GPR:$vl,
- vti.Log2SEW)>;
+ vti.Log2SEW, TU_MU)>;
def : Pat<(vti.Vector (int_riscv_vrsub_mask (vti.Vector vti.RegClass:$merge),
(vti.Vector vti.RegClass:$rs2),
(vti.Vector vti.RegClass:$rs1),
(result_type (IMPLICIT_DEF)),
op_reg_class:$rs1,
op_reg_class:$rs2,
- avl, log2sew)>;
+ avl, log2sew, TA_MA)>;
class VPatBinarySDNode_XI<SDPatternOperator vop,
string instruction_name,
(result_type (IMPLICIT_DEF)),
vop_reg_class:$rs1,
xop_kind:$rs2,
- avl, log2sew)>;
+ avl, log2sew, TA_MA)>;
multiclass VPatBinarySDNode_VV_VX<SDPatternOperator vop, string instruction_name,
list<VTypeInfo> vtilist = AllIntegerVectors,
(result_type (IMPLICIT_DEF)),
vop_reg_class:$rs1,
(xop_type xop_kind:$rs2),
- avl, log2sew)>;
+ avl, log2sew, TA_MA)>;
multiclass VPatBinaryFPSDNode_VV_VF<SDPatternOperator vop, string instruction_name,
bit isSEWAware = 0> {
(fvti.Vector (IMPLICIT_DEF)),
fvti.RegClass:$rs1,
(fvti.Scalar fvti.ScalarRegClass:$rs2),
- fvti.AVL, fvti.Log2SEW)>;
+ fvti.AVL, fvti.Log2SEW, TA_MA)>;
}
multiclass VPatIntegerSetCCSDNode_VV<string instruction_name,
GPR:$rs2, (vti.Mask true_mask),
VLOpFrag)),
(!cast<Instruction>("PseudoVSLIDE1UP_VX_"#vti.LMul.MX#"_TU")
- vti.RegClass:$rd, vti.RegClass:$rs1, GPR:$rs2, GPR:$vl, vti.Log2SEW)>;
+ vti.RegClass:$rd, vti.RegClass:$rs1, GPR:$rs2, GPR:$vl, vti.Log2SEW, TU_MU)>;
def : Pat<(vti.Vector (riscv_slide1down_vl (vti.Vector undef),
(vti.Vector vti.RegClass:$rs1),
GPR:$rs2, (vti.Mask true_mask),
GPR:$rs2, (vti.Mask true_mask),
VLOpFrag)),
(!cast<Instruction>("PseudoVSLIDE1DOWN_VX_"#vti.LMul.MX#"_TU")
- vti.RegClass:$rd, vti.RegClass:$rs1, GPR:$rs2, GPR:$vl, vti.Log2SEW)>;
+ vti.RegClass:$rd, vti.RegClass:$rs1, GPR:$rs2, GPR:$vl, vti.Log2SEW, TU_MU)>;
}
}
vti.Scalar:$rs2, (vti.Mask true_mask),
VLOpFrag)),
(!cast<Instruction>("PseudoVFSLIDE1UP_V"#vti.ScalarSuffix#"_"#vti.LMul.MX#"_TU")
- vti.RegClass:$rd, vti.RegClass:$rs1, vti.ScalarRegClass:$rs2, GPR:$vl, vti.Log2SEW)>;
+ vti.RegClass:$rd, vti.RegClass:$rs1, vti.ScalarRegClass:$rs2, GPR:$vl, vti.Log2SEW, TU_MU)>;
def : Pat<(vti.Vector (riscv_fslide1down_vl (vti.Vector undef),
(vti.Vector vti.RegClass:$rs1),
vti.Scalar:$rs2, (vti.Mask true_mask),
vti.Scalar:$rs2, (vti.Mask true_mask),
VLOpFrag)),
(!cast<Instruction>("PseudoVFSLIDE1DOWN_V"#vti.ScalarSuffix#"_"#vti.LMul.MX#"_TU")
- vti.RegClass:$rd, vti.RegClass:$rs1, vti.ScalarRegClass:$rs2, GPR:$vl, vti.Log2SEW)>;
+ vti.RegClass:$rd, vti.RegClass:$rs1, vti.ScalarRegClass:$rs2, GPR:$vl, vti.Log2SEW, TU_MU)>;
}
}