return Result;
}
-// Returns true if the Node is an ISD::AND with a constant argument. If so,
-// set Mask to that constant value.
-static bool isConstantMask(SDNode *Node, uint64_t &Mask) {
- if (Node->getOpcode() == ISD::AND &&
- Node->getOperand(1).getOpcode() == ISD::Constant) {
- Mask = cast<ConstantSDNode>(Node->getOperand(1))->getZExtValue();
- return true;
- }
- return false;
-}
-
void RISCVDAGToDAGISel::Select(SDNode *Node) {
// If we have a custom node, we have already selected.
if (Node->isMachineOpcode()) {
ReplaceNode(Node, CurDAG->getMachineNode(RISCV::ADDI, DL, VT, TFI, Imm));
return;
}
- case ISD::SRL: {
- if (!Subtarget->is64Bit())
- break;
- SDNode *Op0 = Node->getOperand(0).getNode();
- uint64_t Mask;
- // Match (srl (and val, mask), imm) where the result would be a
- // zero-extended 32-bit integer. i.e. the mask is 0xffffffff or the result
- // is equivalent to this (SimplifyDemandedBits may have removed lower bits
- // from the mask that aren't necessary due to the right-shifting).
- if (isa<ConstantSDNode>(Node->getOperand(1)) && isConstantMask(Op0, Mask)) {
- uint64_t ShAmt = Node->getConstantOperandVal(1);
-
- if ((Mask | maskTrailingOnes<uint64_t>(ShAmt)) == 0xffffffff) {
- SDValue ShAmtVal = CurDAG->getTargetConstant(ShAmt, DL, XLenVT);
- CurDAG->SelectNodeTo(Node, RISCV::SRLIW, XLenVT, Op0->getOperand(0),
- ShAmtVal);
- return;
- }
- }
- break;
- }
case ISD::INTRINSIC_W_CHAIN: {
unsigned IntNo = cast<ConstantSDNode>(Node->getOperand(1))->getZExtValue();
switch (IntNo) {
return false;
}
-// Check that it is a SLOI (Shift Left Ones Immediate). We first check that
-// it is the right node tree:
+// Match (srl (and val, mask), imm) where the result would be a
+// zero-extended 32-bit integer. i.e. the mask is 0xffffffff or the result
+// is equivalent to this (SimplifyDemandedBits may have removed lower bits
+// from the mask that aren't necessary due to the right-shifting).
+bool RISCVDAGToDAGISel::MatchSRLIW(SDNode *N) const {
+ assert(N->getOpcode() == ISD::SRL);
+ assert(N->getOperand(0).getOpcode() == ISD::AND);
+ assert(isa<ConstantSDNode>(N->getOperand(1)));
+ assert(isa<ConstantSDNode>(N->getOperand(0).getOperand(1)));
+
+ // The IsRV64 predicate is checked after PatFrag predicates so we can get
+ // here even on RV32.
+ if (!Subtarget->is64Bit())
+ return false;
+
+ SDValue And = N->getOperand(0);
+ uint64_t ShAmt = N->getConstantOperandVal(1);
+ uint64_t Mask = And.getConstantOperandVal(1);
+ return (Mask | maskTrailingOnes<uint64_t>(ShAmt)) == 0xffffffff;
+}
+
+// Check that it is a SLOI (Shift Left Ones Immediate). A PatFrag has already
+// determined it has the right structure:
//
// (OR (SHL RS1, VC2), VC1)
//
-// and then we check that VC1, the mask used to fill with ones, is compatible
+// Check that VC1, the mask used to fill with ones, is compatible
// with VC2, the shamt:
//
-// VC1 == maskTrailingOnes<uint64_t>(VC2)
-
-bool RISCVDAGToDAGISel::SelectSLOI(SDValue N, SDValue &RS1, SDValue &Shamt) {
- MVT XLenVT = Subtarget->getXLenVT();
- if (N.getOpcode() == ISD::OR) {
- SDValue Or = N;
- if (Or.getOperand(0).getOpcode() == ISD::SHL) {
- SDValue Shl = Or.getOperand(0);
- if (isa<ConstantSDNode>(Shl.getOperand(1)) &&
- isa<ConstantSDNode>(Or.getOperand(1))) {
- if (XLenVT == MVT::i64) {
- uint64_t VC1 = Or.getConstantOperandVal(1);
- uint64_t VC2 = Shl.getConstantOperandVal(1);
- if (VC1 == maskTrailingOnes<uint64_t>(VC2)) {
- RS1 = Shl.getOperand(0);
- Shamt = CurDAG->getTargetConstant(VC2, SDLoc(N),
- Shl.getOperand(1).getValueType());
- return true;
- }
- }
- if (XLenVT == MVT::i32) {
- uint32_t VC1 = Or.getConstantOperandVal(1);
- uint32_t VC2 = Shl.getConstantOperandVal(1);
- if (VC1 == maskTrailingOnes<uint32_t>(VC2)) {
- RS1 = Shl.getOperand(0);
- Shamt = CurDAG->getTargetConstant(VC2, SDLoc(N),
- Shl.getOperand(1).getValueType());
- return true;
- }
- }
- }
- }
+// VC1 == maskTrailingOnes(VC2)
+//
+bool RISCVDAGToDAGISel::MatchSLOI(SDNode *N) const {
+ assert(N->getOpcode() == ISD::OR);
+ assert(N->getOperand(0).getOpcode() == ISD::SHL);
+ assert(isa<ConstantSDNode>(N->getOperand(1)));
+ assert(isa<ConstantSDNode>(N->getOperand(0).getOperand(1)));
+
+ SDValue Shl = N->getOperand(0);
+ if (Subtarget->is64Bit()) {
+ uint64_t VC1 = N->getConstantOperandVal(1);
+ uint64_t VC2 = Shl.getConstantOperandVal(1);
+ return VC1 == maskTrailingOnes<uint64_t>(VC2);
}
- return false;
+
+ uint32_t VC1 = N->getConstantOperandVal(1);
+ uint32_t VC2 = Shl.getConstantOperandVal(1);
+ return VC1 == maskTrailingOnes<uint32_t>(VC2);
}
-// Check that it is a SROI (Shift Right Ones Immediate). We first check that
-// it is the right node tree:
+// Check that it is a SROI (Shift Right Ones Immediate). A PatFrag has already
+// determined it has the right structure:
//
// (OR (SRL RS1, VC2), VC1)
//
-// and then we check that VC1, the mask used to fill with ones, is compatible
+// Check that VC1, the mask used to fill with ones, is compatible
// with VC2, the shamt:
//
-// VC1 == maskLeadingOnes<uint64_t>(VC2)
-
-bool RISCVDAGToDAGISel::SelectSROI(SDValue N, SDValue &RS1, SDValue &Shamt) {
- MVT XLenVT = Subtarget->getXLenVT();
- if (N.getOpcode() == ISD::OR) {
- SDValue Or = N;
- if (Or.getOperand(0).getOpcode() == ISD::SRL) {
- SDValue Srl = Or.getOperand(0);
- if (isa<ConstantSDNode>(Srl.getOperand(1)) &&
- isa<ConstantSDNode>(Or.getOperand(1))) {
- if (XLenVT == MVT::i64) {
- uint64_t VC1 = Or.getConstantOperandVal(1);
- uint64_t VC2 = Srl.getConstantOperandVal(1);
- if (VC1 == maskLeadingOnes<uint64_t>(VC2)) {
- RS1 = Srl.getOperand(0);
- Shamt = CurDAG->getTargetConstant(VC2, SDLoc(N),
- Srl.getOperand(1).getValueType());
- return true;
- }
- }
- if (XLenVT == MVT::i32) {
- uint32_t VC1 = Or.getConstantOperandVal(1);
- uint32_t VC2 = Srl.getConstantOperandVal(1);
- if (VC1 == maskLeadingOnes<uint32_t>(VC2)) {
- RS1 = Srl.getOperand(0);
- Shamt = CurDAG->getTargetConstant(VC2, SDLoc(N),
- Srl.getOperand(1).getValueType());
- return true;
- }
- }
- }
- }
- }
- return false;
-}
-
-// Check that it is a SLLIUW (Shift Logical Left Immediate Unsigned i32
-// on RV64).
-// SLLIUW is the same as SLLI except for the fact that it clears the bits
-// XLEN-1:32 of the input RS1 before shifting.
-// We first check that it is the right node tree:
-//
-// (AND (SHL RS1, VC2), VC1)
-//
-// We check that VC2, the shamt is less than 32, otherwise the pattern is
-// exactly the same as SLLI and we give priority to that.
-// Eventually we check that that VC1, the mask used to clear the upper 32 bits
-// of RS1, is correct:
+// VC1 == maskLeadingOnes(VC2)
//
-// VC1 == (0xFFFFFFFF << VC2)
-
-bool RISCVDAGToDAGISel::SelectSLLIUW(SDValue N, SDValue &RS1, SDValue &Shamt) {
- if (N.getOpcode() == ISD::AND && Subtarget->getXLenVT() == MVT::i64) {
- SDValue And = N;
- if (And.getOperand(0).getOpcode() == ISD::SHL) {
- SDValue Shl = And.getOperand(0);
- if (isa<ConstantSDNode>(Shl.getOperand(1)) &&
- isa<ConstantSDNode>(And.getOperand(1))) {
- uint64_t VC1 = And.getConstantOperandVal(1);
- uint64_t VC2 = Shl.getConstantOperandVal(1);
- if (VC2 < 32 && VC1 == ((uint64_t)0xFFFFFFFF << VC2)) {
- RS1 = Shl.getOperand(0);
- Shamt = CurDAG->getTargetConstant(VC2, SDLoc(N),
- Shl.getOperand(1).getValueType());
- return true;
- }
- }
- }
+bool RISCVDAGToDAGISel::MatchSROI(SDNode *N) const {
+ assert(N->getOpcode() == ISD::OR);
+ assert(N->getOperand(0).getOpcode() == ISD::SRL);
+ assert(isa<ConstantSDNode>(N->getOperand(1)));
+ assert(isa<ConstantSDNode>(N->getOperand(0).getOperand(1)));
+
+ SDValue Srl = N->getOperand(0);
+ if (Subtarget->is64Bit()) {
+ uint64_t VC1 = N->getConstantOperandVal(1);
+ uint64_t VC2 = Srl.getConstantOperandVal(1);
+ return VC1 == maskLeadingOnes<uint64_t>(VC2);
}
- return false;
+
+ uint32_t VC1 = N->getConstantOperandVal(1);
+ uint32_t VC2 = Srl.getConstantOperandVal(1);
+ return VC1 == maskLeadingOnes<uint32_t>(VC2);
}
-// Check that it is a SLOIW (Shift Left Ones Immediate i32 on RV64).
-// We first check that it is the right node tree:
+// Check that it is a SROIW (Shift Right Ones Immediate i32 on RV64). A PatFrag
+// has already determined it has the right structure:
//
-// (SIGN_EXTEND_INREG (OR (SHL RS1, VC2), VC1))
+// (OR (SRL RS1, VC2), VC1)
//
// and then we check that VC1, the mask used to fill with ones, is compatible
// with VC2, the shamt:
//
// VC2 < 32
-// VC1 == maskTrailingOnes<uint64_t>(VC2)
-
-bool RISCVDAGToDAGISel::SelectSLOIW(SDValue N, SDValue &RS1, SDValue &Shamt) {
- assert(Subtarget->is64Bit() && "SLOIW should only be matched on RV64");
- if (N.getOpcode() != ISD::SIGN_EXTEND_INREG ||
- cast<VTSDNode>(N.getOperand(1))->getVT() != MVT::i32)
+// VC1 == maskTrailingZeros<uint64_t>(32 - VC2)
+//
+bool RISCVDAGToDAGISel::MatchSROIW(SDNode *N) const {
+ assert(N->getOpcode() == ISD::OR);
+ assert(N->getOperand(0).getOpcode() == ISD::SRL);
+ assert(isa<ConstantSDNode>(N->getOperand(1)));
+ assert(isa<ConstantSDNode>(N->getOperand(0).getOperand(1)));
+
+ // The IsRV64 predicate is checked after PatFrag predicates so we can get
+ // here even on RV32.
+ if (!Subtarget->is64Bit())
return false;
- SDValue Or = N.getOperand(0);
-
- if (Or.getOpcode() != ISD::OR || !isa<ConstantSDNode>(Or.getOperand(1)))
- return false;
-
- SDValue Shl = Or.getOperand(0);
- if (Shl.getOpcode() != ISD::SHL || !isa<ConstantSDNode>(Shl.getOperand(1)))
- return false;
-
- uint64_t VC1 = Or.getConstantOperandVal(1);
- uint64_t VC2 = Shl.getConstantOperandVal(1);
-
- if (VC2 >= 32 || VC1 != maskTrailingOnes<uint64_t>(VC2))
- return false;
+ SDValue Srl = N->getOperand(0);
+ uint64_t VC1 = N->getConstantOperandVal(1);
+ uint64_t VC2 = Srl.getConstantOperandVal(1);
- RS1 = Shl.getOperand(0);
- Shamt = CurDAG->getTargetConstant(VC2, SDLoc(N),
- Shl.getOperand(1).getValueType());
- return true;
+ // Immediate range should be enforced by uimm5 predicate.
+ assert(VC2 < 32 && "Unexpected immediate");
+ return VC1 == maskTrailingZeros<uint64_t>(32 - VC2);
}
-// Check that it is a SROIW (Shift Right Ones Immediate i32 on RV64).
-// We first check that it is the right node tree:
+// Check that it is a SLLIUW (Shift Logical Left Immediate Unsigned i32
+// on RV64).
+// SLLIUW is the same as SLLI except for the fact that it clears the bits
+// XLEN-1:32 of the input RS1 before shifting.
+// A PatFrag has already checked that it has the right structure:
//
-// (OR (SRL RS1, VC2), VC1)
+// (AND (SHL RS1, VC2), VC1)
//
-// and then we check that VC1, the mask used to fill with ones, is compatible
-// with VC2, the shamt:
+// We check that VC2, the shamt, is less than 32; otherwise the pattern is
+// exactly the same as SLLI and we give priority to that.
+// Finally, we check that VC1, the mask used to clear the upper 32 bits
+// of RS1, is correct:
//
-// VC2 < 32
-// VC1 == maskTrailingZeros<uint64_t>(32 - VC2)
+// VC1 == (0xFFFFFFFF << VC2)
//
-bool RISCVDAGToDAGISel::SelectSROIW(SDValue N, SDValue &RS1, SDValue &Shamt) {
- assert(Subtarget->is64Bit() && "SROIW should only be matched on RV64");
- if (N.getOpcode() != ISD::OR || !isa<ConstantSDNode>(N.getOperand(1)))
- return false;
-
- SDValue Srl = N.getOperand(0);
- if (Srl.getOpcode() != ISD::SRL || !isa<ConstantSDNode>(Srl.getOperand(1)))
+bool RISCVDAGToDAGISel::MatchSLLIUW(SDNode *N) const {
+ assert(N->getOpcode() == ISD::AND);
+ assert(N->getOperand(0).getOpcode() == ISD::SHL);
+ assert(isa<ConstantSDNode>(N->getOperand(1)));
+ assert(isa<ConstantSDNode>(N->getOperand(0).getOperand(1)));
+
+ // The IsRV64 predicate is checked after PatFrag predicates so we can get
+ // here even on RV32.
+ if (!Subtarget->is64Bit())
return false;
- uint64_t VC1 = N.getConstantOperandVal(1);
- uint64_t VC2 = Srl.getConstantOperandVal(1);
+ SDValue Shl = N->getOperand(0);
+ uint64_t VC1 = N->getConstantOperandVal(1);
+ uint64_t VC2 = Shl.getConstantOperandVal(1);
- if (VC2 >= 32 || VC1 != maskTrailingZeros<uint64_t>(32 - VC2))
- return false;
-
- RS1 = Srl.getOperand(0);
- Shamt = CurDAG->getTargetConstant(VC2, SDLoc(N),
- Srl.getOperand(1).getValueType());
- return true;
+ // Immediate range should be enforced by uimm5 predicate.
+ assert(VC2 < 32 && "Unexpected immediate");
+ return VC1 == ((uint64_t)0xFFFFFFFF << VC2);
}
bool RISCVDAGToDAGISel::selectVSplat(SDValue N, SDValue &SplatVal) {
N->getValueType(0));
}]>;
+// Check that it is a SLOI (Shift Left Ones Immediate).
+def SLOIPat : PatFrag<(ops node:$A, node:$B),
+ (or (shl node:$A, node:$B), imm), [{
+ return MatchSLOI(N);
+}]>;
+
+// Check that it is a SROI (Shift Right Ones Immediate).
+def SROIPat : PatFrag<(ops node:$A, node:$B),
+ (or (srl node:$A, node:$B), imm), [{
+ return MatchSROI(N);
+}]>;
+
+// Check that it is a SROIW (Shift Right Ones Immediate i32 on RV64).
+def SROIWPat : PatFrag<(ops node:$A, node:$B),
+ (or (srl node:$A, node:$B), imm), [{
+ return MatchSROIW(N);
+}]>;
+
+// Check that it is a SLLIUW (Shift Logical Left Immediate Unsigned i32
+// on RV64).
+def SLLIUWPat : PatFrag<(ops node:$A, node:$B),
+ (and (shl node:$A, node:$B), imm), [{
+ return MatchSLLIUW(N);
+}]>;
+
// Checks if this mask has a single 0 bit and cannot be used with ANDI.
def SBCLRMask : ImmLeaf<XLenVT, [{
if (Subtarget->is64Bit())
//===----------------------------------------------------------------------===//
// Codegen patterns
//===----------------------------------------------------------------------===//
-def SLOIPat : ComplexPattern<XLenVT, 2, "SelectSLOI", [or]>;
-def SROIPat : ComplexPattern<XLenVT, 2, "SelectSROI", [or]>;
-def SLLIUWPat : ComplexPattern<i64, 2, "SelectSLLIUW", [and]>;
-def SLOIWPat : ComplexPattern<i64, 2, "SelectSLOIW", [sext_inreg]>;
-def SROIWPat : ComplexPattern<i64, 2, "SelectSROIW", [or]>;
let Predicates = [HasStdExtZbbOrZbp] in {
def : Pat<(and GPR:$rs1, (not GPR:$rs2)), (ANDN GPR:$rs1, GPR:$rs2)>;
let Predicates = [HasStdExtZbb, IsRV64] in {
def : Pat<(and (add GPR:$rs, simm12:$simm12), (i64 0xFFFFFFFF)),
(ADDIWU GPR:$rs, simm12:$simm12)>;
-def : Pat<(SLLIUWPat GPR:$rs1, uimmlog2xlen:$shamt),
- (SLLIUW GPR:$rs1, uimmlog2xlen:$shamt)>;
+def : Pat<(SLLIUWPat GPR:$rs1, uimm5:$shamt),
+ (SLLIUW GPR:$rs1, uimm5:$shamt)>;
def : Pat<(and (add GPR:$rs1, GPR:$rs2), (i64 0xFFFFFFFF)),
(ADDWU GPR:$rs1, GPR:$rs2)>;
def : Pat<(and (sub GPR:$rs1, GPR:$rs2), (i64 0xFFFFFFFF)),
} // Predicates = [HasStdExtZbs, IsRV64]
let Predicates = [HasStdExtZbb, IsRV64] in {
-def : Pat<(SLOIWPat GPR:$rs1, uimmlog2xlen:$shamt),
- (SLOIW GPR:$rs1, uimmlog2xlen:$shamt)>;
-def : Pat<(SROIWPat GPR:$rs1, uimmlog2xlen:$shamt),
- (SROIW GPR:$rs1, uimmlog2xlen:$shamt)>;
+def : Pat<(sext_inreg (SLOIPat GPR:$rs1, uimm5:$shamt), i32),
+ (SLOIW GPR:$rs1, uimm5:$shamt)>;
+def : Pat<(SROIWPat GPR:$rs1, uimm5:$shamt),
+ (SROIW GPR:$rs1, uimm5:$shamt)>;
} // Predicates = [HasStdExtZbb, IsRV64]
let Predicates = [HasStdExtZbp, IsRV64] in {