inline ConstantMatch m_ICst(int64_t &Cst) { return ConstantMatch(Cst); }
+/// \brief Matches only when a register's defining G_CONSTANT holds exactly
+/// the requested value.
+struct SpecificConstantMatch {
+  int64_t RequestedVal;
+  SpecificConstantMatch(int64_t RequestedVal) : RequestedVal(RequestedVal) {}
+  bool match(const MachineRegisterInfo &MRI, Register Reg) {
+    // First pull out whatever constant (if any) defines \p Reg, then
+    // compare it against the value this matcher was constructed with.
+    int64_t FoundVal;
+    if (!mi_match(Reg, MRI, m_ICst(FoundVal)))
+      return false;
+    return FoundVal == RequestedVal;
+  }
+};
+
+/// Convenience builder: matches a G_CONSTANT whose value equals
+/// \p RequestedValue.
+inline SpecificConstantMatch m_SpecificICst(int64_t RequestedValue) {
+  return {RequestedValue};
+}
+
+/// Matches an integer constant zero (shorthand for m_SpecificICst(0)).
+inline SpecificConstantMatch m_ZeroInt() { return m_SpecificICst(0); }
+
// TODO: Rework this for different kinds of MachineOperand.
// Currently assumes the Src for a match is a register.
// We might want to support taking in some MachineOperands and call getReg on
TargetOpcode::G_INSERT_VECTOR_ELT>(Src0, Src1, Src2);
}
+/// Matches a register negated by a G_SUB, i.e. \p Src is matched against
+/// the second operand of:
+///   G_SUB 0, %negated_reg
+///
+/// Note: \p Src is taken by const lvalue reference. The previous const
+/// rvalue reference (const SrcTy &&) could only bind a temporary matcher;
+/// const & accepts both a temporary and a named matcher lvalue, with
+/// identical behavior for all existing callers.
+template <typename SrcTy>
+inline BinaryOp_match<SpecificConstantMatch, SrcTy, TargetOpcode::G_SUB>
+m_Neg(const SrcTy &Src) {
+  return m_GSub(m_ZeroInt(), Src);
+}
+
} // namespace GMIPatternMatch
} // namespace llvm
// ((0-A) + B) -> B - A
// (A + (0-B)) -> A - B
auto CheckFold = [&](Register &MaybeSub, Register &MaybeNewLHS) {
- int64_t Cst;
- if (!mi_match(MaybeSub, MRI, m_GSub(m_ICst(Cst), m_Reg(NewRHS))) ||
- Cst != 0)
+ if (!mi_match(MaybeSub, MRI, m_Neg(m_Reg(NewRHS))))
return false;
NewLHS = MaybeNewLHS;
return true;
const unsigned NumOps = MI.getNumOperands();
for (unsigned I = 1; I != NumOps; ++I) {
Register Element = MI.getOperand(I).getReg();
- int64_t ElementValue;
- if (!mi_match(Element, MRI, m_ICst(ElementValue)) ||
- ElementValue != SplatValue)
+ if (!mi_match(Element, MRI, m_SpecificICst(SplatValue)))
return false;
}
// Also take the opportunity here to try to do some optimization.
// Try to convert this into a G_SUB if the offset is a 0-x negate idiom.
Register NegatedReg;
- int64_t Cst;
- if (!mi_match(I.getOperand(2).getReg(), MRI,
- m_GSub(m_ICst(Cst), m_Reg(NegatedReg))) ||
- Cst != 0)
+ if (!mi_match(I.getOperand(2).getReg(), MRI, m_Neg(m_Reg(NegatedReg))))
return true;
I.getOperand(2).setReg(NegatedReg);
I.setDesc(TII.get(TargetOpcode::G_SUB));
return false;
// Match the index constant 0.
- int64_t Index = 0;
- if (!mi_match(InsMI->getOperand(3).getReg(), MRI, m_ICst(Index)) || Index)
+ if (!mi_match(InsMI->getOperand(3).getReg(), MRI, m_ZeroInt()))
return false;
MatchInfo = ShuffleVectorPseudo(AArch64::G_DUP, MI.getOperand(0).getReg(),
Register ShiftSrc0;
Register ShiftSrc1;
- int64_t ShiftAmt;
// With multiple uses of the shift, this will duplicate the shift and
// increase register pressure.
// (build_vector_trunc $src0, $src1)
// => (S_PACK_LL_B32_B16 $src0, $src1)
- // FIXME: This is an inconvenient way to check a specific value
bool Shift0 = mi_match(
- Src0, *MRI, m_OneUse(m_GLShr(m_Reg(ShiftSrc0), m_ICst(ShiftAmt)))) &&
- ShiftAmt == 16;
+ Src0, *MRI, m_OneUse(m_GLShr(m_Reg(ShiftSrc0), m_SpecificICst(16))));
bool Shift1 = mi_match(
- Src1, *MRI, m_OneUse(m_GLShr(m_Reg(ShiftSrc1), m_ICst(ShiftAmt)))) &&
- ShiftAmt == 16;
+ Src1, *MRI, m_OneUse(m_GLShr(m_Reg(ShiftSrc1), m_SpecificICst(16))));
unsigned Opc = AMDGPU::S_PACK_LL_B32_B16;
if (Shift0 && Shift1) {
if (Def->getOpcode() != AMDGPU::G_MERGE_VALUES)
return false;
- int64_t MergeRHS;
- if (mi_match(Def->getOperand(2).getReg(), MRI, m_ICst(MergeRHS)) &&
- MergeRHS == 0) {
+ if (mi_match(Def->getOperand(2).getReg(), MRI, m_ZeroInt())) {
return Def->getOperand(1).getReg();
}
match = mi_match(MIBAdd.getReg(0), *MRI, m_OneUse(m_GAdd(m_Reg(), m_Reg())));
EXPECT_FALSE(match);
}
+
+TEST_F(AArch64GISelMITest, MatchSpecificConstant) {
+  setUp();
+  if (!TM)
+    return;
+
+  LLT s64 = LLT::scalar(64);
+
+  // A lone G_CONSTANT should only match its own value.
+  auto CstMI = B.buildConstant(s64, 42);
+  EXPECT_TRUE(mi_match(CstMI.getReg(0), *MRI, m_SpecificICst(42)));
+  EXPECT_FALSE(mi_match(CstMI.getReg(0), *MRI, m_SpecificICst(123)));
+
+  // The matcher should also compose inside a larger pattern: check the
+  // RHS of a G_ADD that was fed the constant above.
+  auto Add = B.buildAdd(s64, Copies[0], CstMI);
+  EXPECT_TRUE(mi_match(Add.getReg(2), *MRI, m_SpecificICst(42)));
+
+  // The right operand but the wrong value must not match...
+  EXPECT_FALSE(mi_match(Add.getReg(2), *MRI, m_SpecificICst(123)));
+
+  // ...and neither should an operand that isn't a constant at all.
+  EXPECT_FALSE(mi_match(Add.getReg(1), *MRI, m_SpecificICst(42)));
+}
+
+TEST_F(AArch64GISelMITest, MatchZeroInt) {
+  setUp();
+  if (!TM)
+    return;
+
+  // m_ZeroInt accepts a constant zero...
+  auto ZeroCst = B.buildConstant(LLT::scalar(64), 0);
+  EXPECT_TRUE(mi_match(ZeroCst.getReg(0), *MRI, m_ZeroInt()));
+
+  // ...and rejects any other constant.
+  auto NonZeroCst = B.buildConstant(LLT::scalar(64), 42);
+  EXPECT_FALSE(mi_match(NonZeroCst.getReg(0), *MRI, m_ZeroInt()));
+}
+
+TEST_F(AArch64GISelMITest, MatchNeg) {
+  setUp();
+  if (!TM)
+    return;
+
+  LLT s64 = LLT::scalar(64);
+  auto ZeroCst = B.buildConstant(s64, 0);
+  auto Negate = B.buildSub(s64, ZeroCst, Copies[0]);
+  Register MatchedSrc;
+
+  // The canonical negate idiom (G_SUB 0, %x) should match and bind %x.
+  EXPECT_TRUE(mi_match(Negate.getReg(0), *MRI, m_Neg(m_Reg(MatchedSrc))));
+  EXPECT_EQ(MatchedSrc, Copies[0]);
+
+  // Operands swapped (G_SUB %x, 0) is not a negate.
+  auto SubByZero = B.buildSub(s64, Copies[0], ZeroCst);
+  EXPECT_FALSE(mi_match(SubByZero.getReg(0), *MRI, m_Neg(m_Reg(MatchedSrc))));
+
+  // A non-zero LHS constant (G_SUB 42, %x) is not a negate either.
+  auto NonZeroCst = B.buildConstant(s64, 42);
+  auto SubFrom42 = B.buildSub(s64, NonZeroCst, Copies[0]);
+  EXPECT_FALSE(mi_match(SubFrom42.getReg(0), *MRI, m_Neg(m_Reg(MatchedSrc))));
+
+  // m_Neg should also fire when nested inside a larger match, e.g. on the
+  // second operand of: %add = G_ADD %y, (G_SUB 0, %x).
+  auto Add = B.buildAdd(s64, Copies[1], Negate);
+  MatchedSrc = Register();
+  EXPECT_TRUE(mi_match(Add.getReg(2), *MRI, m_Neg(m_Reg(MatchedSrc))));
+  EXPECT_EQ(MatchedSrc, Copies[0]);
+}
} // namespace
int main(int argc, char **argv) {