// Hexagon Subtarget features.
//===----------------------------------------------------------------------===//
-// Hexagon Archtectures
-def ArchV2 : SubtargetFeature<"v2", "HexagonArchVersion", "V2",
- "Hexagon v2">;
-def ArchV3 : SubtargetFeature<"v3", "HexagonArchVersion", "V3",
- "Hexagon v3">;
-def ArchV4 : SubtargetFeature<"v4", "HexagonArchVersion", "V4",
- "Hexagon v4">;
-def ArchV5 : SubtargetFeature<"v5", "HexagonArchVersion", "V5",
- "Hexagon v5">;
+// Hexagon Architectures
+def ArchV4: SubtargetFeature<"v4", "HexagonArchVersion", "V4", "Hexagon V4">;
+def ArchV5: SubtargetFeature<"v5", "HexagonArchVersion", "V5", "Hexagon V5">;
//===----------------------------------------------------------------------===//
// Hexagon Instruction Predicate Definitions.
//===----------------------------------------------------------------------===//
-def HasV2T : Predicate<"Subtarget->hasV2TOps()">;
-def HasV2TOnly : Predicate<"Subtarget->hasV2TOpsOnly()">;
-def NoV2T : Predicate<"!Subtarget->hasV2TOps()">;
-def HasV3T : Predicate<"Subtarget->hasV3TOps()">;
-def HasV3TOnly : Predicate<"Subtarget->hasV3TOpsOnly()">;
-def NoV3T : Predicate<"!Subtarget->hasV3TOps()">;
-def HasV4T : Predicate<"Subtarget->hasV4TOps()">;
-def NoV4T : Predicate<"!Subtarget->hasV4TOps()">;
def HasV5T : Predicate<"Subtarget->hasV5TOps()">;
def NoV5T : Predicate<"!Subtarget->hasV5TOps()">;
def UseMEMOP : Predicate<"Subtarget->useMemOps()">;
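+// Illustrative (a sketch, not from this patch): V5-only instructions are
+// selected under these predicates, e.g.
+//   def : Pat<(f32 (fadd ...)), (F2_sfadd ...)>, Requires<[HasV5T]>;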
list<SubtargetFeature> Features>
: ProcessorModel<Name, Model, Features>;
-def : Proc<"hexagonv4", HexagonModelV4, [ArchV2, ArchV3, ArchV4]>;
-def : Proc<"hexagonv5", HexagonModelV4, [ArchV2, ArchV3, ArchV4, ArchV5]>;
+def : Proc<"hexagonv4", HexagonModelV4,
+ [ArchV4]>;
+def : Proc<"hexagonv5", HexagonModelV4,
+ [ArchV4, ArchV5]>;
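+// E.g. "llc -mcpu=hexagonv5" picks the second Proc, enabling both ArchV4 and
+// ArchV5, so HexagonArchVersion resolves to V5 and hasV5TOps() returns true.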
//===----------------------------------------------------------------------===//
// Declare the target which we are implementing
LowRegInst->getOpcode() == Hexagon::TFRI_V4) &&
"Assume individual instructions are of a combinable type");
- const HexagonRegisterInfo *QRI =
- static_cast<const HexagonRegisterInfo *>(TRI);
-
- // V4 added some combine variations (mixed immediate and register source
- // operands), if we are on < V4 we can only combine 2 register-to-register
- // moves and 2 immediate-to-register moves. We also don't have
- // constant-extenders.
- if (!QRI->Subtarget.hasV4TOps())
- return HighRegInst->getOpcode() == LowRegInst->getOpcode() &&
- !isGreaterThan8BitTFRI(HighRegInst) &&
- !isGreaterThan6BitTFRI(LowRegInst);
-
// There is no combine of two constant extended values.
if ((HighRegInst->getOpcode() == Hexagon::TFRI_V4 ||
isGreaterThan8BitTFRI(HighRegInst)) &&
}
// Replace 'jumpr r31' instruction with dealloc_return for V4 and higher
// versions.
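+  // E.g. the epilogue pair "deallocframe; jumpr r31" collapses into the
+  // single "dealloc_return", which restores the frame and returns.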
- if (MF.getSubtarget<HexagonSubtarget>().hasV4TOps() &&
- MBBI->getOpcode() == Hexagon::JMPret && !DisableDeallocRet) {
+ if (MBBI->getOpcode() == Hexagon::JMPret && !DisableDeallocRet) {
// Check for RESTORE_DEALLOC_RET_JMP_V4 call. Don't emit an extra DEALLOC
// instruction if we encounter it.
MachineBasicBlock::iterator BeforeJMPR =
setOperationAction(ISD::EH_RETURN, MVT::Other, Custom);
- if (Subtarget->isSubtargetV2()) {
- setExceptionPointerRegister(Hexagon::R20);
- setExceptionSelectorRegister(Hexagon::R21);
- } else {
- setExceptionPointerRegister(Hexagon::R0);
- setExceptionSelectorRegister(Hexagon::R1);
- }
+ setExceptionPointerRegister(Hexagon::R0);
+ setExceptionSelectorRegister(Hexagon::R1);
// VASTART needs to be custom lowered to use the VarArgsFrameIndex.
setOperationAction(ISD::VASTART, MVT::Other, Custom);
}
bool HexagonInstrInfo::isExtendable(const MachineInstr *MI) const {
- // Constant extenders are allowed only for V4 and above.
- if (!Subtarget.hasV4TOps())
- return false;
-
const MCInstrDesc &MID = MI->getDesc();
const uint64_t F = MID.TSFlags;
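+  // TSFlags is a TableGen-generated bitfield of per-instruction properties;
+  // each field is read as (F >> Pos) & Mask (see HexagonBaseInfo.h).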
if ((F >> HexagonII::ExtendablePos) & HexagonII::ExtendableMask)
case Hexagon::A2_sxth:
case Hexagon::A2_zxtb:
case Hexagon::A2_zxth:
- return Subtarget.hasV4TOps();
+ return true;
}
return true;
const uint64_t F = MI->getDesc().TSFlags;
return ((F >> HexagonII::mayNVStorePos) &
- HexagonII::mayNVStoreMask &
- QRI.Subtarget.hasV4TOps());
+ HexagonII::mayNVStoreMask);
}
bool
case Hexagon::L2_ploadruhf_io:
case Hexagon::L2_ploadrubt_io:
case Hexagon::L2_ploadrubf_io:
- return true;
case Hexagon::L2_ploadrdt_pi:
case Hexagon::L2_ploadrdf_pi:
case Hexagon::L2_ploadrit_pi:
case Hexagon::L2_ploadruhf_pi:
case Hexagon::L2_ploadrubt_pi:
case Hexagon::L2_ploadrubf_pi:
- return QRI.Subtarget.hasV4TOps();
case Hexagon::L4_ploadrdt_rr:
case Hexagon::L4_ploadrdf_rr:
case Hexagon::L4_ploadrbt_rr:
case Hexagon::L4_ploadruhf_rr:
case Hexagon::L4_ploadrit_rr:
case Hexagon::L4_ploadrif_rr:
- return QRI.Subtarget.hasV4TOps();
+ return true;
}
}
case Hexagon::S4_pstorerif_rr:
case Hexagon::S2_pstorerit_pi:
case Hexagon::S2_pstorerif_pi:
- return QRI.Subtarget.hasV4TOps();
// V4 global address store before promoting to dot new.
case Hexagon::S4_pstorerdt_abs:
case Hexagon::S4_pstorerhf_abs:
case Hexagon::S4_pstorerit_abs:
case Hexagon::S4_pstorerif_abs:
- return QRI.Subtarget.hasV4TOps();
+ return true;
// Predicated new value stores (i.e. if (p0) memw(..)=r0.new) are excluded
// from the "Conditional Store" list. Because a predicated new value store
}
bool HexagonInstrInfo::isConstExtended(MachineInstr *MI) const {
-
- // Constant extenders are allowed only for V4 and above.
- if (!Subtarget.hasV4TOps())
- return false;
-
const uint64_t F = MI->getDesc().TSFlags;
unsigned isExtended = (F >> HexagonII::ExtendedPos) & HexagonII::ExtendedMask;
if (isExtended) // Instruction must be extended.
// Returns true if a particular operand is extendable for an instruction.
bool HexagonInstrInfo::isOperandExtended(const MachineInstr *MI,
unsigned short OperandNum) const {
- // Constant extenders are allowed only for V4 and above.
- if (!Subtarget.hasV4TOps())
- return false;
-
const uint64_t F = MI->getDesc().TSFlags;
return ((F >> HexagonII::ExtendableOpPos) & HexagonII::ExtendableOpMask)
SDNode<"HexagonISD::WrapperCombineIR_V4", SDTHexagonI64I32I32>;
def : Pat <(HexagonWrapperCombineRI_V4 IntRegs:$r, s8ExtPred:$i),
- (A4_combineri IntRegs:$r, s8ExtPred:$i)>,
- Requires<[HasV4T]>;
+ (A4_combineri IntRegs:$r, s8ExtPred:$i)>;
def : Pat <(HexagonWrapperCombineIR_V4 s8ExtPred:$i, IntRegs:$r),
- (A4_combineir s8ExtPred:$i, IntRegs:$r)>,
- Requires<[HasV4T]>;
+ (A4_combineir s8ExtPred:$i, IntRegs:$r)>;
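+// (A4_combineir "Rdd = combine(#s8, Rs)" places the immediate in the high
+// word; the i64 zext/anyext load patterns below pass #0 there.)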
// A4_combineii: Set two small immediates.
let hasSideEffects = 0, isExtendable = 1, opExtentBits = 6, opExtendable = 2 in
// zext i32->i64
def: Pat <(i64 (zextloadi32 ADDRriS11_2:$src1)),
- (i64 (A4_combineir 0, (L2_loadri_io AddrFI:$src1, 0)))>,
- Requires<[HasV4T]>;
+ (i64 (A4_combineir 0, (L2_loadri_io AddrFI:$src1, 0)))>;
let AddedComplexity = 100 in
def: Pat <(i64 (zextloadi32 (i32 (add IntRegs:$src1, s11_2ExtPred:$offset)))),
(i64 (A4_combineir 0, (L2_loadri_io IntRegs:$src1,
- s11_2ExtPred:$offset)))>,
- Requires<[HasV4T]>;
+ s11_2ExtPred:$offset)))>;
// anyext i32->i64
def: Pat <(i64 (extloadi32 ADDRriS11_2:$src1)),
- (i64 (A4_combineir 0, (L2_loadri_io AddrFI:$src1, 0)))>,
- Requires<[HasV4T]>;
+ (i64 (A4_combineir 0, (L2_loadri_io AddrFI:$src1, 0)))>;
let AddedComplexity = 100 in
def: Pat <(i64 (extloadi32 (i32 (add IntRegs:$src1, s11_2ExtPred:$offset)))),
(i64 (A4_combineir 0, (L2_loadri_io IntRegs:$src1,
- s11_2ExtPred:$offset)))>,
- Requires<[HasV4T]>;
+ s11_2ExtPred:$offset)))>;
defm S4_storeiri : ST_Imm<"memw", "STriw", u6_2Imm, 0b10>;
}
-let Predicates = [HasV4T], AddedComplexity = 10 in {
+let AddedComplexity = 10 in {
def: Pat<(truncstorei8 s8ExtPred:$src3, (add IntRegs:$src1, u6_0ImmPred:$src2)),
(S4_storeirb_io IntRegs:$src1, u6_0ImmPred:$src2, s8ExtPred:$src3)>;
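+// E.g. (illustrative) storing the constant 7 through a char* at offset 2
+// selects S4_storeirb_io and emits "memb(r0+#2) = #7".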
let AddedComplexity = 6 in
def : Pat <(truncstorei8 s8ExtPred:$src2, (i32 IntRegs:$src1)),
- (S4_storeirb_io IntRegs:$src1, 0, s8ExtPred:$src2)>,
- Requires<[HasV4T]>;
+ (S4_storeirb_io IntRegs:$src1, 0, s8ExtPred:$src2)>;
// memb(Rx++#s4:0:circ(Mu))=Rt
// memb(Rx++I:circ(Mu))=Rt
// memh(Rs+#s11:1)=Rt.H
let AddedComplexity = 6 in
def : Pat <(truncstorei16 s8ExtPred:$src2, (i32 IntRegs:$src1)),
- (S4_storeirh_io IntRegs:$src1, 0, s8ExtPred:$src2)>,
- Requires<[HasV4T]>;
+ (S4_storeirh_io IntRegs:$src1, 0, s8ExtPred:$src2)>;
// memh(Rs+Ru<<#u2)=Rt.H
// TODO: needs to be implemented.
let AddedComplexity = 6 in
def : Pat <(store s8ExtPred:$src2, (i32 IntRegs:$src1)),
- (S4_storeiri_io IntRegs:$src1, 0, s8ExtPred:$src2)>,
- Requires<[HasV4T]>;
+ (S4_storeiri_io IntRegs:$src1, 0, s8ExtPred:$src2)>;
// memw(Rx++#s4:2)=Rt
// memw(Rx++#s4:2:circ(Mu))=Rt
u8ExtPred:$u8)))),
(i32 (TFR_condset_ii (i1 (A4_cmpbeqi (i32 IntRegs:$Rs),
(u8ExtPred:$u8))),
- 1, 0))>,
- Requires<[HasV4T]>;
+ 1, 0))>;
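+// (A4_cmpbeqi compares only the low byte of Rs against #u8, which is why
+// the and-with-255 folds away.)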
// For the sequence
// zext( setne ( and(Rs, 255), u8))
u8ExtPred:$u8)))),
(i32 (TFR_condset_ii (i1 (A4_cmpbeqi (i32 IntRegs:$Rs),
(u8ExtPred:$u8))),
- 0, 1))>,
- Requires<[HasV4T]>;
+ 0, 1))>;
// For the sequence
// zext( seteq (Rs, and(Rt, 255)))
(i32 (and (i32 IntRegs:$Rs), 255)))))),
(i32 (TFR_condset_ii (i1 (A4_cmpbeq (i32 IntRegs:$Rs),
(i32 IntRegs:$Rt))),
- 1, 0))>,
- Requires<[HasV4T]>;
+ 1, 0))>;
// For the sequence
// zext( setne (Rs, and(Rt, 255)))
(i32 (and (i32 IntRegs:$Rs), 255)))))),
(i32 (TFR_condset_ii (i1 (A4_cmpbeq (i32 IntRegs:$Rs),
(i32 IntRegs:$Rt))),
- 0, 1))>,
- Requires<[HasV4T]>;
+ 0, 1))>;
// For the sequence
// zext( setugt ( and(Rs, 255), u8))
u8ExtPred:$u8)))),
(i32 (TFR_condset_ii (i1 (A4_cmpbgtui (i32 IntRegs:$Rs),
(u8ExtPred:$u8))),
- 1, 0))>,
- Requires<[HasV4T]>;
+ 1, 0))>;
// For the sequence
// zext( setugt ( and(Rs, 254), u8))
u8ExtPred:$u8)))),
(i32 (TFR_condset_ii (i1 (A4_cmpbgtui (i32 IntRegs:$Rs),
(u8ExtPred:$u8))),
- 1, 0))>,
- Requires<[HasV4T]>;
+ 1, 0))>;
// For the sequence
// zext( setult ( Rs, Rt))
def : Pat <(i32 (zext (i1 (setult (i32 IntRegs:$Rs), (i32 IntRegs:$Rt))))),
(i32 (TFR_condset_ii (i1 (C2_cmpgtu (i32 IntRegs:$Rt),
(i32 IntRegs:$Rs))),
- 1, 0))>,
- Requires<[HasV4T]>;
+ 1, 0))>;
// For the sequence
// zext( setlt ( Rs, Rt))
def : Pat <(i32 (zext (i1 (setlt (i32 IntRegs:$Rs), (i32 IntRegs:$Rt))))),
(i32 (TFR_condset_ii (i1 (C2_cmpgt (i32 IntRegs:$Rt),
(i32 IntRegs:$Rs))),
- 1, 0))>,
- Requires<[HasV4T]>;
+ 1, 0))>;
// For the sequence
// zext( setugt ( Rs, Rt))
def : Pat <(i32 (zext (i1 (setugt (i32 IntRegs:$Rs), (i32 IntRegs:$Rt))))),
(i32 (TFR_condset_ii (i1 (C2_cmpgtu (i32 IntRegs:$Rs),
(i32 IntRegs:$Rt))),
- 1, 0))>,
- Requires<[HasV4T]>;
+ 1, 0))>;
// This pattern interferes with CoreMark performance, not implementing at this
// time.
def : Pat <(i32 (zext (i1 (setuge (i32 IntRegs:$Rs), (i32 IntRegs:$Rt))))),
(i32 (TFR_condset_ii (i1 (C2_cmpgtu (i32 IntRegs:$Rt),
(i32 IntRegs:$Rs))),
- 0, 1))>,
- Requires<[HasV4T]>;
+ 0, 1))>;
// For the sequence
// zext( setge ( Rs, Rt))
def : Pat <(i32 (zext (i1 (setge (i32 IntRegs:$Rs), (i32 IntRegs:$Rt))))),
(i32 (TFR_condset_ii (i1 (C2_cmpgt (i32 IntRegs:$Rt),
(i32 IntRegs:$Rs))),
- 0, 1))>,
- Requires<[HasV4T]>;
+ 0, 1))>;
// For the sequence
// zext( setule ( Rs, Rt))
def : Pat <(i32 (zext (i1 (setule (i32 IntRegs:$Rs), (i32 IntRegs:$Rt))))),
(i32 (TFR_condset_ii (i1 (C2_cmpgtu (i32 IntRegs:$Rs),
(i32 IntRegs:$Rt))),
- 0, 1))>,
- Requires<[HasV4T]>;
+ 0, 1))>;
// For the sequence
// zext( setle ( Rs, Rt))
def : Pat <(i32 (zext (i1 (setle (i32 IntRegs:$Rs), (i32 IntRegs:$Rt))))),
(i32 (TFR_condset_ii (i1 (C2_cmpgt (i32 IntRegs:$Rs),
(i32 IntRegs:$Rt))),
- 0, 1))>,
- Requires<[HasV4T]>;
+ 0, 1))>;
// For the sequence
// zext( setult ( and(Rs, 255), u8))
def RESTORE_DEALLOC_RET_JMP_V4 : JInst<(outs),
(ins calltarget:$dst),
"jump $dst",
- []>,
- Requires<[HasV4T]>;
+ []>;
}
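+// (Assumed context: $dst is a runtime save/restore stub such as
+// __restore_r16_through_r27_and_deallocframe, which reloads the callee-saved
+// registers, deallocates the frame, and returns.)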
// Restore registers and dealloc frame before a tail call.
def RESTORE_DEALLOC_BEFORE_TAILCALL_V4 : JInst<(outs),
(ins calltarget:$dst),
"call $dst",
- []>,
- Requires<[HasV4T]>;
+ []>;
}
// Save registers function call.
def SAVE_REGISTERS_CALL_V4 : JInst<(outs),
(ins calltarget:$dst),
"call $dst // Save_calle_saved_registers",
- []>,
- Requires<[HasV4T]>;
+ []>;
}
//===----------------------------------------------------------------------===//
isAsCheapAsAMove = 1, isReMaterializable = 1, isCodeGenOnly = 1 in
def TFRI_V4 : ALU32_ri<(outs IntRegs:$dst), (ins s16Ext:$src1),
"$dst = #$src1",
- [(set IntRegs:$dst, (HexagonCONST32 tglobaladdr:$src1))]>,
- Requires<[HasV4T]>;
+ [(set IntRegs:$dst, (HexagonCONST32 tglobaladdr:$src1))]>;
// Transfer a block address into a register
def : Pat<(HexagonCONST32_GP tblockaddress:$src1),
- (TFRI_V4 tblockaddress:$src1)>,
- Requires<[HasV4T]>;
+ (TFRI_V4 tblockaddress:$src1)>;
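+// (Once const-extended, TFRI_V4 prints as "Rd = ##sym", carrying the full
+// 32-bit address in an extender word.)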
-let AddedComplexity = 50, Predicates = [HasV4T] in
+let AddedComplexity = 50 in
def : Pat<(HexagonCONST32_GP tglobaladdr:$src1),
- (TFRI_V4 tglobaladdr:$src1)>,
- Requires<[HasV4T]>;
+ (TFRI_V4 tglobaladdr:$src1)>;
// i8/i16/i32 -> i64 loads
// We need a complexity of 120 here to override preceding handling of
MF.getSubtarget().getRegisterInfo());
MBPI = &getAnalysis<MachineBranchProbabilityInfo>();
- if (!QRI->Subtarget.hasV4TOps() ||
- DisableNewValueJumps) {
+ if (DisableNewValueJumps) {
return false;
}
// Predicates for constant extendable operands
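+// A V4+ constant extender is an extra instruction word in the packet that
+// widens an immediate operand to a full 32 bits; the leaves below accept an
+// out-of-range value only when emitting that word is profitable
+// (isConstExtProfitable).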
def s16ExtPred : PatLeaf<(i32 imm), [{
int64_t v = (int64_t)N->getSExtValue();
- if (!Subtarget->hasV4TOps())
- // Return true if the immediate can fit in a 16-bit sign extended field.
- return isInt<16>(v);
- else {
- if (isInt<16>(v))
- return true;
-
- // Return true if extending this immediate is profitable and the value
- // can fit in a 32-bit signed field.
- return isConstExtProfitable(Node) && isInt<32>(v);
- }
+ if (isInt<16>(v))
+ return true;
+
+ // Return true if extending this immediate is profitable and the value
+ // can fit in a 32-bit signed field.
+ return isConstExtProfitable(Node) && isInt<32>(v);
}]>;
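+// E.g. #32000 passes isInt<16> and is always accepted, while #70000 is
+// accepted only when a constant extender is profitable.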
def s10ExtPred : PatLeaf<(i32 imm), [{
int64_t v = (int64_t)N->getSExtValue();
- if (!Subtarget->hasV4TOps())
- // Return true if the immediate can fit in a 10-bit sign extended field.
- return isInt<10>(v);
- else {
- if (isInt<10>(v))
- return true;
+ if (isInt<10>(v))
+ return true;
- // Return true if extending this immediate is profitable and the value
- // can fit in a 32-bit signed field.
- return isConstExtProfitable(Node) && isInt<32>(v);
- }
+ // Return true if extending this immediate is profitable and the value
+ // can fit in a 32-bit signed field.
+ return isConstExtProfitable(Node) && isInt<32>(v);
}]>;
def s9ExtPred : PatLeaf<(i32 imm), [{
int64_t v = (int64_t)N->getSExtValue();
- if (!Subtarget->hasV4TOps())
- // Return true if the immediate can fit in a 9-bit sign extended field.
- return isInt<9>(v);
- else {
- if (isInt<9>(v))
- return true;
+ if (isInt<9>(v))
+ return true;
- // Return true if extending this immediate is profitable and the value
- // can fit in a 32-bit unsigned field.
- return isConstExtProfitable(Node) && isInt<32>(v);
- }
+ // Return true if extending this immediate is profitable and the value
+  // can fit in a 32-bit signed field.
+ return isConstExtProfitable(Node) && isInt<32>(v);
}]>;
def s8ExtPred : PatLeaf<(i32 imm), [{
int64_t v = (int64_t)N->getSExtValue();
- if (!Subtarget->hasV4TOps())
- // Return true if the immediate can fit in a 8-bit sign extended field.
- return isInt<8>(v);
- else {
- if (isInt<8>(v))
- return true;
+ if (isInt<8>(v))
+ return true;
- // Return true if extending this immediate is profitable and the value
- // can fit in a 32-bit signed field.
- return isConstExtProfitable(Node) && isInt<32>(v);
- }
+ // Return true if extending this immediate is profitable and the value
+ // can fit in a 32-bit signed field.
+ return isConstExtProfitable(Node) && isInt<32>(v);
}]>;
def s8_16ExtPred : PatLeaf<(i32 imm), [{
int64_t v = (int64_t)N->getSExtValue();
- if (!Subtarget->hasV4TOps())
- // Return true if the immediate fits in a 8-bit sign extended field.
- return isInt<8>(v);
- else {
- if (isInt<8>(v))
- return true;
-
- // Return true if extending this immediate is profitable and the value
- // can't fit in a 16-bit signed field. This is required to avoid
- // unnecessary constant extenders.
- return isConstExtProfitable(Node) && !isInt<16>(v);
- }
+ if (isInt<8>(v))
+ return true;
+
+ // Return true if extending this immediate is profitable and the value
+ // can't fit in a 16-bit signed field. This is required to avoid
+ // unnecessary constant extenders.
+ return isConstExtProfitable(Node) && !isInt<16>(v);
}]>;
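+// E.g. #100 fits in 8 bits and is accepted; #1000 fits in 16 bits, so it is
+// rejected here in favor of a 16-bit encoding; #100000 is accepted when an
+// extender is profitable, since it needs one regardless.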
def s6ExtPred : PatLeaf<(i32 imm), [{
int64_t v = (int64_t)N->getSExtValue();
- if (!Subtarget->hasV4TOps())
- // Return true if the immediate can fit in a 6-bit sign extended field.
- return isInt<6>(v);
- else {
- if (isInt<6>(v))
- return true;
+ if (isInt<6>(v))
+ return true;
- // Return true if extending this immediate is profitable and the value
- // can fit in a 32-bit unsigned field.
- return isConstExtProfitable(Node) && isInt<32>(v);
- }
+ // Return true if extending this immediate is profitable and the value
+  // can fit in a 32-bit signed field.
+ return isConstExtProfitable(Node) && isInt<32>(v);
}]>;
def s6_16ExtPred : PatLeaf<(i32 imm), [{
int64_t v = (int64_t)N->getSExtValue();
- if (!Subtarget->hasV4TOps())
- // Return true if the immediate fits in a 6-bit sign extended field.
- return isInt<6>(v);
- else {
- if (isInt<6>(v))
- return true;
-
- // Return true if extending this immediate is profitable and the value
- // can't fit in a 16-bit signed field. This is required to avoid
- // unnecessary constant extenders.
- return isConstExtProfitable(Node) && !isInt<16>(v);
- }
+ if (isInt<6>(v))
+ return true;
+
+ // Return true if extending this immediate is profitable and the value
+ // can't fit in a 16-bit signed field. This is required to avoid
+ // unnecessary constant extenders.
+ return isConstExtProfitable(Node) && !isInt<16>(v);
}]>;
def s6_10ExtPred : PatLeaf<(i32 imm), [{
int64_t v = (int64_t)N->getSExtValue();
- if (!Subtarget->hasV4TOps())
- // Return true if the immediate can fit in a 6-bit sign extended field.
- return isInt<6>(v);
- else {
- if (isInt<6>(v))
- return true;
-
- // Return true if extending this immediate is profitable and the value
- // can't fit in a 10-bit signed field. This is required to avoid
- // unnecessary constant extenders.
- return isConstExtProfitable(Node) && !isInt<10>(v);
- }
+ if (isInt<6>(v))
+ return true;
+
+ // Return true if extending this immediate is profitable and the value
+ // can't fit in a 10-bit signed field. This is required to avoid
+ // unnecessary constant extenders.
+ return isConstExtProfitable(Node) && !isInt<10>(v);
}]>;
def s11_0ExtPred : PatLeaf<(i32 imm), [{
int64_t v = (int64_t)N->getSExtValue();
- if (!Subtarget->hasV4TOps())
- // Return true if the immediate can fit in a 11-bit sign extended field.
- return isShiftedInt<11,0>(v);
- else {
- if (isInt<11>(v))
- return true;
+ if (isInt<11>(v))
+ return true;
- // Return true if extending this immediate is profitable and the value
- // can fit in a 32-bit signed field.
- return isConstExtProfitable(Node) && isInt<32>(v);
- }
+ // Return true if extending this immediate is profitable and the value
+ // can fit in a 32-bit signed field.
+ return isConstExtProfitable(Node) && isInt<32>(v);
}]>;
def s11_1ExtPred : PatLeaf<(i32 imm), [{
int64_t v = (int64_t)N->getSExtValue();
- if (!Subtarget->hasV4TOps())
- // Return true if the immediate can fit in a 12-bit sign extended field and
- // is 2 byte aligned.
+ if (isInt<12>(v))
return isShiftedInt<11,1>(v);
- else {
- if (isInt<12>(v))
- return isShiftedInt<11,1>(v);
- // Return true if extending this immediate is profitable and the low 1 bit
- // is zero (2-byte aligned).
- return isConstExtProfitable(Node) && isInt<32>(v) && ((v % 2) == 0);
- }
+ // Return true if extending this immediate is profitable and the low 1 bit
+ // is zero (2-byte aligned).
+ return isConstExtProfitable(Node) && isInt<32>(v) && ((v % 2) == 0);
}]>;
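+// E.g. #2046 is even and fits s11:1 directly; #2047 is odd and rejected;
+// #70000 is even and may be accepted via a constant extender.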
def s11_2ExtPred : PatLeaf<(i32 imm), [{
int64_t v = (int64_t)N->getSExtValue();
- if (!Subtarget->hasV4TOps())
- // Return true if the immediate can fit in a 13-bit sign extended field and
- // is 4-byte aligned.
+ if (isInt<13>(v))
return isShiftedInt<11,2>(v);
- else {
- if (isInt<13>(v))
- return isShiftedInt<11,2>(v);
- // Return true if extending this immediate is profitable and the low 2-bits
- // are zero (4-byte aligned).
- return isConstExtProfitable(Node) && isInt<32>(v) && ((v % 4) == 0);
- }
+ // Return true if extending this immediate is profitable and the low 2-bits
+ // are zero (4-byte aligned).
+ return isConstExtProfitable(Node) && isInt<32>(v) && ((v % 4) == 0);
}]>;
def s11_3ExtPred : PatLeaf<(i32 imm), [{
int64_t v = (int64_t)N->getSExtValue();
- if (!Subtarget->hasV4TOps())
- // Return true if the immediate can fit in a 14-bit sign extended field and
- // is 8-byte aligned.
- return isShiftedInt<11,3>(v);
- else {
- if (isInt<14>(v))
- return isShiftedInt<11,3>(v);
-
- // Return true if extending this immediate is profitable and the low 3-bits
- // are zero (8-byte aligned).
- return isConstExtProfitable(Node) && isInt<32>(v) && ((v % 8) == 0);
- }
+ if (isInt<14>(v))
+ return isShiftedInt<11,3>(v);
+
+ // Return true if extending this immediate is profitable and the low 3-bits
+ // are zero (8-byte aligned).
+ return isConstExtProfitable(Node) && isInt<32>(v) && ((v % 8) == 0);
}]>;
def u0AlwaysExtPred : PatLeaf<(i32 imm), [{
// Predicate for an unsigned 32-bit value that always needs to be extended.
- if (Subtarget->hasV4TOps()) {
- if (isConstExtProfitable(Node)) {
- int64_t v = (int64_t)N->getSExtValue();
- return isUInt<32>(v);
- }
+ if (isConstExtProfitable(Node)) {
+ int64_t v = (int64_t)N->getSExtValue();
+ return isUInt<32>(v);
}
return false;
}]>;
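+// E.g. absolute-addressed accesses ("memw(##global) = r0") always carry the
+// address in an extender word, so any u32 value qualifies when extension is
+// profitable.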
def u6ExtPred : PatLeaf<(i32 imm), [{
int64_t v = (int64_t)N->getSExtValue();
- if (!Subtarget->hasV4TOps())
- // Return true if the immediate can fit in a 6-bit unsigned field.
- return isUInt<6>(v);
- else {
- if (isUInt<6>(v))
- return true;
+ if (isUInt<6>(v))
+ return true;
- // Return true if extending this immediate is profitable and the value
- // can fit in a 32-bit unsigned field.
- return isConstExtProfitable(Node) && isUInt<32>(v);
- }
+ // Return true if extending this immediate is profitable and the value
+ // can fit in a 32-bit unsigned field.
+ return isConstExtProfitable(Node) && isUInt<32>(v);
}]>;
def u7ExtPred : PatLeaf<(i32 imm), [{
int64_t v = (int64_t)N->getSExtValue();
- if (!Subtarget->hasV4TOps())
- // Return true if the immediate can fit in a 7-bit unsigned field.
- return isUInt<7>(v);
- else {
- if (isUInt<7>(v))
- return true;
+ if (isUInt<7>(v))
+ return true;
- // Return true if extending this immediate is profitable and the value
- // can fit in a 32-bit unsigned field.
- return isConstExtProfitable(Node) && isUInt<32>(v);
- }
+ // Return true if extending this immediate is profitable and the value
+ // can fit in a 32-bit unsigned field.
+ return isConstExtProfitable(Node) && isUInt<32>(v);
}]>;
def u8ExtPred : PatLeaf<(i32 imm), [{
int64_t v = (int64_t)N->getSExtValue();
- if (!Subtarget->hasV4TOps())
- // Return true if the immediate can fit in a 8-bit unsigned field.
- return isUInt<8>(v);
- else {
- if (isUInt<8>(v))
- return true;
+ if (isUInt<8>(v))
+ return true;
- // Return true if extending this immediate is profitable and the value
- // can fit in a 32-bit unsigned field.
- return isConstExtProfitable(Node) && isUInt<32>(v);
- }
+ // Return true if extending this immediate is profitable and the value
+ // can fit in a 32-bit unsigned field.
+ return isConstExtProfitable(Node) && isUInt<32>(v);
}]>;
def u9ExtPred : PatLeaf<(i32 imm), [{
int64_t v = (int64_t)N->getSExtValue();
- if (!Subtarget->hasV4TOps())
- // Return true if the immediate can fit in a 9-bit unsigned field.
- return isUInt<9>(v);
- else {
- if (isUInt<9>(v))
- return true;
+ if (isUInt<9>(v))
+ return true;
- // Return true if extending this immediate is profitable and the value
- // can fit in a 32-bit unsigned field.
- return isConstExtProfitable(Node) && isUInt<32>(v);
- }
+ // Return true if extending this immediate is profitable and the value
+ // can fit in a 32-bit unsigned field.
+ return isConstExtProfitable(Node) && isUInt<32>(v);
}]>;
def u6_1ExtPred : PatLeaf<(i32 imm), [{
int64_t v = (int64_t)N->getSExtValue();
- if (!Subtarget->hasV4TOps())
- // Return true if the immediate can fit in a 7-bit unsigned field and
- // is 2-byte aligned.
+ if (isUInt<7>(v))
return isShiftedUInt<6,1>(v);
- else {
- if (isUInt<7>(v))
- return isShiftedUInt<6,1>(v);
- // Return true if extending this immediate is profitable and the value
- // can fit in a 32-bit unsigned field.
- return isConstExtProfitable(Node) && isUInt<32>(v) && ((v % 2) == 0);
- }
+ // Return true if extending this immediate is profitable and the value
+ // can fit in a 32-bit unsigned field.
+ return isConstExtProfitable(Node) && isUInt<32>(v) && ((v % 2) == 0);
}]>;
def u6_2ExtPred : PatLeaf<(i32 imm), [{
int64_t v = (int64_t)N->getSExtValue();
- if (!Subtarget->hasV4TOps())
- // Return true if the immediate can fit in a 8-bit unsigned field and
- // is 4-byte aligned.
+ if (isUInt<8>(v))
return isShiftedUInt<6,2>(v);
- else {
- if (isUInt<8>(v))
- return isShiftedUInt<6,2>(v);
- // Return true if extending this immediate is profitable and the value
- // can fit in a 32-bit unsigned field.
- return isConstExtProfitable(Node) && isUInt<32>(v) && ((v % 4) == 0);
- }
+ // Return true if extending this immediate is profitable and the value
+ // can fit in a 32-bit unsigned field.
+ return isConstExtProfitable(Node) && isUInt<32>(v) && ((v % 4) == 0);
}]>;
def u6_3ExtPred : PatLeaf<(i32 imm), [{
int64_t v = (int64_t)N->getSExtValue();
- if (!Subtarget->hasV4TOps())
- // Return true if the immediate can fit in a 9-bit unsigned field and
- // is 8-byte aligned.
+ if (isUInt<9>(v))
return isShiftedUInt<6,3>(v);
- else {
- if (isUInt<9>(v))
- return isShiftedUInt<6,3>(v);
- // Return true if extending this immediate is profitable and the value
- // can fit in a 32-bit unsigned field.
- return isConstExtProfitable(Node) && isUInt<32>(v) && ((v % 8) == 0);
- }
+ // Return true if extending this immediate is profitable and the value
+ // can fit in a 32-bit unsigned field.
+ return isConstExtProfitable(Node) && isUInt<32>(v) && ((v % 8) == 0);
}]>;
const MCPhysReg *
HexagonRegisterInfo::getCalleeSavedRegs(const MachineFunction *MF) const {
- static const MCPhysReg CalleeSavedRegsV2[] = {
- Hexagon::R24, Hexagon::R25, Hexagon::R26, Hexagon::R27, 0
- };
static const MCPhysReg CalleeSavedRegsV3[] = {
Hexagon::R16, Hexagon::R17, Hexagon::R18, Hexagon::R19,
Hexagon::R20, Hexagon::R21, Hexagon::R22, Hexagon::R23,
};
switch(Subtarget.getHexagonArchVersion()) {
- case HexagonSubtarget::V1:
- break;
- case HexagonSubtarget::V2:
- return CalleeSavedRegsV2;
- case HexagonSubtarget::V3:
case HexagonSubtarget::V4:
case HexagonSubtarget::V5:
return CalleeSavedRegsV3;
const TargetRegisterClass* const*
HexagonRegisterInfo::getCalleeSavedRegClasses(const MachineFunction *MF) const {
- static const TargetRegisterClass * const CalleeSavedRegClassesV2[] = {
- &Hexagon::IntRegsRegClass, &Hexagon::IntRegsRegClass,
- &Hexagon::IntRegsRegClass, &Hexagon::IntRegsRegClass,
- };
static const TargetRegisterClass * const CalleeSavedRegClassesV3[] = {
&Hexagon::IntRegsRegClass, &Hexagon::IntRegsRegClass,
&Hexagon::IntRegsRegClass, &Hexagon::IntRegsRegClass,
};
switch(Subtarget.getHexagonArchVersion()) {
- case HexagonSubtarget::V1:
- break;
- case HexagonSubtarget::V2:
- return CalleeSavedRegClassesV2;
- case HexagonSubtarget::V3:
case HexagonSubtarget::V4:
case HexagonSubtarget::V5:
return CalleeSavedRegClassesV3;
MI.getOperand(FIOperandNum+1).ChangeToImmediate(0);
} else if (TII.isMemOp(&MI)) {
// use the constant extender if the instruction provides it
- // and we are V4TOps.
- if (Subtarget.hasV4TOps()) {
- if (TII.isConstExtended(&MI)) {
- MI.getOperand(FIOperandNum).ChangeToRegister(FrameReg, false);
- MI.getOperand(FIOperandNum+1).ChangeToImmediate(Offset);
- TII.immediateExtend(&MI);
- } else {
- llvm_unreachable("Need to implement for memops");
- }
+ if (TII.isConstExtended(&MI)) {
+ MI.getOperand(FIOperandNum).ChangeToRegister(FrameReg, false);
+ MI.getOperand(FIOperandNum+1).ChangeToImmediate(Offset);
+ TII.immediateExtend(&MI);
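+      // immediateExtend(&MI) marks the offset as a ##-extended immediate.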
} else {
- // Only V3 and older instructions here.
- unsigned ResReg = HEXAGON_RESERVED_REG_1;
- if (!MFI.hasVarSizedObjects() &&
- TII.isValidOffset(MI.getOpcode(), (FrameSize+Offset))) {
- MI.getOperand(FIOperandNum).ChangeToRegister(getStackRegister(),
- false, false, false);
- MI.getOperand(FIOperandNum+1).ChangeToImmediate(FrameSize+Offset);
- } else if (!TII.isValidOffset(Hexagon::A2_addi, Offset)) {
- BuildMI(*MI.getParent(), II, MI.getDebugLoc(),
- TII.get(Hexagon::CONST32_Int_Real), ResReg).addImm(Offset);
- BuildMI(*MI.getParent(), II, MI.getDebugLoc(),
- TII.get(Hexagon::A2_add), ResReg).addReg(FrameReg).
- addReg(ResReg);
- MI.getOperand(FIOperandNum).ChangeToRegister(ResReg, false, false,
- true);
- MI.getOperand(FIOperandNum+1).ChangeToImmediate(0);
- } else {
- BuildMI(*MI.getParent(), II, MI.getDebugLoc(),
- TII.get(Hexagon::A2_addi), ResReg).addReg(FrameReg).
- addImm(Offset);
- MI.getOperand(FIOperandNum).ChangeToRegister(ResReg, false, false,
- true);
- MI.getOperand(FIOperandNum+1).ChangeToImmediate(0);
- }
+ llvm_unreachable("Need to implement for memops");
}
} else {
unsigned dstReg = MI.getOperand(0).getReg();
if (CPUString.empty())
CPUString = "hexagonv4";
- if (CPUString == "hexagonv2") {
- HexagonArchVersion = V2;
- } else if (CPUString == "hexagonv3") {
- EnableV3 = true;
- HexagonArchVersion = V3;
- } else if (CPUString == "hexagonv4") {
+ if (CPUString == "hexagonv4") {
HexagonArchVersion = V4;
} else if (CPUString == "hexagonv5") {
HexagonArchVersion = V5;
public:
enum HexagonArchEnum {
- V1, V2, V3, V4, V5
+ V4, V5
};
HexagonArchEnum HexagonArchVersion;
/// subtarget options. Definition of function is auto generated by tblgen.
void ParseSubtargetFeatures(StringRef CPU, StringRef FS);
- bool hasV2TOps () const { return HexagonArchVersion >= V2; }
- bool hasV2TOpsOnly () const { return HexagonArchVersion == V2; }
- bool hasV3TOps () const { return HexagonArchVersion >= V3; }
- bool hasV3TOpsOnly () const { return HexagonArchVersion == V3; }
- bool hasV4TOps () const { return HexagonArchVersion >= V4; }
- bool hasV4TOpsOnly () const { return HexagonArchVersion == V4; }
- bool useMemOps () const { return HexagonArchVersion >= V4 && UseMemOps; }
- bool hasV5TOps () const { return HexagonArchVersion >= V5; }
- bool hasV5TOpsOnly () const { return HexagonArchVersion == V5; }
- bool modeIEEERndNear () const { return ModeIEEERndNear; }
-
- bool isSubtargetV2() const { return HexagonArchVersion == V2;}
+ bool useMemOps() const { return UseMemOps; }
+ bool hasV5TOps() const { return getHexagonArchVersion() >= V5; }
+ bool hasV5TOpsOnly() const { return getHexagonArchVersion() == V5; }
+ bool modeIEEERndNear() const { return ModeIEEERndNear; }
+
const std::string &getCPUString () const { return CPUString; }
// Threshold for small data section
MachineBasicBlock::iterator &MII) {
const HexagonInstrInfo *QII = (const HexagonInstrInfo *) TII;
- const HexagonRegisterInfo *QRI =
- (const HexagonRegisterInfo *)MF.getSubtarget().getRegisterInfo();
- if (!QRI->Subtarget.hasV4TOps() ||
- !QII->mayBeNewStore(MI))
+ if (!QII->mayBeNewStore(MI))
return false;
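+  // A new-value store (e.g. "memw(r0+#0) = r1.new") consumes a value
+  // defined by another instruction in the same packet.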
MachineInstr *PacketMI = PacketSU->getInstr();
// first store is not in SLOT0. New value store, new value jump,
// dealloc_return and memop always take SLOT0.
// Arch spec 3.4.4.2
- if (QRI->Subtarget.hasV4TOps()) {
- if (MCIDI.mayStore() && MCIDJ.mayStore() &&
- (QII->isNewValueInst(J) || QII->isMemOp(J) || QII->isMemOp(I))) {
- Dependence = true;
- return false;
- }
+ if (MCIDI.mayStore() && MCIDJ.mayStore() &&
+ (QII->isNewValueInst(J) || QII->isMemOp(J) || QII->isMemOp(I))) {
+ Dependence = true;
+ return false;
+ }
- if ((QII->isMemOp(J) && MCIDI.mayStore())
- || (MCIDJ.mayStore() && QII->isMemOp(I))
- || (QII->isMemOp(J) && QII->isMemOp(I))) {
- Dependence = true;
- return false;
- }
+ if ((QII->isMemOp(J) && MCIDI.mayStore())
+ || (MCIDJ.mayStore() && QII->isMemOp(I))
+ || (QII->isMemOp(J) && QII->isMemOp(I))) {
+ Dependence = true;
+ return false;
+ }
- //if dealloc_return
- if (MCIDJ.mayStore() && QII->isDeallocRet(I)) {
- Dependence = true;
- return false;
- }
+  // A store cannot share a packet with a dealloc_return.
+ if (MCIDJ.mayStore() && QII->isDeallocRet(I)) {
+ Dependence = true;
+ return false;
+ }
- // If an instruction feeds new value jump, glue it.
- MachineBasicBlock::iterator NextMII = I;
- ++NextMII;
- if (NextMII != I->getParent()->end() && QII->isNewValueJump(NextMII)) {
- MachineInstr *NextMI = NextMII;
+ // If an instruction feeds new value jump, glue it.
+ MachineBasicBlock::iterator NextMII = I;
+ ++NextMII;
+ if (NextMII != I->getParent()->end() && QII->isNewValueJump(NextMII)) {
+ MachineInstr *NextMI = NextMII;
- bool secondRegMatch = false;
- bool maintainNewValueJump = false;
+ bool secondRegMatch = false;
+ bool maintainNewValueJump = false;
- if (NextMI->getOperand(1).isReg() &&
- I->getOperand(0).getReg() == NextMI->getOperand(1).getReg()) {
- secondRegMatch = true;
- maintainNewValueJump = true;
- }
+ if (NextMI->getOperand(1).isReg() &&
+ I->getOperand(0).getReg() == NextMI->getOperand(1).getReg()) {
+ secondRegMatch = true;
+ maintainNewValueJump = true;
+ }
- if (!secondRegMatch &&
- I->getOperand(0).getReg() == NextMI->getOperand(0).getReg()) {
- maintainNewValueJump = true;
- }
+ if (!secondRegMatch &&
+ I->getOperand(0).getReg() == NextMI->getOperand(0).getReg()) {
+ maintainNewValueJump = true;
+ }
- for (std::vector<MachineInstr*>::iterator
- VI = CurrentPacketMIs.begin(),
- VE = CurrentPacketMIs.end();
- (VI != VE && maintainNewValueJump); ++VI) {
- SUnit *PacketSU = MIToSUnit.find(*VI)->second;
+ for (std::vector<MachineInstr*>::iterator
+ VI = CurrentPacketMIs.begin(),
+ VE = CurrentPacketMIs.end();
+ (VI != VE && maintainNewValueJump); ++VI) {
+ SUnit *PacketSU = MIToSUnit.find(*VI)->second;
- // NVJ can not be part of the dual jump - Arch Spec: section 7.8
- if (PacketSU->getInstr()->getDesc().isCall()) {
- Dependence = true;
- break;
- }
- // Validate
- // 1. Packet does not have a store in it.
- // 2. If the first operand of the nvj is newified, and the second
- // operand is also a reg, it (second reg) is not defined in
- // the same packet.
- // 3. If the second operand of the nvj is newified, (which means
- // first operand is also a reg), first reg is not defined in
- // the same packet.
- if (PacketSU->getInstr()->getDesc().mayStore() ||
- PacketSU->getInstr()->getOpcode() == Hexagon::S2_allocframe ||
- // Check #2.
- (!secondRegMatch && NextMI->getOperand(1).isReg() &&
- PacketSU->getInstr()->modifiesRegister(
- NextMI->getOperand(1).getReg(), QRI)) ||
- // Check #3.
- (secondRegMatch &&
- PacketSU->getInstr()->modifiesRegister(
- NextMI->getOperand(0).getReg(), QRI))) {
- Dependence = true;
- break;
- }
+  // NVJ cannot be part of the dual jump - Arch Spec: section 7.8
+ if (PacketSU->getInstr()->getDesc().isCall()) {
+ Dependence = true;
+ break;
+ }
+ // Validate
+ // 1. Packet does not have a store in it.
+ // 2. If the first operand of the nvj is newified, and the second
+ // operand is also a reg, it (second reg) is not defined in
+ // the same packet.
+ // 3. If the second operand of the nvj is newified, (which means
+ // first operand is also a reg), first reg is not defined in
+ // the same packet.
+ if (PacketSU->getInstr()->getDesc().mayStore() ||
+ PacketSU->getInstr()->getOpcode() == Hexagon::S2_allocframe ||
+ // Check #2.
+ (!secondRegMatch && NextMI->getOperand(1).isReg() &&
+ PacketSU->getInstr()->modifiesRegister(
+ NextMI->getOperand(1).getReg(), QRI)) ||
+ // Check #3.
+ (secondRegMatch &&
+ PacketSU->getInstr()->modifiesRegister(
+ NextMI->getOperand(0).getReg(), QRI))) {
+ Dependence = true;
+ break;
}
- if (!Dependence)
- GlueToNewValueJump = true;
- else
- return false;
}
+ if (!Dependence)
+ GlueToNewValueJump = true;
+ else
+ return false;
}
if (SUJ->isSucc(SUI)) {
else if ((DepType == SDep::Order) &&
!I->hasOrderedMemoryRef() &&
!J->hasOrderedMemoryRef()) {
- if (QRI->Subtarget.hasV4TOps() &&
- // hexagonv4 allows dual store.
- MCIDI.mayStore() && MCIDJ.mayStore()) {
+ if (MCIDI.mayStore() && MCIDJ.mayStore()) {
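+      // Dual stores are allowed: two stores may share a packet.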
/* do nothing */
}
-  // store followed by store-- not OK on V2
// packetized in a same packet. This implies that the store is using
// caller's SP. Hence, offset needs to be updated accordingly.
else if (DepType == SDep::Data
- && QRI->Subtarget.hasV4TOps()
&& J->getOpcode() == Hexagon::S2_allocframe
&& (I->getOpcode() == Hexagon::S2_storerd_io
|| I->getOpcode() == Hexagon::S2_storeri_io