-//===-- llvm/lib/Target/ARM/ARMCallLowering.cpp - Call lowering -----------===//
+//===- llvm/lib/Target/ARM/ARMCallLowering.cpp - Call lowering ------------===//
//
// The LLVM Compiler Infrastructure
//
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
-///
+//
/// \file
/// This file implements the lowering of LLVM calls to machine code calls for
/// GlobalISel.
-///
+//
//===----------------------------------------------------------------------===//
#include "ARMCallLowering.h"
-
#include "ARMBaseInstrInfo.h"
#include "ARMISelLowering.h"
#include "ARMSubtarget.h"
-
+#include "Utils/ARMBaseInfo.h"
+#include "llvm/ADT/SmallVector.h"
#include "llvm/CodeGen/Analysis.h"
+#include "llvm/CodeGen/CallingConvLower.h"
#include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"
#include "llvm/CodeGen/GlobalISel/Utils.h"
+#include "llvm/CodeGen/LowLevelType.h"
+#include "llvm/CodeGen/MachineBasicBlock.h"
+#include "llvm/CodeGen/MachineFrameInfo.h"
+#include "llvm/CodeGen/MachineFunction.h"
+#include "llvm/CodeGen/MachineInstrBuilder.h"
+#include "llvm/CodeGen/MachineMemOperand.h"
+#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
+#include "llvm/CodeGen/MachineValueType.h"
+#include "llvm/CodeGen/ValueTypes.h"
+#include "llvm/IR/Attributes.h"
+#include "llvm/IR/DataLayout.h"
+#include "llvm/IR/DerivedTypes.h"
+#include "llvm/IR/Function.h"
+#include "llvm/IR/Type.h"
+#include "llvm/IR/Value.h"
+#include "llvm/Support/Casting.h"
+#include "llvm/Support/LowLevelTypeImpl.h"
+#include "llvm/Target/TargetRegisterInfo.h"
+#include "llvm/Target/TargetSubtargetInfo.h"
+#include <algorithm>
+#include <cassert>
+#include <cstdint>
+#include <utility>
using namespace llvm;
}
namespace {
+
/// Helper class for values going out through an ABI boundary (used for handling
/// function return values and call parameters).
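+/// For example, under AAPCS an i64 return value is split and assigned to
+/// r0/r1 by the CCAssignFn, while parameters that do not fit in registers
+/// are written to the stack through getStackAddress.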
struct OutgoingValueHandler : public CallLowering::ValueHandler {
OutgoingValueHandler(MachineIRBuilder &MIRBuilder, MachineRegisterInfo &MRI,
MachineInstrBuilder &MIB, CCAssignFn *AssignFn)
- : ValueHandler(MIRBuilder, MRI, AssignFn), MIB(MIB), StackSize(0) {}
+ : ValueHandler(MIRBuilder, MRI, AssignFn), MIB(MIB) {}
unsigned getStackAddress(uint64_t Size, int64_t Offset,
MachinePointerInfo &MPO) override {
}
MachineInstrBuilder &MIB;
- uint64_t StackSize;
+ uint64_t StackSize = 0;
};
-} // End anonymous namespace.
+
+} // end anonymous namespace
void ARMCallLowering::splitToValueTypes(
const ArgInfo &OrigArg, SmallVectorImpl<ArgInfo> &SplitArgs,
}
namespace {
+
/// Helper class for values coming in through an ABI boundary (used for handling
/// formal arguments and call return values).
struct IncomingValueHandler : public CallLowering::ValueHandler {
MIRBuilder.getMBB().addLiveIn(PhysReg);
}
};
-} // End anonymous namespace
+
+} // end anonymous namespace
bool ARMCallLowering::lowerFormalArguments(MachineIRBuilder &MIRBuilder,
const Function &F,
}
namespace {
+
struct CallReturnHandler : public IncomingValueHandler {
CallReturnHandler(MachineIRBuilder &MIRBuilder, MachineRegisterInfo &MRI,
MachineInstrBuilder MIB, CCAssignFn *AssignFn)
MachineInstrBuilder MIB;
};
-} // End anonymous namespace.
+
+} // end anonymous namespace
bool ARMCallLowering::lowerCall(MachineIRBuilder &MIRBuilder,
CallingConv::ID CallConv,
-//===-- llvm/lib/Target/ARM/ARMCallLowering.h - Call lowering -------------===//
+//===- llvm/lib/Target/ARM/ARMCallLowering.h - Call lowering ----*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
-///
+//
/// \file
/// This file describes how to lower LLVM calls to machine code calls.
-///
+//
//===----------------------------------------------------------------------===//
-#ifndef LLVM_LIB_TARGET_ARM_ARMCALLLOWERING
-#define LLVM_LIB_TARGET_ARM_ARMCALLLOWERING
+#ifndef LLVM_LIB_TARGET_ARM_ARMCALLLOWERING_H
+#define LLVM_LIB_TARGET_ARM_ARMCALLLOWERING_H
-#include "llvm/CodeGen/CallingConvLower.h"
+#include "llvm/ADT/ArrayRef.h"
#include "llvm/CodeGen/GlobalISel/CallLowering.h"
-#include "llvm/CodeGen/ValueTypes.h"
+#include "llvm/IR/CallingConv.h"
+#include <cstdint>
+#include <functional>
namespace llvm {
class ARMTargetLowering;
+class MachineFunction;
class MachineInstrBuilder;
+class MachineIRBuilder;
+class Value;
class ARMCallLowering : public CallLowering {
public:
bool lowerReturnVal(MachineIRBuilder &MIRBuilder, const Value *Val,
unsigned VReg, MachineInstrBuilder &Ret) const;
- typedef std::function<void(unsigned Reg, uint64_t Offset)> SplitArgTy;
+ using SplitArgTy = std::function<void(unsigned Reg, uint64_t Offset)>;
/// Split an argument into one or more arguments that the CC lowering can cope
/// with (e.g. replace pointers with integers).
MachineFunction &MF,
const SplitArgTy &PerformArgSplit) const;
};
-} // End of namespace llvm
-#endif
+
+} // end namespace llvm
+
+#endif // LLVM_LIB_TARGET_ARM_ARMCALLLOWERING_H
-//===-- ARMConstantIslandPass.cpp - ARM constant islands ------------------===//
+//===- ARMConstantIslandPass.cpp - ARM constant islands -------------------===//
//
// The LLVM Compiler Infrastructure
//
#include "ARMSubtarget.h"
#include "MCTargetDesc/ARMBaseInfo.h"
#include "Thumb2InstrInfo.h"
+#include "Utils/ARMBaseInfo.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DebugLoc.h"
#include "llvm/MC/MCInstrDesc.h"
+#include "llvm/Pass.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/Debug.h"
#include <cassert>
#include <cstdint>
#include <iterator>
-#include <new>
#include <utility>
#include <vector>
/// previous iteration by inserting unconditional branches.
SmallSet<MachineBasicBlock*, 4> NewWaterList;
- typedef std::vector<MachineBasicBlock*>::iterator water_iterator;
+ using water_iterator = std::vector<MachineBasicBlock *>::iterator;
/// CPUser - One user of a constant pool, keeping the machine instruction
/// pointer, the constant pool being referenced, and the max displacement
unsigned MaxDisp;
bool NegOk;
bool IsSoImm;
- bool KnownAlignment;
+ bool KnownAlignment = false;
CPUser(MachineInstr *mi, MachineInstr *cpemi, unsigned maxdisp,
bool neg, bool soimm)
- : MI(mi), CPEMI(cpemi), MaxDisp(maxdisp), NegOk(neg), IsSoImm(soimm),
- KnownAlignment(false) {
+ : MI(mi), CPEMI(cpemi), MaxDisp(maxdisp), NegOk(neg), IsSoImm(soimm) {
HighWaterMark = CPEMI->getParent();
}
};
/// ImmBranches - Keep track of all the immediate branch instructions.
- ///
std::vector<ImmBranch> ImmBranches;
/// PushPopMIs - Keep track of all the Thumb push / pop instructions.
- ///
SmallVector<MachineInstr*, 4> PushPopMIs;
/// T2JumpTables - Keep track of all the Thumb2 jumptable instructions.
}
};
- char ARMConstantIslands::ID = 0;
-
} // end anonymous namespace
+char ARMConstantIslands::ID = 0;
+
/// verify - check BBOffsets, BBSizes, alignment of islands
void ARMConstantIslands::verify() {
#ifndef NDEBUG
/// findConstPoolEntry - Given the constpool index and CONSTPOOL_ENTRY MI,
/// look up the corresponding CPEntry.
-ARMConstantIslands::CPEntry
-*ARMConstantIslands::findConstPoolEntry(unsigned CPI,
- const MachineInstr *CPEMI) {
+ARMConstantIslands::CPEntry *
+ARMConstantIslands::findConstPoolEntry(unsigned CPI,
+ const MachineInstr *CPEMI) {
std::vector<CPEntry> &CPEs = CPEntries[CPI];
// Number of entries per constpool index should be small, just do a
// linear search.
/// and instruction CPEMI, and decrement its refcount. If the refcount
/// becomes 0 remove the entry and instruction. Returns true if we removed
/// the entry, false if we didn't.
-
bool ARMConstantIslands::decrementCPEReferenceCount(unsigned CPI,
MachineInstr *CPEMI) {
// Find the old entry. Eliminate it if it is no longer used.
/// 0 = no existing entry found
/// 1 = entry found, and there were no code insertions or deletions
/// 2 = entry found, and there were code insertions or deletions
-int ARMConstantIslands::findInRangeCPEntry(CPUser& U, unsigned UserOffset)
-{
+int ARMConstantIslands::findInRangeCPEntry(CPUser& U, unsigned UserOffset) {
MachineInstr *UserMI = U.MI;
MachineInstr *CPEMI = U.CPEMI;
-//===-- ARMConstantPoolValue.cpp - ARM constantpool value -----------------===//
+//===- ARMConstantPoolValue.cpp - ARM constantpool value ------------------===//
//
// The LLVM Compiler Infrastructure
//
#include "ARMConstantPoolValue.h"
#include "llvm/ADT/FoldingSet.h"
-#include "llvm/ADT/StringRef.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/Constants.h"
-//===-- ARMConstantPoolValue.h - ARM constantpool value ---------*- C++ -*-===//
+//===- ARMConstantPoolValue.h - ARM constantpool value ----------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
#ifndef LLVM_LIB_TARGET_ARM_ARMCONSTANTPOOLVALUE_H
#define LLVM_LIB_TARGET_ARM_ARMCONSTANTPOOLVALUE_H
-#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/SmallPtrSet.h"
+#include "llvm/ADT/StringRef.h"
+#include "llvm/ADT/iterator_range.h"
#include "llvm/CodeGen/MachineConstantPool.h"
#include "llvm/Support/Casting.h"
#include <string>
class GlobalVariable;
class LLVMContext;
class MachineBasicBlock;
+class raw_ostream;
+class Type;
namespace ARMCP {
const GlobalValue *getGV() const;
const BlockAddress *getBlockAddress() const;
- typedef SmallPtrSet<const GlobalVariable *, 1>::iterator promoted_iterator;
+ using promoted_iterator = SmallPtrSet<const GlobalVariable *, 1>::iterator;
+
iterator_range<promoted_iterator> promotedGlobals() {
return iterator_range<promoted_iterator>(GVars.begin(), GVars.end());
}
-//===-- ARMFastISel.cpp - ARM FastISel implementation ---------------------===//
+//===- ARMFastISel.cpp - ARM FastISel implementation ----------------------===//
//
// The LLVM Compiler Infrastructure
//
#include "ARMSubtarget.h"
#include "MCTargetDesc/ARMAddressingModes.h"
#include "MCTargetDesc/ARMBaseInfo.h"
+#include "Utils/ARMBaseInfo.h"
#include "llvm/ADT/APFloat.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/DenseMap.h"
-#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/CodeGen/CallingConvLower.h"
#include "llvm/CodeGen/FastISel.h"
#include "llvm/CodeGen/FunctionLoweringInfo.h"
#include "llvm/CodeGen/ISDOpcodes.h"
+#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineConstantPool.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
+#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
+#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/Type.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetLowering.h"
#include "llvm/Target/TargetMachine.h"
+#include "llvm/Target/TargetOpcodes.h"
#include "llvm/Target/TargetOptions.h"
+#include "llvm/Target/TargetRegisterInfo.h"
#include <cassert>
#include <cstdint>
#include <utility>
namespace {
// All possible address modes, plus some.
- typedef struct Address {
+ struct Address {
enum {
RegBase,
FrameIndexBase
Address() {
Base.Reg = 0;
}
- } Address;
+ };
class ARMFastISel final : public FastISel {
/// Subtarget - Keep a pointer to the ARMSubtarget around so that we can
return false;
}
-namespace {
-
// This table describes sign- and zero-extend instructions which can be
// folded into a preceding load. All of these extends have an immediate
// (sometimes a mask and sometimes a shift) that's applied after
// extension.
-const struct FoldableLoadExtendsStruct {
+static const struct FoldableLoadExtendsStruct {
uint16_t Opc[2]; // ARM, Thumb.
uint8_t ExpectedImm;
uint8_t isZExt : 1;
{ { ARM::UXTB, ARM::t2UXTB }, 0, 1, MVT::i8 }
};
-} // end anonymous namespace
-
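+// For example, an LDRB already zero-extends from i8, so a following UXTB
+// (whose expected rotation immediate is 0) is redundant and the pair can
+// collapse to the load alone.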
/// \brief The specified machine instr operand is a vreg, and that
/// vreg is being provided by the specified load instruction. If possible,
/// try to fold the load as an operand to the instruction, returning true if
-//===-- ARMFrameLowering.cpp - ARM Frame Information ----------------------===//
+//===- ARMFrameLowering.cpp - ARM Frame Information -----------------------===//
//
// The LLVM Compiler Infrastructure
//
#include "ARMSubtarget.h"
#include "MCTargetDesc/ARMAddressingModes.h"
#include "MCTargetDesc/ARMBaseInfo.h"
+#include "Utils/ARMBaseInfo.h"
#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/IR/Function.h"
#include "llvm/MC/MCContext.h"
#include "llvm/MC/MCDwarf.h"
+#include "llvm/MC/MCInstrDesc.h"
#include "llvm/MC/MCRegisterInfo.h"
#include "llvm/Support/CodeGen.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetMachine.h"
+#include "llvm/Target/TargetOpcodes.h"
#include "llvm/Target/TargetOptions.h"
#include "llvm/Target/TargetRegisterInfo.h"
#include "llvm/Target/TargetSubtargetInfo.h"
DebugLoc DL;
- typedef std::pair<unsigned, bool> RegAndKill;
+ using RegAndKill = std::pair<unsigned, bool>;
+
SmallVector<RegAndKill, 4> Regs;
unsigned i = CSI.size();
while (i != 0) {
// In functions that realign the stack, it can be an advantage to spill the
// callee-saved vector registers after realigning the stack. The vst1 and vld1
// instructions take alignment hints that can improve performance.
-//
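+// For example, with a 16-byte-aligned SP, d8-d11 can be saved with
+// "vst1.64 {d8, d9, d10, d11}, [r4:128]" rather than with unaligned stores.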
static void
checkNumAlignedDPRCS2Regs(MachineFunction &MF, BitVector &SavedRegs) {
MF.getInfo<ARMFunctionInfo>()->setNumAlignedDPRCS2Regs(0);
-//==-- ARMTargetFrameLowering.h - Define frame lowering for ARM --*- C++ -*-==//
+//===- ARMTargetFrameLowering.h - Define frame lowering for ARM -*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
-//
-//
-//
-//===----------------------------------------------------------------------===//
#ifndef LLVM_LIB_TARGET_ARM_ARMFRAMELOWERING_H
#define LLVM_LIB_TARGET_ARM_ARMFRAMELOWERING_H
+#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/Target/TargetFrameLowering.h"
+#include <vector>
namespace llvm {
- class ARMSubtarget;
+
+class ARMSubtarget;
+class CalleeSavedInfo;
+class MachineFunction;
class ARMFrameLowering : public TargetFrameLowering {
protected:
return true;
}
- private:
+private:
void emitPushInst(MachineBasicBlock &MBB, MachineBasicBlock::iterator MI,
const std::vector<CalleeSavedInfo> &CSI, unsigned StmOpc,
unsigned StrOpc, bool NoGap,
MachineBasicBlock::iterator MI) const override;
};
-} // End llvm namespace
+} // end namespace llvm
-#endif
+#endif // LLVM_LIB_TARGET_ARM_ARMFRAMELOWERING_H
-//===-- ARMISelLowering.cpp - ARM DAG Lowering Implementation -------------===//
+//===- ARMISelLowering.cpp - ARM DAG Lowering Implementation --------------===//
//
// The LLVM Compiler Infrastructure
//
#include "ARMSubtarget.h"
#include "MCTargetDesc/ARMAddressingModes.h"
#include "MCTargetDesc/ARMBaseInfo.h"
+#include "Utils/ARMBaseInfo.h"
#include "llvm/ADT/APFloat.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetInstrInfo.h"
+#include "llvm/Target/TargetLowering.h"
#include "llvm/Target/TargetMachine.h"
+#include "llvm/Target/TargetOpcodes.h"
#include "llvm/Target/TargetOptions.h"
+#include "llvm/Target/TargetRegisterInfo.h"
+#include "llvm/Target/TargetSubtargetInfo.h"
#include <algorithm>
#include <cassert>
#include <cstdint>
const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals, bool isThisReturn,
SDValue ThisVal) const {
-
// Assign locations to each value returned by this call.
SmallVector<CCValAssign, 16> RVLocs;
CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), RVLocs,
SDValue &StackPtr,
SmallVectorImpl<SDValue> &MemOpChains,
ISD::ArgFlagsTy Flags) const {
-
SDValue fmrrd = DAG.getNode(ARMISD::VMOVRRD, dl,
DAG.getVTList(MVT::i32, MVT::i32), Arg);
unsigned id = Subtarget->isLittle() ? 0 : 1;
const SmallVectorImpl<ISD::OutputArg> &Outs,
const SmallVectorImpl<SDValue> &OutVals,
const SDLoc &dl, SelectionDAG &DAG) const {
-
// CCValAssign - represent the assignment of the return value to a location.
SmallVector<CCValAssign, 16> RVLocs;
DAG.getIntPtrConstant(1, dl));
} else
ArgValue = GetF64FormalArgument(VA, ArgLocs[++i], Chain, DAG, dl);
-
} else {
const TargetRegisterClass *RC;
}
InVals.push_back(ArgValue);
-
} else { // VA.isRegLoc()
// sanity check
assert(VA.isMemLoc());
// Convert the carry flag into a boolean value.
Overflow = ConvertCarryFlagToBooleanCarry(Value.getValue(1), VT, DAG);
break;
- case ISD::USUBO: {
+ case ISD::USUBO:
Value = DAG.getNode(ARMISD::SUBC, dl, VTs, LHS, RHS);
// Convert the carry flag into a boolean value.
Overflow = ConvertCarryFlagToBooleanCarry(Value.getValue(1), VT, DAG);
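+    // ARM's carry after a subtraction means "no borrow", so it is inverted
+    // (1 - carry) below to form the usual unsigned-overflow bit.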
DAG.getConstant(1, dl, MVT::i32), Overflow);
break;
}
- }
return DAG.getNode(ISD::MERGE_VALUES, dl, VTs, Value, Overflow);
}
SDValue Lo = DAG.getNode(ARMISD::CMOV, dl, VT, LoSmallShift, LoBigShift,
ARMcc, CCR, CmpLo);
-
SDValue HiSmallShift = DAG.getNode(Opc, dl, VT, ShOpHi, ShAmt);
SDValue HiBigShift = Opc == ISD::SRA
? DAG.getNode(Opc, dl, VT, ShOpHi,
// Detect VTST (Vector Test Bits) = icmp ne (and (op0, op1), zero).
if (Opc == ARMISD::VCEQ) {
-
SDValue AndOp;
if (ISD::isBuildVectorAllZeros(Op1.getNode()))
AndOp = Op0;
static SDValue AddCombineTo64BitSMLAL16(SDNode *AddcNode, SDNode *AddeNode,
TargetLowering::DAGCombinerInfo &DCI,
const ARMSubtarget *Subtarget) {
-
if (Subtarget->isThumb()) {
if (!Subtarget->hasDSP())
return SDValue();
AddeNode->getOperand(1).getNode() == UmlalNode) ||
(AddeNode->getOperand(0).getNode() == UmlalNode &&
isNullConstant(AddeNode->getOperand(1)))) {
-
SelectionDAG &DAG = DCI.DAG;
SDValue Ops[] = { UmlalNode->getOperand(0), UmlalNode->getOperand(1),
UmlalNode->getOperand(2), AddHi };
SDValue RHS = N->getOperand(1);
if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(RHS)) {
int32_t imm = C->getSExtValue();
- if (imm < 0 && imm > INT_MIN) {
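+    // Excluding INT_MIN keeps the negation below from overflowing.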
+ if (imm < 0 && imm > std::numeric_limits<int>::min()) {
SDLoc DL(N);
RHS = DAG.getConstant(-imm, DL, MVT::i32);
unsigned Opcode = (N->getOpcode() == ARMISD::ADDC) ? ARMISD::SUBC
MVT::i32)));
Res = DAG.getNode(ISD::SUB, DL, VT,
DAG.getConstant(0, DL, MVT::i32), Res);
-
} else
return SDValue();
}
return -1;
}
-
static bool isLegalT1AddressImmediate(int64_t V, EVT VT) {
if (V < 0)
return false;
return weight;
}
-typedef std::pair<unsigned, const TargetRegisterClass*> RCPair;
+using RCPair = std::pair<unsigned, const TargetRegisterClass *>;
+
RCPair ARMTargetLowering::getRegForInlineAsmConstraint(
const TargetRegisterInfo *TRI, StringRef Constraint, MVT VT) const {
if (Constraint.size() == 1) {
Value *Lo = Builder.CreateTrunc(Val, Int32Ty, "lo");
Value *Hi = Builder.CreateTrunc(Builder.CreateLShr(Val, 32), Int32Ty, "hi");
if (!Subtarget->isLittle())
- std::swap (Lo, Hi);
+ std::swap(Lo, Hi);
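+    // STREXD writes an even/odd register pair in memory order, so on
+    // big-endian targets the high word must come first.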
Addr = Builder.CreateBitCast(Addr, Type::getInt8PtrTy(M->getContext()));
return Builder.CreateCall(Strex, {Lo, Hi, Addr});
}
DenseMap<ShuffleVectorInst *, SmallVector<Value *, 4>> SubVecs;
for (unsigned LoadCount = 0; LoadCount < NumLoads; ++LoadCount) {
-
// If we're generating more than one load, compute the base address of
// subsequent loads as an offset from the previous.
if (LoadCount > 0)
Intrinsic::arm_neon_vst4};
for (unsigned StoreCount = 0; StoreCount < NumStores; ++StoreCount) {
-
-    // If we generating more than one store, we compute the base address of
+    // If we're generating more than one store, compute the base address of
// subsequent stores as an offset from the previous.
if (StoreCount > 0)
-//===-- ARMISelLowering.h - ARM DAG Lowering Interface ----------*- C++ -*-===//
+//===- ARMISelLowering.h - ARM DAG Lowering Interface -----------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/CodeGen/CallingConvLower.h"
+#include "llvm/CodeGen/ISDOpcodes.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineValueType.h"
-#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/CodeGen/SelectionDAGNodes.h"
#include "llvm/CodeGen/ValueTypes.h"
+#include "llvm/IR/Attributes.h"
#include "llvm/IR/CallingConv.h"
+#include "llvm/IR/Function.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/InlineAsm.h"
#include "llvm/Support/CodeGen.h"
namespace llvm {
class ARMSubtarget;
+class DataLayout;
+class FastISel;
+class FunctionLoweringInfo;
+class GlobalValue;
class InstrItineraryData;
+class Instruction;
+class MachineBasicBlock;
+class MachineInstr;
+class SelectionDAG;
+class TargetLibraryInfo;
+class TargetMachine;
+class TargetRegisterInfo;
+class VectorType;
namespace ARMISD {
/// ReplaceNodeResults - Replace the results of node with an illegal result
/// type with new values built out of custom code.
- ///
void ReplaceNodeResults(SDNode *N, SmallVectorImpl<SDValue>&Results,
SelectionDAG &DAG) const override;
const InstrItineraryData *Itins;
/// ARMPCLabelIndex - Keep track of the number of ARM PC labels created.
- ///
unsigned ARMPCLabelIndex;
// TODO: remove this, and have shouldInsertFencesForAtomic do the proper
void addQRTypeForNEON(MVT VT);
std::pair<SDValue, SDValue> getARMXALUOOp(SDValue Op, SelectionDAG &DAG, SDValue &ARMcc) const;
- typedef SmallVector<std::pair<unsigned, SDValue>, 8> RegsToPassVector;
+ using RegsToPassVector = SmallVector<std::pair<unsigned, SDValue>, 8>;
void PassF64ArgInRegs(const SDLoc &dl, SelectionDAG &DAG, SDValue Chain,
SDValue &Arg, RegsToPassVector &RegsToPass,
-//===-- ARMLoadStoreOptimizer.cpp - ARM load / store opt. pass ------------===//
+//===- ARMLoadStoreOptimizer.cpp - ARM load / store opt. pass -------------===//
//
// The LLVM Compiler Infrastructure
//
#include "ARMMachineFunctionInfo.h"
#include "ARMSubtarget.h"
#include "MCTargetDesc/ARMAddressingModes.h"
-#include "ThumbRegisterInfo.h"
+#include "MCTargetDesc/ARMBaseInfo.h"
+#include "Utils/ARMBaseInfo.h"
+#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/DenseSet.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
+#include "llvm/ADT/iterator_range.h"
+#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/CodeGen/LivePhysRegs.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
+#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
+#include "llvm/CodeGen/MachineMemOperand.h"
+#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/RegisterClassInfo.h"
-#include "llvm/CodeGen/SelectionDAGNodes.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
+#include "llvm/IR/DebugLoc.h"
+#include "llvm/IR/Type.h"
+#include "llvm/MC/MCInstrDesc.h"
+#include "llvm/Pass.h"
#include "llvm/Support/Allocator.h"
+#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
+#include "llvm/Target/TargetFrameLowering.h"
#include "llvm/Target/TargetInstrInfo.h"
-#include "llvm/Target/TargetMachine.h"
+#include "llvm/Target/TargetLowering.h"
#include "llvm/Target/TargetRegisterInfo.h"
+#include "llvm/Target/TargetSubtargetInfo.h"
+#include <algorithm>
+#include <cassert>
+#include <cstddef>
+#include <cstdlib>
+#include <iterator>
+#include <limits>
+#include <utility>
+
using namespace llvm;
#define DEBUG_TYPE "arm-ldst-opt"
#define ARM_LOAD_STORE_OPT_NAME "ARM load / store optimization pass"
namespace {
+
-/// Post- register allocation pass the combine load / store instructions to
-/// form ldm / stm instructions.
+/// Post-register-allocation pass that combines load / store instructions to
+/// form ldm / stm instructions.
struct ARMLoadStoreOpt : public MachineFunctionPass {
static char ID;
- ARMLoadStoreOpt() : MachineFunctionPass(ID) {}
const MachineFunction *MF;
const TargetInstrInfo *TII;
bool RegClassInfoValid;
bool isThumb1, isThumb2;
+ ARMLoadStoreOpt() : MachineFunctionPass(ID) {}
+
bool runOnMachineFunction(MachineFunction &Fn) override;
MachineFunctionProperties getRequiredProperties() const override {
MachineInstr *MI;
int Offset; ///< Load/Store offset.
unsigned Position; ///< Position as counted from end of basic block.
+
MemOpQueueEntry(MachineInstr &MI, int Offset, unsigned Position)
: MI(&MI), Offset(Offset), Position(Position) {}
};
- typedef SmallVector<MemOpQueueEntry,8> MemOpQueue;
+ using MemOpQueue = SmallVector<MemOpQueueEntry, 8>;
/// A set of MachineInstrs that fulfill (nearly all) conditions to get
/// merged into a LDM/STM.
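+  /// For example, "ldr r0, [r4]" and "ldr r1, [r4, #4]" can merge into
+  /// "ldm r4, {r0, r1}".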
struct MergeCandidate {
/// List of instructions ordered by load/store offset.
SmallVector<MachineInstr*, 4> Instrs;
+
/// Index in Instrs of the instruction being latest in the schedule.
unsigned LatestMIIdx;
+
/// Index in Instrs of the instruction being earliest in the schedule.
unsigned EarliestMIIdx;
+
/// Index into the basic block where the merged instruction will be
/// inserted. (See MemOpQueueEntry.Position)
unsigned InsertPos;
+
/// Whether the instructions can be merged into a ldm/stm instruction.
bool CanMergeToLSMulti;
+
/// Whether the instructions can be merged into a ldrd/strd instruction.
bool CanMergeToLSDouble;
};
bool MergeReturnIntoLDM(MachineBasicBlock &MBB);
bool CombineMovBx(MachineBasicBlock &MBB);
};
- char ARMLoadStoreOpt::ID = 0;
-}
+
+} // end anonymous namespace
+
+char ARMLoadStoreOpt::ID = 0;
INITIALIZE_PASS(ARMLoadStoreOpt, "arm-ldst-opt", ARM_LOAD_STORE_OPT_NAME, false,
false)
MO.setImm(Offset);
else
InsertSub = true;
-
} else if ((Opc == ARM::tSUBi8 || Opc == ARM::tADDi8) &&
!definesCPSR(*MBBI)) {
// SUBS/ADDS using this register, with a dead def of the CPSR.
} else {
InsertSub = true;
}
-
} else {
// Can't update the instruction.
InsertSub = true;
}
-
} else if (definesCPSR(*MBBI) || MBBI->isCall() || MBBI->isBranch()) {
// Since SUBS sets the condition flags, we can't place the base reset
// after an instruction that has a live CPSR def.
// Insert a sub instruction after the newly formed instruction to reset.
if (!BaseKill)
UpdateBaseRegUses(MBB, InsertBefore, DL, Base, NumRegs, Pred, PredReg);
-
} else {
// No writeback, simply build the MachineInstr.
MIB = BuildMI(MBB, InsertBefore, DL, TII->get(Opcode));
}
// Attempt the merge.
- typedef MachineBasicBlock::iterator iterator;
+ using iterator = MachineBasicBlock::iterator;
+
MachineInstr *LatestMI = Cand.Instrs[Cand.LatestMIIdx];
iterator InsertBefore = std::next(iterator(LatestMI));
MachineBasicBlock &MBB = *LatestMI->getParent();
int Offset = MemOps[SIndex].Offset;
const MachineOperand &PMO = getLoadStoreRegOp(*MI);
unsigned PReg = PMO.getReg();
- unsigned PRegNum = PMO.isUndef() ? UINT_MAX : TRI->getEncodingValue(PReg);
+ unsigned PRegNum = PMO.isUndef() ? std::numeric_limits<unsigned>::max()
+ : TRI->getEncodingValue(PReg);
unsigned Latest = SIndex;
unsigned Earliest = SIndex;
unsigned Count = 1;
break;
// See if the current load/store may be part of a multi load/store.
- unsigned RegNum = MO.isUndef() ? UINT_MAX : TRI->getEncodingValue(Reg);
+ unsigned RegNum = MO.isUndef() ? std::numeric_limits<unsigned>::max()
+ : TRI->getEncodingValue(Reg);
bool PartOfLSMulti = CanMergeToLSMulti;
if (PartOfLSMulti) {
// Register numbers must be in ascending order.
MergeBaseCandidates.push_back(&*MBBI);
}
-
// If we are here then the chain is broken; Extract candidates for a merge.
if (MemOps.size() > 0) {
FormCandidates(MemOps);
"ARM pre- register allocation load / store optimization pass"
namespace {
+
-/// Pre- register allocation pass that move load / stores from consecutive
-/// locations close to make it more likely they will be combined later.
-struct ARMPreAllocLoadStoreOpt : public MachineFunctionPass{
+/// Pre-register-allocation pass that moves loads / stores from consecutive
+/// locations closer together to make it more likely they will be combined
+/// later.
+struct ARMPreAllocLoadStoreOpt : public MachineFunctionPass {
static char ID;
- ARMPreAllocLoadStoreOpt() : MachineFunctionPass(ID) {}
AliasAnalysis *AA;
const DataLayout *TD;
MachineRegisterInfo *MRI;
MachineFunction *MF;
+ ARMPreAllocLoadStoreOpt() : MachineFunctionPass(ID) {}
+
bool runOnMachineFunction(MachineFunction &Fn) override;
StringRef getPassName() const override {
return ARM_PREALLOC_LOAD_STORE_OPT_NAME;
}
- virtual void getAnalysisUsage(AnalysisUsage &AU) const override {
+ void getAnalysisUsage(AnalysisUsage &AU) const override {
AU.addRequired<AAResultsWrapperPass>();
MachineFunctionPass::getAnalysisUsage(AU);
}
DenseMap<MachineInstr*, unsigned> &MI2LocMap);
bool RescheduleLoadStoreInstrs(MachineBasicBlock *MBB);
};
- char ARMPreAllocLoadStoreOpt::ID = 0;
-}
+
+} // end anonymous namespace
+
+char ARMPreAllocLoadStoreOpt::ID = 0;
INITIALIZE_PASS(ARMPreAllocLoadStoreOpt, "arm-prera-ldst-opt",
ARM_PREALLOC_LOAD_STORE_OPT_NAME, false, false)
bool RetVal = false;
DenseMap<MachineInstr*, unsigned> MI2LocMap;
- DenseMap<unsigned, SmallVector<MachineInstr*, 4> > Base2LdsMap;
- DenseMap<unsigned, SmallVector<MachineInstr*, 4> > Base2StsMap;
+ DenseMap<unsigned, SmallVector<MachineInstr *, 4>> Base2LdsMap;
+ DenseMap<unsigned, SmallVector<MachineInstr *, 4>> Base2StsMap;
SmallVector<unsigned, 4> LdBases;
SmallVector<unsigned, 4> StBases;
bool StopHere = false;
if (isLd) {
- DenseMap<unsigned, SmallVector<MachineInstr*, 4> >::iterator BI =
+ DenseMap<unsigned, SmallVector<MachineInstr *, 4>>::iterator BI =
Base2LdsMap.find(Base);
if (BI != Base2LdsMap.end()) {
for (unsigned i = 0, e = BI->second.size(); i != e; ++i) {
LdBases.push_back(Base);
}
} else {
- DenseMap<unsigned, SmallVector<MachineInstr*, 4> >::iterator BI =
+ DenseMap<unsigned, SmallVector<MachineInstr *, 4>>::iterator BI =
Base2StsMap.find(Base);
if (BI != Base2StsMap.end()) {
for (unsigned i = 0, e = BI->second.size(); i != e; ++i) {
return RetVal;
}
-
/// Returns an instance of the load / store optimization pass.
FunctionPass *llvm::createARMLoadStoreOptimizationPass(bool PreAlloc) {
if (PreAlloc)
-//===-- ARMTargetTransformInfo.cpp - ARM specific TTI ---------------------===//
+//===- ARMTargetTransformInfo.cpp - ARM specific TTI ----------------------===//
//
// The LLVM Compiler Infrastructure
//
//===----------------------------------------------------------------------===//
#include "ARMTargetTransformInfo.h"
-#include "llvm/Support/Debug.h"
+#include "ARMSubtarget.h"
+#include "MCTargetDesc/ARMAddressingModes.h"
+#include "llvm/ADT/APInt.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/Analysis/LoopInfo.h"
+#include "llvm/CodeGen/ISDOpcodes.h"
+#include "llvm/CodeGen/MachineValueType.h"
+#include "llvm/CodeGen/ValueTypes.h"
+#include "llvm/IR/BasicBlock.h"
+#include "llvm/IR/CallSite.h"
+#include "llvm/IR/DataLayout.h"
+#include "llvm/IR/DerivedTypes.h"
+#include "llvm/IR/Instruction.h"
+#include "llvm/IR/Instructions.h"
+#include "llvm/IR/Type.h"
+#include "llvm/MC/SubtargetFeature.h"
+#include "llvm/Support/Casting.h"
#include "llvm/Target/CostTable.h"
-#include "llvm/Target/TargetLowering.h"
+#include "llvm/Target/TargetMachine.h"
+#include <algorithm>
+#include <cassert>
+#include <cstdint>
+#include <utility>
+
using namespace llvm;
#define DEBUG_TYPE "armtti"
return 3;
}
-
// Constants smaller than 256 fit in the immediate field of
// Thumb1 instructions so we return a zero cost and 1 otherwise.
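+// For example, "movs r0, #200" is a single 16-bit Thumb1 instruction, while
+// a larger constant needs a literal-pool load or a movw/movt pair.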
int ARMTTIImpl::getIntImmCodeSizeCost(unsigned Opcode, unsigned Idx,
return getIntImmCost(Imm, Ty);
}
-
int ARMTTIImpl::getCastInstrCost(unsigned Opcode, Type *Dst, Type *Src,
const Instruction *I) {
int ISD = TLI->InstructionOpcodeToISD(Opcode);
int ARMTTIImpl::getCmpSelInstrCost(unsigned Opcode, Type *ValTy, Type *CondTy,
const Instruction *I) {
-
int ISD = TLI->InstructionOpcodeToISD(Opcode);
-  // On NEON a a vector select gets lowered to vbsl.
+  // On NEON a vector select gets lowered to vbsl.
if (ST->hasNEON() && ValTy->isVectorTy() && ISD == ISD::SELECT) {
TTI::OperandValueKind Op2Info, TTI::OperandValueProperties Opd1PropInfo,
TTI::OperandValueProperties Opd2PropInfo,
ArrayRef<const Value *> Args) {
-
int ISDOpcode = TLI->InstructionOpcodeToISD(Opcode);
std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, Ty);
-//===-- ARMTargetTransformInfo.h - ARM specific TTI -------------*- C++ -*-===//
+//===- ARMTargetTransformInfo.h - ARM specific TTI --------------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
+//
/// \file
-/// This file a TargetTransformInfo::Concept conforming object specific to the
-/// ARM target machine. It uses the target's detailed information to
+/// This file defines a TargetTransformInfo::Concept conforming object specific
+/// to the ARM target machine. It uses the target's detailed information to
/// provide more precise answers to certain TTI queries, while letting the
/// target independent and default TTI implementations handle the rest.
-///
+//
//===----------------------------------------------------------------------===//
#ifndef LLVM_LIB_TARGET_ARM_ARMTARGETTRANSFORMINFO_H
#define LLVM_LIB_TARGET_ARM_ARMTARGETTRANSFORMINFO_H
#include "ARM.h"
+#include "ARMSubtarget.h"
#include "ARMTargetMachine.h"
+#include "llvm/ADT/ArrayRef.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/CodeGen/BasicTTIImpl.h"
-#include "llvm/Target/TargetLowering.h"
+#include "llvm/IR/Constant.h"
+#include "llvm/IR/Function.h"
+#include "llvm/MC/SubtargetFeature.h"
namespace llvm {
+class APInt;
+class ARMTargetLowering;
+class Instruction;
+class Loop;
+class SCEV;
+class ScalarEvolution;
+class Type;
+class Value;
+
class ARMTTIImpl : public BasicTTIImplBase<ARMTTIImpl> {
- typedef BasicTTIImplBase<ARMTTIImpl> BaseT;
- typedef TargetTransformInfo TTI;
+ using BaseT = BasicTTIImplBase<ARMTTIImpl>;
+ using TTI = TargetTransformInfo;
+
friend BaseT;
const ARMSubtarget *ST;
} // end namespace llvm
-#endif
+#endif // LLVM_LIB_TARGET_ARM_ARMTARGETTRANSFORMINFO_H
-//===-- ARMAsmParser.cpp - Parse ARM assembly to MCInst instructions ------===//
+//===- ARMAsmParser.cpp - Parse ARM assembly to MCInst instructions -------===//
//
// The LLVM Compiler Infrastructure
//
#include "MCTargetDesc/ARMAddressingModes.h"
#include "MCTargetDesc/ARMBaseInfo.h"
#include "MCTargetDesc/ARMMCExpr.h"
+#include "MCTargetDesc/ARMMCTargetDesc.h"
+#include "llvm/ADT/APFloat.h"
+#include "llvm/ADT/APInt.h"
+#include "llvm/ADT/None.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallVector.h"
-#include "llvm/ADT/StringExtras.h"
+#include "llvm/ADT/StringMap.h"
+#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/StringSwitch.h"
#include "llvm/ADT/Triple.h"
#include "llvm/ADT/Twine.h"
-#include "llvm/BinaryFormat/COFF.h"
-#include "llvm/BinaryFormat/ELF.h"
-#include "llvm/MC/MCAsmInfo.h"
-#include "llvm/MC/MCAssembler.h"
#include "llvm/MC/MCContext.h"
-#include "llvm/MC/MCDisassembler/MCDisassembler.h"
-#include "llvm/MC/MCELFStreamer.h"
#include "llvm/MC/MCExpr.h"
#include "llvm/MC/MCInst.h"
#include "llvm/MC/MCInstrDesc.h"
#include "llvm/MC/MCObjectFileInfo.h"
#include "llvm/MC/MCParser/MCAsmLexer.h"
#include "llvm/MC/MCParser/MCAsmParser.h"
+#include "llvm/MC/MCParser/MCAsmParserExtension.h"
#include "llvm/MC/MCParser/MCAsmParserUtils.h"
#include "llvm/MC/MCParser/MCParsedAsmOperand.h"
#include "llvm/MC/MCParser/MCTargetAsmParser.h"
#include "llvm/MC/MCStreamer.h"
#include "llvm/MC/MCSubtargetInfo.h"
#include "llvm/MC/MCSymbol.h"
+#include "llvm/MC/SubtargetFeature.h"
#include "llvm/Support/ARMBuildAttributes.h"
#include "llvm/Support/ARMEHABI.h"
+#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
-#include "llvm/Support/Debug.h"
+#include "llvm/Support/Compiler.h"
+#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MathExtras.h"
-#include "llvm/Support/SourceMgr.h"
+#include "llvm/Support/SMLoc.h"
#include "llvm/Support/TargetParser.h"
#include "llvm/Support/TargetRegistry.h"
#include "llvm/Support/raw_ostream.h"
+#include <algorithm>
+#include <cassert>
+#include <cstddef>
+#include <cstdint>
+#include <iterator>
+#include <limits>
+#include <memory>
+#include <string>
+#include <utility>
+#include <vector>
using namespace llvm;
static cl::opt<bool> AddBuildAttributes("arm-add-build-attributes",
cl::init(false));
-class ARMOperand;
-
enum VectorLaneTy { NoLanes, AllLanes, IndexedLane };
class UnwindContext {
- MCAsmParser &Parser;
-
- typedef SmallVector<SMLoc, 4> Locs;
+ using Locs = SmallVector<SMLoc, 4>;
+ MCAsmParser &Parser;
Locs FnStartLocs;
Locs CantUnwindLocs;
Locs PersonalityLocs;
bool hasFnStart() const { return !FnStartLocs.empty(); }
bool cantUnwind() const { return !CantUnwindLocs.empty(); }
bool hasHandlerData() const { return !HandlerDataLocs.empty(); }
+
bool hasPersonality() const {
return !(PersonalityLocs.empty() && PersonalityIndexLocs.empty());
}
FI != FE; ++FI)
Parser.Note(*FI, ".fnstart was specified here");
}
+
void emitCantUnwindLocNotes() const {
for (Locs::const_iterator UI = CantUnwindLocs.begin(),
UE = CantUnwindLocs.end(); UI != UE; ++UI)
Parser.Note(*UI, ".cantunwind was specified here");
}
+
void emitHandlerDataLocNotes() const {
for (Locs::const_iterator HI = HandlerDataLocs.begin(),
HE = HandlerDataLocs.end(); HI != HE; ++HI)
Parser.Note(*HI, ".handlerdata was specified here");
}
+
void emitPersonalityLocNotes() const {
for (Locs::const_iterator PI = PersonalityLocs.begin(),
PE = PersonalityLocs.end(),
// would be legal.
} ITState;
- llvm::SmallVector<MCInst, 4> PendingConditionalInsts;
+ SmallVector<MCInst, 4> PendingConditionalInsts;
void flushPendingInstructions(MCStreamer &Out) override {
if (!inImplicitITBlock()) {
bool inITBlock() { return ITState.CurPosition != ~0U; }
bool inExplicitITBlock() { return inITBlock() && ITState.IsExplicit; }
bool inImplicitITBlock() { return inITBlock() && !ITState.IsExplicit; }
+
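+  // The mask's trailing set bit terminates the IT block, so the final slot
+  // sits at position 4 - countTrailingZeros(Mask).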
bool lastInITBlock() {
return ITState.CurPosition == 4 - countTrailingZeros(ITState.Mask);
}
+
void forwardITPosition() {
if (!inITBlock()) return;
// Move to the next instruction in the IT block, if there is one. If not,
assert(inImplicitITBlock());
assert(ITState.CurPosition == 1);
ITState.CurPosition = ~0U;
- return;
}
// Return the low-subreg of a given Q register.
ITState.Mask = 8;
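+    // Mask 0b1000 encodes a one-instruction block (see lastInITBlock()).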
ITState.CurPosition = 1;
ITState.IsExplicit = false;
- return;
}
// Create a new explicit IT block with the given condition and mask. The mask
ITState.Mask = Mask;
ITState.CurPosition = 0;
ITState.IsExplicit = true;
- return;
}
void Note(SMLoc L, const Twine &Msg, SMRange Range = None) {
return getParser().Note(L, Msg, Range);
}
+
bool Warning(SMLoc L, const Twine &Msg, SMRange Range = None) {
return getParser().Warning(L, Msg, Range);
}
+
bool Error(SMLoc L, const Twine &Msg, SMRange Range = None) {
return getParser().Error(L, Msg, Range);
}
// FIXME: Can tablegen auto-generate this?
return getSTI().getFeatureBits()[ARM::ModeThumb];
}
+
bool isThumbOne() const {
return isThumb() && !getSTI().getFeatureBits()[ARM::FeatureThumb2];
}
+
bool isThumbTwo() const {
return isThumb() && getSTI().getFeatureBits()[ARM::FeatureThumb2];
}
+
bool hasThumb() const {
return getSTI().getFeatureBits()[ARM::HasV4TOps];
}
+
bool hasThumb2() const {
return getSTI().getFeatureBits()[ARM::FeatureThumb2];
}
+
bool hasV6Ops() const {
return getSTI().getFeatureBits()[ARM::HasV6Ops];
}
+
bool hasV6T2Ops() const {
return getSTI().getFeatureBits()[ARM::HasV6T2Ops];
}
+
bool hasV6MOps() const {
return getSTI().getFeatureBits()[ARM::HasV6MOps];
}
+
bool hasV7Ops() const {
return getSTI().getFeatureBits()[ARM::HasV7Ops];
}
+
bool hasV8Ops() const {
return getSTI().getFeatureBits()[ARM::HasV8Ops];
}
+
bool hasV8MBaseline() const {
return getSTI().getFeatureBits()[ARM::HasV8MBaselineOps];
}
+
bool hasV8MMainline() const {
return getSTI().getFeatureBits()[ARM::HasV8MMainlineOps];
}
+
bool has8MSecExt() const {
return getSTI().getFeatureBits()[ARM::Feature8MSecExt];
}
+
bool hasARM() const {
return !getSTI().getFeatureBits()[ARM::FeatureNoARM];
}
+
bool hasDSP() const {
return getSTI().getFeatureBits()[ARM::FeatureDSP];
}
+
bool hasD16() const {
return getSTI().getFeatureBits()[ARM::FeatureD16];
}
+
bool hasV8_1aOps() const {
return getSTI().getFeatureBits()[ARM::HasV8_1aOps];
}
+
bool hasRAS() const {
return getSTI().getFeatureBits()[ARM::FeatureRAS];
}
uint64_t FB = ComputeAvailableFeatures(STI.ToggleFeature(ARM::ModeThumb));
setAvailableFeatures(FB);
}
+
void FixModeAfterArchChange(bool WasThumb, SMLoc Loc);
+
bool isMClass() const {
return getSTI().getFeatureBits()[ARM::FeatureMClass];
}
bool &EmitInITBlock, MCStreamer &Out);
void onLabelParsed(MCSymbol *Symbol) override;
};
-} // end anonymous namespace
-
-namespace {
/// ARMOperand - Instances of this class represent a parsed ARM machine
/// operand.
/// getStartLoc - Get the location of the first token of this operand.
SMLoc getStartLoc() const override { return StartLoc; }
+
/// getEndLoc - Get the location of the last token of this operand.
SMLoc getEndLoc() const override { return EndLoc; }
+
/// getLocRange - Get the range between the first and last token of this
/// operand.
SMRange getLocRange() const { return SMRange(StartLoc, EndLoc); }
}
return false;
}
+
-  // checks whether this operand is an signed offset which fits is a field
-  // of specified width and scaled by a specific number of bits
+  // Checks whether this operand is a signed offset which fits in a field
+  // of specified width and is scaled by a specific number of bits.
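+  // (e.g. width 8 with scale 2 would accept multiples of 4 in [-512, 508])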
template<unsigned width, unsigned scale>
else return false;
return ((Val % 4) == 0) && (Val >= 0) && (Val <= 1020);
}
+
bool isFPImm() const {
if (!isImm()) return false;
const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
int64_t Value = CE->getValue();
return Value >= N && Value <= M;
}
+
template<int64_t N, int64_t M>
bool isImmediateS4() const {
if (!isImm()) return false;
int64_t Value = CE->getValue();
return ((Value & 3) == 0) && Value >= N && Value <= M;
}
+
bool isFBits16() const {
return isImmediate<0, 17>();
}
-    // explicitly exclude zero. we want that to use the normal 0_508 version.
+    // Explicitly exclude zero. We want that to use the normal 0_508 version.
return ((Value & 3) == 0) && Value > 0 && Value <= 508;
}
+
bool isImm0_4095Neg() const {
if (!isImm()) return false;
const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
int64_t Value = -CE->getValue();
return Value > 0 && Value < 4096;
}
+
bool isImm0_7() const {
return isImmediate<0, 7>();
}
+
bool isImm1_16() const {
return isImmediate<1, 16>();
}
+
bool isImm1_32() const {
return isImmediate<1, 32>();
}
+
bool isImm8_255() const {
return isImmediate<8, 255>();
}
+
bool isImm256_65535Expr() const {
if (!isImm()) return false;
const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
int64_t Value = CE->getValue();
return Value >= 256 && Value < 65536;
}
+
bool isImm0_65535Expr() const {
if (!isImm()) return false;
const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
int64_t Value = CE->getValue();
return Value >= 0 && Value < 65536;
}
+
bool isImm24bit() const {
return isImmediate<0, 0xffffff + 1>();
}
+
bool isImmThumbSR() const {
return isImmediate<1, 33>();
}
+
bool isPKHLSLImm() const {
return isImmediate<0, 32>();
}
+
bool isPKHASRImm() const {
return isImmediate<0, 33>();
}
+
bool isAdrLabel() const {
// If we have an immediate that's not a constant, treat it as a label
// reference needing a fixup.
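+    // ADR materializes the address with a pc-relative ADD or SUB, so the
+    // offset must be a valid so_imm (rotated 8-bit) in either direction.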
return (ARM_AM::getSOImmVal(Value) != -1 ||
ARM_AM::getSOImmVal(-Value) != -1);
}
+
bool isT2SOImm() const {
// If we have an immediate that's not a constant, treat it as an expression
// needing a fixup.
int64_t Value = CE->getValue();
return ARM_AM::getT2SOImmVal(Value) != -1;
}
+
bool isT2SOImmNot() const {
if (!isImm()) return false;
const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
return ARM_AM::getT2SOImmVal(Value) == -1 &&
ARM_AM::getT2SOImmVal(~Value) != -1;
}
+
bool isT2SOImmNeg() const {
if (!isImm()) return false;
const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
return ARM_AM::getT2SOImmVal(Value) == -1 &&
ARM_AM::getT2SOImmVal(-Value) != -1;
}
+
bool isSetEndImm() const {
if (!isImm()) return false;
const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
int64_t Value = CE->getValue();
return Value == 1 || Value == 0;
}
+
bool isReg() const override { return Kind == k_Register; }
bool isRegList() const { return Kind == k_RegisterList; }
bool isDPRRegList() const { return Kind == k_DPRRegisterList; }
bool isRegShiftedImm() const { return Kind == k_ShiftedImmediate; }
bool isRotImm() const { return Kind == k_RotateImmediate; }
bool isModImm() const { return Kind == k_ModifiedImmediate; }
+
bool isModImmNot() const {
if (!isImm()) return false;
const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
int64_t Value = CE->getValue();
return ARM_AM::getSOImmVal(~Value) != -1;
}
+
bool isModImmNeg() const {
if (!isImm()) return false;
const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
return ARM_AM::getSOImmVal(Value) == -1 &&
ARM_AM::getSOImmVal(-Value) != -1;
}
+
bool isThumbModImmNeg1_7() const {
if (!isImm()) return false;
const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
int32_t Value = -(int32_t)CE->getValue();
return 0 < Value && Value < 8;
}
+
bool isThumbModImmNeg8_255() const {
if (!isImm()) return false;
const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
int32_t Value = -(int32_t)CE->getValue();
return 7 < Value && Value < 256;
}
+
bool isConstantPoolImm() const { return Kind == k_ConstantPoolImmediate; }
bool isBitfield() const { return Kind == k_BitfieldDescriptor; }
bool isPostIdxRegShifted() const { return Kind == k_PostIndexRegister; }
// Immediate offset in range [-4095, 4095].
if (!Memory.OffsetImm) return true;
int64_t Val = Memory.OffsetImm->getValue();
- return (Val > -4096 && Val < 4096) || (Val == INT32_MIN);
+ return (Val > -4096 && Val < 4096) ||
+ (Val == std::numeric_limits<int32_t>::min());
}
+
bool isAlignedMemory() const {
return isMemNoOffset(true);
}
+
bool isAlignedMemoryNone() const {
return isMemNoOffset(false, 0);
}
+
bool isDupAlignedMemoryNone() const {
return isMemNoOffset(false, 0);
}
+
bool isAlignedMemory16() const {
if (isMemNoOffset(false, 2)) // alignment in bytes for 16-bits is 2.
return true;
return isMemNoOffset(false, 0);
}
+
bool isDupAlignedMemory16() const {
if (isMemNoOffset(false, 2)) // alignment in bytes for 16-bits is 2.
return true;
return isMemNoOffset(false, 0);
}
+
bool isAlignedMemory32() const {
if (isMemNoOffset(false, 4)) // alignment in bytes for 32-bits is 4.
return true;
return isMemNoOffset(false, 0);
}
+
bool isDupAlignedMemory32() const {
if (isMemNoOffset(false, 4)) // alignment in bytes for 32-bits is 4.
return true;
return isMemNoOffset(false, 0);
}
+
bool isAlignedMemory64() const {
if (isMemNoOffset(false, 8)) // alignment in bytes for 64-bits is 8.
return true;
return isMemNoOffset(false, 0);
}
+
bool isDupAlignedMemory64() const {
if (isMemNoOffset(false, 8)) // alignment in bytes for 64-bits is 8.
return true;
return isMemNoOffset(false, 0);
}
+
bool isAlignedMemory64or128() const {
if (isMemNoOffset(false, 8)) // alignment in bytes for 64-bits is 8.
return true;
return true;
return isMemNoOffset(false, 0);
}
+
bool isDupAlignedMemory64or128() const {
if (isMemNoOffset(false, 8)) // alignment in bytes for 64-bits is 8.
return true;
return true;
return isMemNoOffset(false, 0);
}
+
bool isAlignedMemory64or128or256() const {
if (isMemNoOffset(false, 8)) // alignment in bytes for 64-bits is 8.
return true;
return true;
return isMemNoOffset(false, 0);
}
+
bool isAddrMode2() const {
if (!isMem() || Memory.Alignment != 0) return false;
// Check for register offset.
int64_t Val = Memory.OffsetImm->getValue();
return Val > -4096 && Val < 4096;
}
+
bool isAM2OffsetImm() const {
if (!isImm()) return false;
// Immediate offset in range [-4095, 4095].
const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
if (!CE) return false;
int64_t Val = CE->getValue();
- return (Val == INT32_MIN) || (Val > -4096 && Val < 4096);
+ return (Val == std::numeric_limits<int32_t>::min()) ||
+ (Val > -4096 && Val < 4096);
}
+
bool isAddrMode3() const {
// If we have an immediate that's not a constant, treat it as a label
// reference needing a fixup. If it is a constant, it's something else
// Immediate offset in range [-255, 255].
if (!Memory.OffsetImm) return true;
int64_t Val = Memory.OffsetImm->getValue();
- // The #-0 offset is encoded as INT32_MIN, and we have to check
- // for this too.
- return (Val > -256 && Val < 256) || Val == INT32_MIN;
+ // The #-0 offset is encoded as std::numeric_limits<int32_t>::min(), and we
+ // have to check for this too.
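+    // (#-0 and #+0 select different U-bit encodings, so negative zero needs
+    // a sentinel value distinct from plain zero.)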
+ return (Val > -256 && Val < 256) ||
+ Val == std::numeric_limits<int32_t>::min();
}
+
bool isAM3Offset() const {
if (Kind != k_Immediate && Kind != k_PostIndexRegister)
return false;
const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
if (!CE) return false;
int64_t Val = CE->getValue();
- // Special case, #-0 is INT32_MIN.
- return (Val > -256 && Val < 256) || Val == INT32_MIN;
+ // Special case, #-0 is std::numeric_limits<int32_t>::min().
+ return (Val > -256 && Val < 256) ||
+ Val == std::numeric_limits<int32_t>::min();
}
+
bool isAddrMode5() const {
// If we have an immediate that's not a constant, treat it as a label
// reference needing a fixup. If it is a constant, it's something else
if (!Memory.OffsetImm) return true;
int64_t Val = Memory.OffsetImm->getValue();
return (Val >= -1020 && Val <= 1020 && ((Val & 3) == 0)) ||
- Val == INT32_MIN;
+ Val == std::numeric_limits<int32_t>::min();
}
+
bool isAddrMode5FP16() const {
// If we have an immediate that's not a constant, treat it as a label
// reference needing a fixup. If it is a constant, it's something else
// Immediate offset in range [-510, 510] and a multiple of 2.
if (!Memory.OffsetImm) return true;
int64_t Val = Memory.OffsetImm->getValue();
- return (Val >= -510 && Val <= 510 && ((Val & 1) == 0)) || Val == INT32_MIN;
+ return (Val >= -510 && Val <= 510 && ((Val & 1) == 0)) ||
+ Val == std::numeric_limits<int32_t>::min();
}
+
bool isMemTBB() const {
if (!isMem() || !Memory.OffsetRegNum || Memory.isNegative ||
Memory.ShiftType != ARM_AM::no_shift || Memory.Alignment != 0)
return false;
return true;
}
+
bool isMemTBH() const {
if (!isMem() || !Memory.OffsetRegNum || Memory.isNegative ||
Memory.ShiftType != ARM_AM::lsl || Memory.ShiftImm != 1 ||
return false;
return true;
}
+
bool isMemRegOffset() const {
if (!isMem() || !Memory.OffsetRegNum || Memory.Alignment != 0)
return false;
return true;
}
+
bool isT2MemRegOffset() const {
if (!isMem() || !Memory.OffsetRegNum || Memory.isNegative ||
Memory.Alignment != 0 || Memory.BaseRegNum == ARM::PC)
return false;
return true;
}
+
bool isMemThumbRR() const {
// Thumb reg+reg addressing is simple. Just two registers, a base and
// an offset. No shifts, negations or any other complicating factors.
return isARMLowRegister(Memory.BaseRegNum) &&
(!Memory.OffsetRegNum || isARMLowRegister(Memory.OffsetRegNum));
}
+
bool isMemThumbRIs4() const {
if (!isMem() || Memory.OffsetRegNum != 0 ||
!isARMLowRegister(Memory.BaseRegNum) || Memory.Alignment != 0)
int64_t Val = Memory.OffsetImm->getValue();
return Val >= 0 && Val <= 124 && (Val % 4) == 0;
}
+
bool isMemThumbRIs2() const {
if (!isMem() || Memory.OffsetRegNum != 0 ||
!isARMLowRegister(Memory.BaseRegNum) || Memory.Alignment != 0)
int64_t Val = Memory.OffsetImm->getValue();
return Val >= 0 && Val <= 62 && (Val % 2) == 0;
}
+
bool isMemThumbRIs1() const {
if (!isMem() || Memory.OffsetRegNum != 0 ||
!isARMLowRegister(Memory.BaseRegNum) || Memory.Alignment != 0)
int64_t Val = Memory.OffsetImm->getValue();
return Val >= 0 && Val <= 31;
}
+
bool isMemThumbSPI() const {
if (!isMem() || Memory.OffsetRegNum != 0 ||
Memory.BaseRegNum != ARM::SP || Memory.Alignment != 0)
int64_t Val = Memory.OffsetImm->getValue();
return Val >= 0 && Val <= 1020 && (Val % 4) == 0;
}
+
bool isMemImm8s4Offset() const {
// If we have an immediate that's not a constant, treat it as a label
// reference needing a fixup. If it is a constant, it's something else
// Immediate offset a multiple of 4 in range [-1020, 1020].
if (!Memory.OffsetImm) return true;
int64_t Val = Memory.OffsetImm->getValue();
- // Special case, #-0 is INT32_MIN.
- return (Val >= -1020 && Val <= 1020 && (Val & 3) == 0) || Val == INT32_MIN;
+ // Special case, #-0 is std::numeric_limits<int32_t>::min().
+ return (Val >= -1020 && Val <= 1020 && (Val & 3) == 0) ||
+ Val == std::numeric_limits<int32_t>::min();
}
+
bool isMemImm0_1020s4Offset() const {
if (!isMem() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
return false;
int64_t Val = Memory.OffsetImm->getValue();
return Val >= 0 && Val <= 1020 && (Val & 3) == 0;
}
+
bool isMemImm8Offset() const {
if (!isMem() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
return false;
// Immediate offset in range [-255, 255].
if (!Memory.OffsetImm) return true;
int64_t Val = Memory.OffsetImm->getValue();
- return (Val == INT32_MIN) || (Val > -256 && Val < 256);
+ return (Val == std::numeric_limits<int32_t>::min()) ||
+ (Val > -256 && Val < 256);
}
+
bool isMemPosImm8Offset() const {
if (!isMem() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
return false;
int64_t Val = Memory.OffsetImm->getValue();
return Val >= 0 && Val < 256;
}
+
bool isMemNegImm8Offset() const {
if (!isMem() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
return false;
// Immediate offset in range [-255, -1].
if (!Memory.OffsetImm) return false;
int64_t Val = Memory.OffsetImm->getValue();
- return (Val == INT32_MIN) || (Val > -256 && Val < 0);
+ return (Val == std::numeric_limits<int32_t>::min()) ||
+ (Val > -256 && Val < 0);
}
+
bool isMemUImm12Offset() const {
if (!isMem() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
return false;
int64_t Val = Memory.OffsetImm->getValue();
return (Val >= 0 && Val < 4096);
}
+
bool isMemImm12Offset() const {
// If we have an immediate that's not a constant, treat it as a label
// reference needing a fixup. If it is a constant, it's something else
// Immediate offset in range [-4095, 4095].
if (!Memory.OffsetImm) return true;
int64_t Val = Memory.OffsetImm->getValue();
- return (Val > -4096 && Val < 4096) || (Val == INT32_MIN);
+ return (Val > -4096 && Val < 4096) ||
+ (Val == std::numeric_limits<int32_t>::min());
}
+
bool isConstPoolAsmImm() const {
    // Delay processing of the Constant Pool Immediate; this will turn into
    // a constant. Match no other operand.
return (isConstantPoolImm());
}
+
bool isPostIdxImm8() const {
if (!isImm()) return false;
const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
if (!CE) return false;
int64_t Val = CE->getValue();
- return (Val > -256 && Val < 256) || (Val == INT32_MIN);
+ return (Val > -256 && Val < 256) ||
+ (Val == std::numeric_limits<int32_t>::min());
}
+
bool isPostIdxImm8s4() const {
if (!isImm()) return false;
const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
if (!CE) return false;
int64_t Val = CE->getValue();
return ((Val & 3) == 0 && Val >= -1020 && Val <= 1020) ||
- (Val == INT32_MIN);
+ (Val == std::numeric_limits<int32_t>::min());
}
bool isMSRMask() const { return Kind == k_MSRMask; }
bool isSingleSpacedVectorList() const {
return Kind == k_VectorList && !VectorList.isDoubleSpaced;
}
+
bool isDoubleSpacedVectorList() const {
return Kind == k_VectorList && VectorList.isDoubleSpaced;
}
+
bool isVecListOneD() const {
if (!isSingleSpacedVectorList()) return false;
return VectorList.Count == 1;
bool isSingleSpacedVectorAllLanes() const {
return Kind == k_VectorListAllLanes && !VectorList.isDoubleSpaced;
}
+
bool isDoubleSpacedVectorAllLanes() const {
return Kind == k_VectorListAllLanes && VectorList.isDoubleSpaced;
}
+
bool isVecListOneDAllLanes() const {
if (!isSingleSpacedVectorAllLanes()) return false;
return VectorList.Count == 1;
bool isSingleSpacedVectorIndexed() const {
return Kind == k_VectorListIndexed && !VectorList.isDoubleSpaced;
}
+
bool isDoubleSpacedVectorIndexed() const {
return Kind == k_VectorListIndexed && VectorList.isDoubleSpaced;
}
+
bool isVecListOneDByteIndexed() const {
if (!isSingleSpacedVectorIndexed()) return false;
return VectorList.Count == 1 && VectorList.LaneIndex <= 7;
if (Kind != k_VectorIndex) return false;
return VectorIndex.Val < 8;
}
+
bool isVectorIndex16() const {
if (Kind != k_VectorIndex) return false;
return VectorIndex.Val < 4;
}
+
bool isVectorIndex32() const {
if (Kind != k_VectorIndex) return false;
return VectorIndex.Val < 2;
}
return true;
}
+
bool isNEONi16ByteReplicate() const { return isNEONByteReplicate(2); }
bool isNEONi32ByteReplicate() const { return isNEONByteReplicate(4); }
+
bool isNEONi32vmov() const {
if (isNEONByteReplicate(4))
return false; // Let it to be classified as byte-replicate case.
(Value >= 0x01ff && Value <= 0xffff && (Value & 0xff) == 0xff) ||
(Value >= 0x01ffff && Value <= 0xffffff && (Value & 0xffff) == 0xffff);
}
+
bool isNEONi32vmovNeg() const {
if (!isImm()) return false;
const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
if (!Memory.OffsetRegNum) {
ARM_AM::AddrOpc AddSub = Val < 0 ? ARM_AM::sub : ARM_AM::add;
// Special case for #-0
- if (Val == INT32_MIN) Val = 0;
+ if (Val == std::numeric_limits<int32_t>::min()) Val = 0;
if (Val < 0) Val = -Val;
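+ // Fold the direction and magnitude into the single AM2 operand encoding.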
Val = ARM_AM::getAM2Opc(AddSub, Val, ARM_AM::no_shift);
} else {
int32_t Val = CE->getValue();
ARM_AM::AddrOpc AddSub = Val < 0 ? ARM_AM::sub : ARM_AM::add;
// Special case for #-0
- if (Val == INT32_MIN) Val = 0;
+ if (Val == std::numeric_limits<int32_t>::min()) Val = 0;
if (Val < 0) Val = -Val;
Val = ARM_AM::getAM2Opc(AddSub, Val, ARM_AM::no_shift);
Inst.addOperand(MCOperand::createReg(0));
if (!Memory.OffsetRegNum) {
ARM_AM::AddrOpc AddSub = Val < 0 ? ARM_AM::sub : ARM_AM::add;
// Special case for #-0
- if (Val == INT32_MIN) Val = 0;
+ if (Val == std::numeric_limits<int32_t>::min()) Val = 0;
if (Val < 0) Val = -Val;
Val = ARM_AM::getAM3Opc(AddSub, Val);
} else {
int32_t Val = CE->getValue();
ARM_AM::AddrOpc AddSub = Val < 0 ? ARM_AM::sub : ARM_AM::add;
// Special case for #-0
- if (Val == INT32_MIN) Val = 0;
+ if (Val == std::numeric_limits<int32_t>::min()) Val = 0;
if (Val < 0) Val = -Val;
Val = ARM_AM::getAM3Opc(AddSub, Val);
Inst.addOperand(MCOperand::createReg(0));
int32_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() / 4 : 0;
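+ // AM5 offsets are stored in words, hence the division by 4 above.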
ARM_AM::AddrOpc AddSub = Val < 0 ? ARM_AM::sub : ARM_AM::add;
// Special case for #-0
- if (Val == INT32_MIN) Val = 0;
+ if (Val == std::numeric_limits<int32_t>::min()) Val = 0;
if (Val < 0) Val = -Val;
Val = ARM_AM::getAM5Opc(AddSub, Val);
Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
int32_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() / 2 : 0;
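+ // The FP16 variant of AM5 uses halfword offsets, hence the division by 2.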
ARM_AM::AddrOpc AddSub = Val < 0 ? ARM_AM::sub : ARM_AM::add;
// Special case for #-0
- if (Val == INT32_MIN) Val = 0;
+ if (Val == std::numeric_limits<int32_t>::min()) Val = 0;
if (Val < 0) Val = -Val;
Val = ARM_AM::getAM5FP16Opc(AddSub, Val);
Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
assert(CE && "non-constant post-idx-imm8 operand!");
int Imm = CE->getValue();
bool isAdd = Imm >= 0;
- if (Imm == INT32_MIN) Imm = 0;
+ if (Imm == std::numeric_limits<int32_t>::min()) Imm = 0;
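+ // Bit 8 carries the add (1) / subtract (0) flag and bits [7:0] the
+ // magnitude, so e.g. #4 encodes as 0x104 and #-4 as 0x004.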
Imm = (Imm < 0 ? -Imm : Imm) | (int)isAdd << 8;
Inst.addOperand(MCOperand::createImm(Imm));
}
assert(CE && "non-constant post-idx-imm8s4 operand!");
int Imm = CE->getValue();
bool isAdd = Imm >= 0;
- if (Imm == INT32_MIN) Imm = 0;
+ if (Imm == std::numeric_limits<int32_t>::min()) Imm = 0;
// Immediate is scaled by 4.
Imm = ((Imm < 0 ? -Imm : Imm) / 4) | (int)isAdd << 8;
Inst.addOperand(MCOperand::createImm(Imm));
B |= 0xe00; // cmode = 0b1110
Inst.addOperand(MCOperand::createImm(B));
}
+
void addNEONi32vmovOperands(MCInst &Inst, unsigned N) const {
assert(N == 1 && "Invalid number of operands!");
// The immediate encodes the type of constant as well as the value.
B |= 0xe00; // cmode = 0b1110
Inst.addOperand(MCOperand::createImm(B));
}
+
void addNEONi32vmovNegOperands(MCInst &Inst, unsigned N) const {
assert(N == 1 && "Invalid number of operands!");
// The immediate encodes the type of constant as well as the value.
static std::unique_ptr<ARMOperand>
CreateRegList(SmallVectorImpl<std::pair<unsigned, unsigned>> &Regs,
SMLoc StartLoc, SMLoc EndLoc) {
- assert (Regs.size() > 0 && "RegList contains no registers?");
+ assert(Regs.size() > 0 && "RegList contains no registers?");
KindTy Kind = k_RegisterList;
if (ARMMCRegisterClasses[ARM::DPRRegClassID].contains(Regs.front().second))
array_pod_sort(Regs.begin(), Regs.end());
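+ // array_pod_sort orders the pairs by their first element, so the register
+ // list below comes out in ascending order.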
auto Op = make_unique<ARMOperand>(Kind);
- for (SmallVectorImpl<std::pair<unsigned, unsigned> >::const_iterator
+ for (SmallVectorImpl<std::pair<unsigned, unsigned>>::const_iterator
I = Regs.begin(), E = Regs.end(); I != E; ++I)
Op->Registers.push_back(I->second);
Op->StartLoc = StartLoc;
/// Try to parse a register name. The token must be an Identifier when called,
/// and if it is a register name the token is eaten and the register number is
/// returned. Otherwise return -1.
-///
int ARMAsmParser::tryParseRegister() {
MCAsmParser &Parser = getParser();
const AsmToken &Tok = Parser.getTok();
return 0;
}
-
/// Try to parse a register name. The token must be an Identifier when called.
/// If it's a register, an AsmOperand is created. Another AsmOperand is created
- /// if there is a "writeback". 'true' if it's not a register.
+ /// if there is a "writeback". Returns 'true' if it's not a register.
&ARMMCRegisterClasses[ARM::DPairSpcRegClassID];
FirstReg = MRI->getMatchingSuperReg(FirstReg, ARM::dsub_0, RC);
}
-
Operands.push_back(ARMOperand::CreateVectorList(FirstReg, Count,
(Spacing == 2), S, E));
break;
Error(S, "constant expression expected");
return MatchOperand_ParseFail;
}
- // Negative zero is encoded as the flag value INT32_MIN.
+ // Negative zero is encoded as the flag value
+ // std::numeric_limits<int32_t>::min().
int32_t Val = CE->getValue();
if (isNegative && Val == 0)
- Val = INT32_MIN;
+ Val = std::numeric_limits<int32_t>::min();
Operands.push_back(
ARMOperand::CreateImm(MCConstantExpr::create(Val, getContext()), S, E));
return MatchOperand_Success;
}
-
bool haveEaten = false;
bool isAdd = true;
if (Tok.is(AsmToken::Plus)) {
if (!CE)
- return Error (E, "constant expression expected");
+ return Error(E, "constant expression expected");
- // If the constant was #-0, represent it as INT32_MIN.
+ // If the constant was #-0, represent it as
+ // std::numeric_limits<int32_t>::min().
int32_t Val = CE->getValue();
if (isNegative && Val == 0)
- CE = MCConstantExpr::create(INT32_MIN, getContext());
+ CE = MCConstantExpr::create(std::numeric_limits<int32_t>::min(),
+ getContext());
// Now we should have the closing ']'
if (Parser.getTok().isNot(AsmToken::RBrac))
case AsmToken::LCurly:
return parseRegisterList(Operands);
case AsmToken::Dollar:
- case AsmToken::Hash: {
+ case AsmToken::Hash:
// #42 -> immediate.
S = Parser.getTok().getLoc();
Parser.Lex();
if (CE) {
int32_t Val = CE->getValue();
if (isNegative && Val == 0)
- ImmVal = MCConstantExpr::create(INT32_MIN, getContext());
+ ImmVal = MCConstantExpr::create(std::numeric_limits<int32_t>::min(),
+ getContext());
}
E = SMLoc::getFromPointer(Parser.getTok().getLoc().getPointer() - 1);
Operands.push_back(ARMOperand::CreateImm(ImmVal, S, E));
}
// w/ a ':' after the '#', it's just like a plain ':'.
LLVM_FALLTHROUGH;
- }
+
case AsmToken::Colon: {
S = Parser.getTok().getLoc();
// ":lower16:" and ":upper16:" expression prefixes
!inITBlock()))
return true;
-
-
// Register-register 'add/sub' for thumb does not have a cc_out operand
// when it's an ADD/SUB SP, #imm. Be lenient on count since there's also
// the "add/sub SP, SP, #imm" version. If the follow-up operands aren't
static bool doesIgnoreDataTypeSuffix(StringRef Mnemonic, StringRef DT) {
return Mnemonic.startswith("vldm") || Mnemonic.startswith("vstm");
}
+
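+// Implemented in the TableGen-erated ARMGenAsmMatcher.inc; rewrites alias
+// mnemonics in place before matching.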
static void applyMnemonicAliases(StringRef &Mnemonic, uint64_t Features,
unsigned VariantID);
Inst.getOpcode() == ARM::BKPT ||
Inst.getOpcode() == ARM::tHLT ||
Inst.getOpcode() == ARM::HLT;
-
}
bool ARMAsmParser::validatetLDMRegList(const MCInst &Inst,
case ARM::t2LDMIA_UPD:
case ARM::t2LDMDB_UPD:
case ARM::t2STMIA_UPD:
- case ARM::t2STMDB_UPD: {
+ case ARM::t2STMDB_UPD:
if (listContainsReg(Inst, 3, Inst.getOperand(0).getReg()))
return Error(Operands.back()->getStartLoc(),
"writeback register not allowed in register list");
return true;
}
break;
- }
+
case ARM::sysLDMIA_UPD:
case ARM::sysLDMDA_UPD:
case ARM::sysLDMDB_UPD:
case ARM::sysSTMIB_UPD:
return Error(Operands[2]->getStartLoc(),
"system STM cannot have writeback register");
- case ARM::tMUL: {
+ case ARM::tMUL:
// The second source operand must be the same register as the destination
// operand.
//
"destination register must match source register");
}
break;
- }
+
// Like for ldm/stm, push and pop have hi-reg handling version in Thumb2,
// so only issue a diagnostic for thumb1. The instructions will be
// switched to the t2 encodings in processInstruction() if necessary.
return true;
break;
}
- case ARM::tADDrSP: {
+ case ARM::tADDrSP:
// If the non-SP source operand and the destination operand are not the
// same, we need thumb2 (for the wide encoding), or we have an error.
if (!isThumbTwo() &&
"source register must be the same as destination");
}
break;
- }
+
// Final range checking for Thumb unconditional branch instructions.
case ARM::tB:
if (!(static_cast<ARMOperand &>(*Operands[2])).isSignedOffset<11, 1>())
break;
}
case ARM::HINT:
- case ARM::t2HINT: {
+ case ARM::t2HINT:
if (hasRAS()) {
// ESB is not predicable (pred must be AL)
unsigned Imm8 = Inst.getOperand(0).getImm();
// Without the RAS extension, this behaves as any other unallocated hint.
break;
}
- }
return false;
}
// Handle encoding choice for the shift-immediate instructions.
case ARM::t2LSLri:
case ARM::t2LSRri:
- case ARM::t2ASRri: {
+ case ARM::t2ASRri:
if (isARMLowRegister(Inst.getOperand(0).getReg()) &&
isARMLowRegister(Inst.getOperand(1).getReg()) &&
Inst.getOperand(5).getReg() == (inITBlock() ? 0 : ARM::CPSR) &&
return true;
}
return false;
- }
// Handle the Thumb2 mode MOV complex aliases.
case ARM::t2MOVsr:
Inst = TmpInst;
return true;
}
- case ARM::tADDrSP: {
+ case ARM::tADDrSP:
// If the non-SP source operand and the destination operand are not the
// same, we need to use the 32-bit encoding if it's available.
if (Inst.getOperand(0).getReg() != Inst.getOperand(2).getReg()) {
return true;
}
break;
- }
case ARM::tB:
// A Thumb conditional branch outside of an IT block is a tBcc.
if (Inst.getOperand(1).getImm() != ARMCC::AL && !inITBlock()) {
(!listContainsBase && !hasWritebackToken) ||
(listContainsBase && hasWritebackToken)) {
// 16-bit encoding isn't sufficient. Switch to the 32-bit version.
- assert (isThumbTwo());
+ assert(isThumbTwo());
Inst.setOpcode(hasWritebackToken ? ARM::t2LDMIA_UPD : ARM::t2LDMIA);
// If we're switching to the updating version, we need to insert
// the writeback tied operand.
bool listContainsBase;
if (checkLowRegisterList(Inst, 4, Rn, 0, listContainsBase)) {
// 16-bit encoding isn't sufficient. Switch to the 32-bit version.
- assert (isThumbTwo());
+ assert(isThumbTwo());
Inst.setOpcode(ARM::t2STMIA_UPD);
return true;
}
// should have generated an error in validateInstruction().
if (!checkLowRegisterList(Inst, 2, 0, ARM::PC, listContainsBase))
return false;
- assert (isThumbTwo());
+ assert(isThumbTwo());
Inst.setOpcode(ARM::t2LDMIA_UPD);
// Add the base register and writeback operands.
Inst.insert(Inst.begin(), MCOperand::createReg(ARM::SP));
bool listContainsBase;
if (!checkLowRegisterList(Inst, 2, 0, ARM::LR, listContainsBase))
return false;
- assert (isThumbTwo());
+ assert(isThumbTwo());
Inst.setOpcode(ARM::t2STMDB_UPD);
// Add the base register and writeback operands.
Inst.insert(Inst.begin(), MCOperand::createReg(ARM::SP));
Inst.insert(Inst.begin(), MCOperand::createReg(ARM::SP));
return true;
}
- case ARM::t2MOVi: {
+ case ARM::t2MOVi:
// If we can use the 16-bit encoding and the user didn't explicitly
// request the 32-bit variant, transform it here.
if (isARMLowRegister(Inst.getOperand(0).getReg()) &&
return true;
}
break;
- }
- case ARM::t2MOVr: {
+
+ case ARM::t2MOVr:
// If we can use the 16-bit encoding and the user didn't explicitly
// request the 32-bit variant, transform it here.
if (isARMLowRegister(Inst.getOperand(0).getReg()) &&
return true;
}
break;
- }
+
case ARM::t2SXTH:
case ARM::t2SXTB:
case ARM::t2UXTH:
- case ARM::t2UXTB: {
+ case ARM::t2UXTB:
// If we can use the 16-bit encoding and the user didn't explicitly
// request the 32-bit variant, transform it here.
if (isARMLowRegister(Inst.getOperand(0).getReg()) &&
return true;
}
break;
- }
+
case ARM::MOVsi: {
ARM_AM::ShiftOpc SOpc = ARM_AM::getSORegShOp(Inst.getOperand(2).getImm());
- // rrx shifts and asr/lsr of #32 is encoded as 0
+ // rrx shifts and asr/lsr of #32 are encoded as 0
case ARM::t2SBCrr:
case ARM::t2RORrr:
case ARM::t2BICrr:
- {
// Assemblers should use the narrow encodings of these instructions when permissible.
if ((isARMLowRegister(Inst.getOperand(1).getReg()) &&
isARMLowRegister(Inst.getOperand(2).getReg())) &&
return true;
}
return false;
- }
+
case ARM::t2ANDrr:
case ARM::t2EORrr:
case ARM::t2ADCrr:
case ARM::t2ORRrr:
- {
// Assemblers should use the narrow encodings of these instructions when permissible.
// These instructions are special in that they are commutable, so shorter encodings
// are available more often.
}
return false;
}
- }
return false;
}
}
namespace llvm {
+
template <> inline bool IsCPSRDead<MCInst>(const MCInst *Instr) {
return true; // In an assembly source, no need to second-guess
}
-}
+
+} // end namespace llvm
- // Returns true if Inst is unpredictable if it is in and IT block, but is not
+ // Returns true if Inst is unpredictable if it is in an IT block, but is not
// the last instruction in the block.
return false;
}
+
/// parseDirectiveFPU
/// ::= .fpu str
bool ARMAsmParser::parseDirectiveFPU(SMLoc L) {
int64_t Value;
if (!SOExpr->evaluateAsAbsolute(Value))
return Match_Success;
- assert((Value >= INT32_MIN && Value <= UINT32_MAX) &&
+ assert((Value >= std::numeric_limits<int32_t>::min() &&
+ Value <= std::numeric_limits<uint32_t>::max()) &&
"expression value must be representable in 32 bits");
}
break;
-//===-- ARMDisassembler.cpp - Disassembler for ARM/Thumb ISA --------------===//
+//===- ARMDisassembler.cpp - Disassembler for ARM/Thumb ISA ---------------===//
//
// The LLVM Compiler Infrastructure
//
#include "MCTargetDesc/ARMAddressingModes.h"
#include "MCTargetDesc/ARMBaseInfo.h"
#include "MCTargetDesc/ARMMCTargetDesc.h"
+#include "Utils/ARMBaseInfo.h"
#include "llvm/MC/MCContext.h"
#include "llvm/MC/MCDisassembler/MCDisassembler.h"
#include "llvm/MC/MCFixedLenDisassembler.h"
#define DEBUG_TYPE "arm-disassembler"
-typedef MCDisassembler::DecodeStatus DecodeStatus;
+using DecodeStatus = MCDisassembler::DecodeStatus;
namespace {
private:
mutable ITStatus ITBlock;
+
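+ // Attaches the predicate implied by the current IT-block state to a newly
+ // decoded Thumb instruction.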
DecodeStatus AddThumbPredicate(MCInst&) const;
void UpdateThumbVFPPredicate(MCInst&) const;
};
break;
}
-
// First input register
switch (Inst.getOpcode()) {
case ARM::VST1q16:
return S;
}
-
static DecodeStatus DecodeThumbAddSPImm(MCInst &Inst, uint16_t Insn,
uint64_t Address, const void *Decoder) {
unsigned imm = fieldFromInstruction(Insn, 0, 7);
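+ // imm7 counts words: the instruction adjusts SP by imm * 4.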
static DecodeStatus DecodeBankedReg(MCInst &Inst, unsigned Val,
uint64_t Address, const void *Decoder) {
-
unsigned R = fieldFromInstruction(Val, 5, 1);
unsigned SysM = fieldFromInstruction(Val, 0, 5);
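+ // R selects between the GP-register and SPSR banks; SysM picks the specific
+ // banked register within that bank.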
-//===-- Thumb1FrameLowering.cpp - Thumb1 Frame Information ----------------===//
+//===- Thumb1FrameLowering.cpp - Thumb1 Frame Information -----------------===//
//
// The LLVM Compiler Infrastructure
//
#include "ARMBaseRegisterInfo.h"
#include "ARMMachineFunctionInfo.h"
#include "ARMSubtarget.h"
-#include "MCTargetDesc/ARMBaseInfo.h"
#include "Thumb1InstrInfo.h"
#include "ThumbRegisterInfo.h"
+#include "Utils/ARMBaseInfo.h"
#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/IR/DebugLoc.h"
+#include "llvm/MC/MCContext.h"
#include "llvm/MC/MCDwarf.h"
+#include "llvm/MC/MCRegisterInfo.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/ErrorHandling.h"
+#include "llvm/Support/MathExtras.h"
#include "llvm/Target/TargetInstrInfo.h"
+#include "llvm/Target/TargetOpcodes.h"
#include "llvm/Target/TargetSubtargetInfo.h"
#include <bitset>
#include <cassert>
MRI, MIFlags);
}
-
MachineBasicBlock::iterator Thumb1FrameLowering::
eliminateCallFramePseudoInstr(MachineFunction &MF, MachineBasicBlock &MBB,
MachineBasicBlock::iterator I) const {
return true;
}
-typedef std::bitset<ARM::NUM_TARGET_REGS> ARMRegSet;
+using ARMRegSet = std::bitset<ARM::NUM_TARGET_REGS>;
- // Return the first iteraror after CurrentReg which is present in EnabledRegs,
+ // Return the first iterator after CurrentReg which is present in EnabledRegs,
// or OrderEnd if no further registers are in that set. This does not advance
-//===-- Thumb1FrameLowering.h - Thumb1-specific frame info stuff --*- C++ -*-=//
+//===- Thumb1FrameLowering.h - Thumb1-specific frame info stuff ---*- C++ -*-=//
//
// The LLVM Compiler Infrastructure
//
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
-//
-//
-//
-//===----------------------------------------------------------------------===//
#ifndef LLVM_LIB_TARGET_ARM_THUMB1FRAMELOWERING_H
#define LLVM_LIB_TARGET_ARM_THUMB1FRAMELOWERING_H
#include "ARMFrameLowering.h"
-#include "Thumb1InstrInfo.h"
-#include "ThumbRegisterInfo.h"
-#include "llvm/Target/TargetFrameLowering.h"
namespace llvm {
+class ARMSubtarget;
+class MachineFunction;
+
class Thumb1FrameLowering : public ARMFrameLowering {
public:
explicit Thumb1FrameLowering(const ARMSubtarget &sti);
bool emitPopSpecialFixUp(MachineBasicBlock &MBB, bool DoIt) const;
};
-} // End llvm namespace
+} // end namespace llvm
-#endif
+#endif // LLVM_LIB_TARGET_ARM_THUMB1FRAMELOWERING_H