MachineFunction *MF;
/// Information used to access the description of the opcodes.
const TargetInstrInfo *TII;
+ /// Information used to verify types are consistent.
+ const MachineRegisterInfo *MRI;
/// Debug location to be set to any instruction we create.
DebugLoc DL;
return *TII;
}
- void validateTruncExt(ArrayRef<LLT> Tys, bool IsExtend);
+ void validateTruncExt(unsigned Dst, unsigned Src, bool IsExtend);
public:
/// Getter for the function we currently build.
- /// \pre Ty == LLT{} or isPreISelGenericOpcode(Opcode)
+ /// \pre setBasicBlock or setMI must have been called.
///
/// \return a MachineInstrBuilder for the newly created instruction.
- MachineInstrBuilder buildInstr(unsigned Opcode, ArrayRef<LLT> Tys);
-
- /// Build and insert <empty> = \p Opcode <empty>.
- ///
- /// \pre setBasicBlock or setMI must have been called.
- /// \pre not isPreISelGenericOpcode(\p Opcode)
- ///
- /// \return a MachineInstrBuilder for the newly created instruction.
- MachineInstrBuilder buildInstr(unsigned Opcode) {
- return buildInstr(Opcode, ArrayRef<LLT>());
- }
+ MachineInstrBuilder buildInstr(unsigned Opcode);
- /// Build and insert \p Res<def> = G_FRAME_INDEX \p Ty \p Idx
+ /// Build and insert \p Res<def> = G_FRAME_INDEX \p Idx
///
/// \pre setBasicBlock or setMI must have been called.
///
/// \return a MachineInstrBuilder for the newly created instruction.
- MachineInstrBuilder buildFrameIndex(LLT Ty, unsigned Res, int Idx);
+ MachineInstrBuilder buildFrameIndex(unsigned Res, int Idx);
- /// Build and insert \p Res<def> = G_ADD \p Ty \p Op0, \p Op1
+ /// Build and insert \p Res<def> = G_ADD \p Op0, \p Op1
///
/// \pre setBasicBlock or setMI must have been called.
///
/// \return a MachineInstrBuilder for the newly created instruction.
- MachineInstrBuilder buildAdd(LLT Ty, unsigned Res, unsigned Op0,
- unsigned Op1);
+ MachineInstrBuilder buildAdd(unsigned Res, unsigned Op0,
+ unsigned Op1);
- /// Build and insert \p Res<def> = G_SUB \p Ty \p Op0, \p Op1
+ /// Build and insert \p Res<def> = G_SUB \p Op0, \p Op1
///
/// \pre setBasicBlock or setMI must have been called.
///
/// \return a MachineInstrBuilder for the newly created instruction.
- MachineInstrBuilder buildSub(LLT Ty, unsigned Res, unsigned Op0,
+ MachineInstrBuilder buildSub(unsigned Res, unsigned Op0,
unsigned Op1);
- /// Build and insert \p Res<def> = G_MUL \p Ty \p Op0, \p Op1
+ /// Build and insert \p Res<def> = G_MUL \p Op0, \p Op1
/// \pre setBasicBlock or setMI must have been called.
///
/// \return a MachineInstrBuilder for the newly created instruction.
- MachineInstrBuilder buildMul(LLT Ty, unsigned Res, unsigned Op0,
+ MachineInstrBuilder buildMul(unsigned Res, unsigned Op0,
unsigned Op1);
- /// Build and insert \p Res<def>, \p CarryOut = G_UADDE \p Tys \p Op0, \p Op1,
+ /// Build and insert \p Res<def>, \p CarryOut = G_UADDE \p Op0, \p Op1,
/// \pre setBasicBlock or setMI must have been called.
///
/// \return The newly created instruction.
- MachineInstrBuilder buildUAdde(ArrayRef<LLT> Tys, unsigned Res,
- unsigned CarryOut, unsigned Op0, unsigned Op1,
- unsigned CarryIn);
+ MachineInstrBuilder buildUAdde(unsigned Res, unsigned CarryOut, unsigned Op0,
+ unsigned Op1, unsigned CarryIn);
- /// Build and insert \p Res<def> = G_TYPE \p Ty \p Op.
+ /// Build and insert \p Res<def> = G_TYPE \p Op.
///
/// register).
///
/// \return The newly created instruction.
- MachineInstrBuilder buildType(LLT Ty, unsigned Res, unsigned Op);
+ MachineInstrBuilder buildType(unsigned Res, unsigned Op);
- /// Build and insert \p Res<def> = G_ANYEXT \p { DstTy, SrcTy } \p Op0
+ /// Build and insert \p Res<def> = G_ANYEXT \p Op0
///
/// \pre setBasicBlock or setMI must have been called.
///
/// \return The newly created instruction.
- MachineInstrBuilder buildAnyExt(ArrayRef<LLT> Tys, unsigned Res, unsigned Op);
+ MachineInstrBuilder buildAnyExt(unsigned Res, unsigned Op);
- /// Build and insert \p Res<def> = G_SEXT \p { DstTy, SrcTy }\p Op
+ /// Build and insert \p Res<def> = G_SEXT \p Op
///
/// \pre setBasicBlock or setMI must have been called.
///
/// \return The newly created instruction.
- MachineInstrBuilder buildSExt(ArrayRef<LLT> Tys, unsigned Res, unsigned Op);
+ MachineInstrBuilder buildSExt(unsigned Res, unsigned Op);
- /// Build and insert \p Res<def> = G_ZEXT \p { DstTy, SrcTy } \p Op
+ /// Build and insert \p Res<def> = G_ZEXT \p Op
///
/// \pre setBasicBlock or setMI must have been called.
///
/// \return The newly created instruction.
- MachineInstrBuilder buildZExt(ArrayRef<LLT> Tys, unsigned Res, unsigned Op);
+ MachineInstrBuilder buildZExt(unsigned Res, unsigned Op);
- /// Build and insert G_BR unsized \p Dest
+ /// Build and insert G_BR \p Dest
///
/// \pre setBasicBlock or setMI must have been called.
///
/// \return The newly created instruction.
- MachineInstrBuilder buildBrCond(LLT Ty, unsigned Tst, MachineBasicBlock &BB);
+ MachineInstrBuilder buildBrCond(unsigned Tst, MachineBasicBlock &BB);
- /// Build and insert \p Res = G_CONSTANT \p Ty \p Val
+ /// Build and insert \p Res = G_CONSTANT \p Val
///
/// \pre setBasicBlock or setMI must have been called.
///
/// \return The newly created instruction.
- MachineInstrBuilder buildConstant(LLT Ty, unsigned Res, int64_t Val);
+ MachineInstrBuilder buildConstant(unsigned Res, int64_t Val);
- /// Build and insert \p Res = G_FCONSTANT \p Ty \p Val
+ /// Build and insert \p Res = G_FCONSTANT \p Val
///
/// \pre setBasicBlock or setMI must have been called.
///
/// \return The newly created instruction.
- MachineInstrBuilder buildFConstant(LLT Ty, unsigned Res,
- const ConstantFP &Val);
+ MachineInstrBuilder buildFConstant(unsigned Res, const ConstantFP &Val);
/// Build and insert \p Res<def> = COPY Op
///
/// \pre setBasicBlock or setMI must have been called.
///
/// \return a MachineInstrBuilder for the newly created instruction.
- MachineInstrBuilder buildLoad(LLT VTy, LLT PTy, unsigned Res, unsigned Addr,
+ MachineInstrBuilder buildLoad(unsigned Res, unsigned Addr,
MachineMemOperand &MMO);
- /// Build and insert `G_STORE { VTy, PTy } Val, Addr, MMO`.
+ /// Build and insert `G_STORE Val, Addr, MMO`.
/// \pre setBasicBlock or setMI must have been called.
///
/// \return a MachineInstrBuilder for the newly created instruction.
- MachineInstrBuilder buildStore(LLT VTy, LLT PTy, unsigned Val, unsigned Addr,
+ MachineInstrBuilder buildStore(unsigned Val, unsigned Addr,
MachineMemOperand &MMO);
- /// Build and insert `Res0<def>, ... = G_EXTRACT { ResTys, SrcTy } Src, Idx0,
+ /// Build and insert `Res0<def>, ... = G_EXTRACT Src, Idx0,
/// \pre \p Indices must be in ascending order of bit position.
///
/// \return a MachineInstrBuilder for the newly created instruction.
- MachineInstrBuilder buildExtract(ArrayRef<LLT> ResTys,
- ArrayRef<unsigned> Results,
- ArrayRef<uint64_t> Indices, LLT SrcTy,
- unsigned Src);
+ MachineInstrBuilder buildExtract(ArrayRef<unsigned> Results,
+ ArrayRef<uint64_t> Indices, unsigned Src);
- /// Build and insert \p Res<def> = G_SEQUENCE \p { \pResTy, \p Op0Ty, ... }
- /// \p Op0, \p Idx0...
+ /// Build and insert \p Res<def> = G_SEQUENCE \p Op0, \p Idx0...
/// \pre \p Indices must be in ascending order of bit position.
///
/// \return a MachineInstrBuilder for the newly created instruction.
- MachineInstrBuilder buildSequence(LLT ResTy, unsigned Res,
- ArrayRef<LLT> OpTys,
+ MachineInstrBuilder buildSequence(unsigned Res,
ArrayRef<unsigned> Ops,
ArrayRef<unsigned> Indices);
void addUsesWithIndices(MachineInstrBuilder MIB) {}
template <typename... ArgTys>
- void addUsesWithIndices(MachineInstrBuilder MIB, LLT Ty, unsigned Reg,
+ void addUsesWithIndices(MachineInstrBuilder MIB, unsigned Reg,
unsigned BitIndex, ArgTys... Args) {
MIB.addUse(Reg).addImm(BitIndex);
- MIB->setType(Ty, MIB->getNumTypes());
-
addUsesWithIndices(MIB, Args...);
}
template <typename... ArgTys>
- MachineInstrBuilder buildSequence(LLT Ty, unsigned Res, LLT OpTy, unsigned Op,
+ MachineInstrBuilder buildSequence(unsigned Res, unsigned Op,
unsigned Index, ArgTys... Args) {
MachineInstrBuilder MIB =
- buildInstr(TargetOpcode::G_SEQUENCE, Ty).addDef(Res);
- addUsesWithIndices(MIB, OpTy, Op, Index, Args...);
+ buildInstr(TargetOpcode::G_SEQUENCE).addDef(Res);
+ addUsesWithIndices(MIB, Op, Index, Args...);
return MIB;
}
template <typename... ArgTys>
- MachineInstrBuilder buildInsert(LLT Ty, unsigned Res, unsigned Src, LLT OpTy,
+ MachineInstrBuilder buildInsert(unsigned Res, unsigned Src,
unsigned Op, unsigned Index, ArgTys... Args) {
MachineInstrBuilder MIB =
- buildInstr(TargetOpcode::G_INSERT, Ty).addDef(Res).addUse(Src);
- addUsesWithIndices(MIB, OpTy, Op, Index, Args...);
+ buildInstr(TargetOpcode::G_INSERT).addDef(Res).addUse(Src);
+ addUsesWithIndices(MIB, Op, Index, Args...);
return MIB;
}
/// \pre setBasicBlock or setMI must have been called.
///
/// \return a MachineInstrBuilder for the newly created instruction.
- MachineInstrBuilder buildIntrinsic(ArrayRef<LLT> Tys, Intrinsic::ID ID,
- unsigned Res, bool HasSideEffects);
+ MachineInstrBuilder buildIntrinsic(Intrinsic::ID ID, unsigned Res,
+ bool HasSideEffects);
- /// Build and insert \p Res<def> = G_FPTRUNC \p { DstTy, SrcTy } \p Op
+ /// Build and insert \p Res<def> = G_FPTRUNC \p Op
///
/// \pre setBasicBlock or setMI must have been called.
///
/// \return The newly created instruction.
- MachineInstrBuilder buildFPTrunc(ArrayRef<LLT> Ty, unsigned Res, unsigned Op);
+ MachineInstrBuilder buildFPTrunc(unsigned Res, unsigned Op);
- /// Build and insert \p Res<def> = G_TRUNC \p { DstTy, SrcTy } \p Op
+ /// Build and insert \p Res<def> = G_TRUNC \p Op
///
/// \pre setBasicBlock or setMI must have been called.
///
/// \return The newly created instruction.
- MachineInstrBuilder buildTrunc(ArrayRef<LLT> Tys, unsigned Res, unsigned Op);
+ MachineInstrBuilder buildTrunc(unsigned Res, unsigned Op);
/// Build and insert a G_ICMP
///
/// \pre setBasicBlock or setMI must have been called.
///
/// \return a MachineInstrBuilder for the newly created instruction.
- MachineInstrBuilder buildICmp(ArrayRef<LLT> Tys, CmpInst::Predicate Pred,
+ MachineInstrBuilder buildICmp(CmpInst::Predicate Pred,
unsigned Res, unsigned Op0, unsigned Op1);
/// Build and insert a G_FCMP
/// \pre setBasicBlock or setMI must have been called.
///
/// \return a MachineInstrBuilder for the newly created instruction.
- MachineInstrBuilder buildFCmp(ArrayRef<LLT> Tys, CmpInst::Predicate Pred,
+ MachineInstrBuilder buildFCmp(CmpInst::Predicate Pred,
unsigned Res, unsigned Op0, unsigned Op1);
- /// Build and insert a \p Res = G_SELECT { \p Ty, s1 } \p Tst, \p Op0, \p Op1
+ /// Build and insert a \p Res = G_SELECT \p Tst, \p Op0, \p Op1
/// \pre setBasicBlock or setMI must have been called.
///
/// \return a MachineInstrBuilder for the newly created instruction.
- MachineInstrBuilder buildSelect(LLT Ty, unsigned Res, unsigned Tst,
+ MachineInstrBuilder buildSelect(unsigned Res, unsigned Tst,
unsigned Op0, unsigned Op1);
};
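For illustration only (not part of the patch), a minimal sketch of how the
reworked builder composes with the typed-vreg API; the helper name emitAdd and
the s64 type are hypothetical, the calls are the ones declared above.

#include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"
#include "llvm/CodeGen/LowLevelType.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
using namespace llvm;

// Emit Res = G_ADD Op0, Op1 at the builder's current insertion point,
// assuming Op0 and Op1 are already s64 generic vregs.
static unsigned emitAdd(MachineIRBuilder &MIRBuilder, MachineRegisterInfo &MRI,
                        unsigned Op0, unsigned Op1) {
  // The LLT is attached when the virtual register is created...
  unsigned Res = MRI.createGenericVirtualRegister(LLT::scalar(64));
  // ...so the build method no longer takes a type argument.
  MIRBuilder.buildAdd(Res, Op0, Op1);
  return Res;
}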
namespace llvm {
class LLVMContext;
class MachineInstr;
+class MachineRegisterInfo;
class Type;
class VectorType;
/// performed and the destination type.
std::pair<LegalizeAction, LLT> getAction(const InstrAspect &Aspect) const;
- /// Determine what action should be taken to legalize the given generic instruction.
+ /// Determine what action should be taken to legalize the given generic
+ /// instruction.
///
/// \returns a tuple consisting of the LegalizeAction that should be
/// performed, the type-index it should be performed on and the destination
/// type.
std::tuple<LegalizeAction, unsigned, LLT>
- getAction(const MachineInstr &MI) const;
+ getAction(const MachineInstr &MI, const MachineRegisterInfo &MRI) const;
/// Iterate the given function (typically something like doubling the width)
/// on Ty until we find a legal type for this operation.
return ActionIt->second;
}
- bool isLegal(const MachineInstr &MI) const;
+ bool isLegal(const MachineInstr &MI, const MachineRegisterInfo &MRI) const;
private:
static const int FirstOp = TargetOpcode::PRE_ISEL_GENERIC_OPCODE_START;
#include "llvm/ADT/iterator_range.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/CodeGen/MachineOperand.h"
-#include "llvm/CodeGen/LowLevelType.h"
#include "llvm/IR/DebugLoc.h"
#include "llvm/IR/InlineAsm.h"
#include "llvm/MC/MCInstrDesc.h"
DebugLoc debugLoc; // Source line information.
-#ifdef LLVM_BUILD_GLOBAL_ISEL
- /// Type of the instruction in case of a generic opcode.
- /// \invariant This must be LLT{} if getOpcode() is not
- /// in the range of generic opcodes.
- SmallVector<LLT, 1> Tys;
-#endif
-
MachineInstr(const MachineInstr&) = delete;
void operator=(const MachineInstr&) = delete;
// Use MachineFunction::DeleteMachineInstr() instead.
Flags &= ~((uint8_t)Flag);
}
- /// Set the type of the instruction.
- /// \pre getOpcode() is in the range of the generic opcodes.
- void setType(LLT Ty, unsigned Idx = 0);
- LLT getType(unsigned Idx = 0) const;
- unsigned getNumTypes() const;
- void removeTypes();
/// Return true if MI is in a bundle (but not the first MI in a bundle).
///
#include "llvm/ADT/iterator_range.h"
// PointerUnion needs to have access to the full RegisterBank type.
#include "llvm/CodeGen/GlobalISel/RegisterBank.h"
+#include "llvm/CodeGen/LowLevelType.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstrBundle.h"
#include "llvm/Target/TargetRegisterInfo.h"
/// started.
BitVector ReservedRegs;
- typedef DenseMap<unsigned, unsigned> VRegToSizeMap;
+ typedef DenseMap<unsigned, LLT> VRegToTypeMap;
- /// Map generic virtual registers to their actual size.
+ /// Map generic virtual registers to their actual type.
- mutable std::unique_ptr<VRegToSizeMap> VRegToSize;
+ mutable std::unique_ptr<VRegToTypeMap> VRegToType;
- /// Accessor for VRegToSize. This accessor should only be used
+ /// Accessor for VRegToType. This accessor should only be used
/// by global-isel related work.
- VRegToSizeMap &getVRegToSize() const {
- if (!VRegToSize)
- VRegToSize.reset(new VRegToSizeMap);
- return *VRegToSize.get();
+ VRegToTypeMap &getVRegToType() const {
+ if (!VRegToType)
+ VRegToType.reset(new VRegToTypeMap);
+ return *VRegToType.get();
}
/// Keep track of the physical registers that are live in to the function.
///
unsigned createVirtualRegister(const TargetRegisterClass *RegClass);
- /// Get the size in bits of \p VReg or 0 if VReg is not a generic
+ /// Get the low-level type of \p VReg or LLT{} if VReg is not a generic
/// (target independent) virtual register.
- unsigned getSize(unsigned VReg) const;
+ LLT getType(unsigned VReg) const;
- /// Set the size in bits of \p VReg to \p Size.
- /// Although the size should be set at build time, mir infrastructure
- /// is not yet able to do it.
- void setSize(unsigned VReg, unsigned Size);
+ /// Set the low-level type of \p VReg to \p Ty.
+ void setType(unsigned VReg, LLT Ty);
- /// Create and return a new generic virtual register with a size of \p Size.
- /// \pre Size > 0.
- unsigned createGenericVirtualRegister(unsigned Size);
+ /// Create and return a new generic virtual register with low-level
+ /// type \p Ty.
+ unsigned createGenericVirtualRegister(LLT Ty);
- /// Remove all sizes associated to virtual registers (after instruction
+ /// Remove all types associated with virtual registers (after instruction
/// selection and constraining of all generic virtual registers).
- void clearVirtRegSizes();
+ void clearVirtRegTypes();
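A short sketch of the per-register type interface declared above (the helper
splitType is hypothetical, not from the patch):

#include "llvm/CodeGen/LowLevelType.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
using namespace llvm;

// Create a generic vreg half as wide as an existing one. Assumes WideVReg is
// a generic scalar vreg; getType() would return LLT{} for a non-generic one.
static unsigned splitType(MachineRegisterInfo &MRI, unsigned WideVReg) {
  LLT WideTy = MRI.getType(WideVReg);
  LLT NarrowTy = LLT::scalar(WideTy.getSizeInBits() / 2);
  return MRI.createGenericVirtualRegister(NarrowTy);
}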
/// getNumVirtRegs - Return the number of virtual registers created.
///
OPERAND_REGISTER = 2,
OPERAND_MEMORY = 3,
OPERAND_PCREL = 4,
- OPERAND_FIRST_TARGET = 5
+
+ OPERAND_FIRST_GENERIC = 6,
+ OPERAND_GENERIC_0 = 6,
+ OPERAND_GENERIC_1 = 7,
+ OPERAND_GENERIC_2 = 8,
+ OPERAND_GENERIC_3 = 9,
+ OPERAND_GENERIC_4 = 10,
+ OPERAND_GENERIC_5 = 11,
+ OPERAND_LAST_GENERIC = 11,
+
+ OPERAND_FIRST_TARGET = 12,
};
}
/// \brief This holds information about one operand of a machine instruction,
/// \brief Set if this operand is an optional def.
bool isOptionalDef() const { return Flags & (1 << MCOI::OptionalDef); }
+
+ bool isGenericType() const {
+ return OperandType >= MCOI::OPERAND_FIRST_GENERIC &&
+ OperandType <= MCOI::OPERAND_LAST_GENERIC;
+ }
+
+ unsigned getGenericTypeIndex() const {
+ assert(isGenericType() && "non-generic types don't have an index");
+ return OperandType - MCOI::OPERAND_FIRST_GENERIC;
+ }
};
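As a sketch of how the new operand encoding can be queried (hypothetical
function; it assumes the usual public MCInstrDesc::OpInfo array):

#include "llvm/MC/MCInstrDesc.h"
#include "llvm/Support/raw_ostream.h"

// Print which operands of an instruction description share a generic type
// index; operands reporting the same index must be given the same LLT.
static void printGenericTypeTies(const llvm::MCInstrDesc &Desc) {
  for (unsigned I = 0, E = Desc.getNumOperands(); I != E; ++I) {
    const llvm::MCOperandInfo &Info = Desc.OpInfo[I];
    if (Info.isGenericType())
      llvm::errs() << "operand " << I << " -> type index "
                   << Info.getGenericTypeIndex() << "\n";
  }
}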
//===----------------------------------------------------------------------===//
// Extend the underlying scalar type of an operation, leaving the high bits
// unspecified.
def G_ANYEXT : Instruction {
- let OutOperandList = (outs unknown:$dst);
- let InOperandList = (ins unknown:$src);
+ let OutOperandList = (outs type0:$dst);
+ let InOperandList = (ins type1:$src);
let hasSideEffects = 0;
}
// Sign extend the underlying scalar type of an operation, copying the sign bit
// into the newly-created space.
def G_SEXT : Instruction {
- let OutOperandList = (outs unknown:$dst);
- let InOperandList = (ins unknown:$src);
+ let OutOperandList = (outs type0:$dst);
+ let InOperandList = (ins type1:$src);
let hasSideEffects = 0;
}
// Zero extend the underlying scalar type of an operation, putting zero bits
// into the newly-created space.
def G_ZEXT : Instruction {
- let OutOperandList = (outs unknown:$dst);
- let InOperandList = (ins unknown:$src);
+ let OutOperandList = (outs type0:$dst);
+ let InOperandList = (ins type1:$src);
let hasSideEffects = 0;
}
// Truncate the underlying scalar type of an operation. This is equivalent to
// G_EXTRACT for scalar types, but acts elementwise on vectors.
def G_TRUNC : Instruction {
- let OutOperandList = (outs unknown:$dst);
- let InOperandList = (ins unknown:$src);
+ let OutOperandList = (outs type0:$dst);
+ let InOperandList = (ins type1:$src);
let hasSideEffects = 0;
}
def G_FRAME_INDEX : Instruction {
- let OutOperandList = (outs unknown:$dst);
+ let OutOperandList = (outs type0:$dst);
let InOperandList = (ins unknown:$src2);
let hasSideEffects = 0;
}
def G_INTTOPTR : Instruction {
- let OutOperandList = (outs unknown:$dst);
- let InOperandList = (ins unknown:$src);
+ let OutOperandList = (outs type0:$dst);
+ let InOperandList = (ins type1:$src);
let hasSideEffects = 0;
}
def G_PTRTOINT : Instruction {
- let OutOperandList = (outs unknown:$dst);
- let InOperandList = (ins unknown:$src);
+ let OutOperandList = (outs type0:$dst);
+ let InOperandList = (ins type1:$src);
let hasSideEffects = 0;
}
def G_BITCAST : Instruction {
- let OutOperandList = (outs unknown:$dst);
- let InOperandList = (ins unknown:$src);
+ let OutOperandList = (outs type0:$dst);
+ let InOperandList = (ins type1:$src);
let hasSideEffects = 0;
}
def G_CONSTANT : Instruction {
- let OutOperandList = (outs unknown:$dst);
+ let OutOperandList = (outs type0:$dst);
let InOperandList = (ins unknown:$imm);
let hasSideEffects = 0;
}
def G_FCONSTANT : Instruction {
- let OutOperandList = (outs unknown:$dst);
+ let OutOperandList = (outs type0:$dst);
let InOperandList = (ins unknown:$imm);
let hasSideEffects = 0;
}
def G_TYPE : Instruction {
- let OutOperandList = (outs unknown:$dst);
+ let OutOperandList = (outs type0:$dst);
let InOperandList = (ins unknown:$imm);
let hasSideEffects = 0;
}
// Generic addition.
def G_ADD : Instruction {
- let OutOperandList = (outs unknown:$dst);
- let InOperandList = (ins unknown:$src1, unknown:$src2);
+ let OutOperandList = (outs type0:$dst);
+ let InOperandList = (ins type0:$src1, type0:$src2);
let hasSideEffects = 0;
let isCommutable = 1;
}
// Generic subtraction.
def G_SUB : Instruction {
- let OutOperandList = (outs unknown:$dst);
- let InOperandList = (ins unknown:$src1, unknown:$src2);
+ let OutOperandList = (outs type0:$dst);
+ let InOperandList = (ins type0:$src1, type0:$src2);
let hasSideEffects = 0;
let isCommutable = 0;
}
// Generic multiplication.
def G_MUL : Instruction {
- let OutOperandList = (outs unknown:$dst);
- let InOperandList = (ins unknown:$src1, unknown:$src2);
+ let OutOperandList = (outs type0:$dst);
+ let InOperandList = (ins type0:$src1, type0:$src2);
let hasSideEffects = 0;
let isCommutable = 1;
}
// Generic signed division.
def G_SDIV : Instruction {
- let OutOperandList = (outs unknown:$dst);
- let InOperandList = (ins unknown:$src1, unknown:$src2);
+ let OutOperandList = (outs type0:$dst);
+ let InOperandList = (ins type0:$src1, type0:$src2);
let hasSideEffects = 0;
let isCommutable = 0;
}
// Generic unsigned division.
def G_UDIV : Instruction {
- let OutOperandList = (outs unknown:$dst);
- let InOperandList = (ins unknown:$src1, unknown:$src2);
+ let OutOperandList = (outs type0:$dst);
+ let InOperandList = (ins type0:$src1, type0:$src2);
let hasSideEffects = 0;
let isCommutable = 0;
}
// Generic signed remainder.
def G_SREM : Instruction {
- let OutOperandList = (outs unknown:$dst);
- let InOperandList = (ins unknown:$src1, unknown:$src2);
+ let OutOperandList = (outs type0:$dst);
+ let InOperandList = (ins type0:$src1, type0:$src2);
let hasSideEffects = 0;
let isCommutable = 0;
}
// Generic unsigned remainder.
def G_UREM : Instruction {
- let OutOperandList = (outs unknown:$dst);
- let InOperandList = (ins unknown:$src1, unknown:$src2);
+ let OutOperandList = (outs type0:$dst);
+ let InOperandList = (ins type0:$src1, type0:$src2);
let hasSideEffects = 0;
let isCommutable = 0;
}
// Generic bitwise and.
def G_AND : Instruction {
- let OutOperandList = (outs unknown:$dst);
- let InOperandList = (ins unknown:$src1, unknown:$src2);
+ let OutOperandList = (outs type0:$dst);
+ let InOperandList = (ins type0:$src1, type0:$src2);
let hasSideEffects = 0;
let isCommutable = 1;
}
// Generic bitwise or.
def G_OR : Instruction {
- let OutOperandList = (outs unknown:$dst);
- let InOperandList = (ins unknown:$src1, unknown:$src2);
+ let OutOperandList = (outs type0:$dst);
+ let InOperandList = (ins type0:$src1, type0:$src2);
let hasSideEffects = 0;
let isCommutable = 1;
}
// Generic bitwise xor.
def G_XOR : Instruction {
- let OutOperandList = (outs unknown:$dst);
- let InOperandList = (ins unknown:$src1, unknown:$src2);
+ let OutOperandList = (outs type0:$dst);
+ let InOperandList = (ins type0:$src1, type0:$src2);
let hasSideEffects = 0;
let isCommutable = 1;
}
// Generic left-shift.
def G_SHL : Instruction {
- let OutOperandList = (outs unknown:$dst);
- let InOperandList = (ins unknown:$src1, unknown:$src2);
+ let OutOperandList = (outs type0:$dst);
+ let InOperandList = (ins type0:$src1, type0:$src2);
let hasSideEffects = 0;
}
// Generic logical right-shift.
def G_LSHR : Instruction {
- let OutOperandList = (outs unknown:$dst);
- let InOperandList = (ins unknown:$src1, unknown:$src2);
+ let OutOperandList = (outs type0:$dst);
+ let InOperandList = (ins type0:$src1, type0:$src2);
let hasSideEffects = 0;
}
// Generic arithmetic right-shift.
def G_ASHR : Instruction {
- let OutOperandList = (outs unknown:$dst);
- let InOperandList = (ins unknown:$src1, unknown:$src2);
+ let OutOperandList = (outs type0:$dst);
+ let InOperandList = (ins type0:$src1, type0:$src2);
let hasSideEffects = 0;
}
// Generic integer comparison.
def G_ICMP : Instruction {
- let OutOperandList = (outs unknown:$dst);
- let InOperandList = (ins unknown:$tst, unknown:$src1, unknown:$src2);
+ let OutOperandList = (outs type0:$dst);
+ let InOperandList = (ins unknown:$tst, type1:$src1, type1:$src2);
let hasSideEffects = 0;
}
// Generic floating-point comparison.
def G_FCMP : Instruction {
- let OutOperandList = (outs unknown:$dst);
- let InOperandList = (ins unknown:$tst, unknown:$src1, unknown:$src2);
+ let OutOperandList = (outs type0:$dst);
+ let InOperandList = (ins unknown:$tst, type1:$src1, type1:$src2);
let hasSideEffects = 0;
}
// Generic select
def G_SELECT : Instruction {
- let OutOperandList = (outs unknown:$dst);
- let InOperandList = (ins unknown:$tst, unknown:$src1, unknown:$src2);
+ let OutOperandList = (outs type0:$dst);
+ let InOperandList = (ins type1:$tst, type0:$src1, type0:$src2);
let hasSideEffects = 0;
}
// Generic unsigned addition consuming and producing a carry flag.
def G_UADDE : Instruction {
- let OutOperandList = (outs unknown:$dst, unknown:$carry_out);
- let InOperandList = (ins unknown:$src1, unknown:$src2, unknown:$carry_in);
+ let OutOperandList = (outs type0:$dst, type1:$carry_out);
+ let InOperandList = (ins type0:$src1, type0:$src2, type1:$carry_in);
let hasSideEffects = 0;
}
// Generic signed addition producing a carry flag.
def G_SADDO : Instruction {
- let OutOperandList = (outs unknown:$dst, unknown:$carry_out);
- let InOperandList = (ins unknown:$src1, unknown:$src2);
+ let OutOperandList = (outs type0:$dst, type1:$carry_out);
+ let InOperandList = (ins type0:$src1, type0:$src2);
let hasSideEffects = 0;
let isCommutable = 1;
}
// Generic unsigned subtraction consuming and producing a carry flag.
def G_USUBE : Instruction {
- let OutOperandList = (outs unknown:$dst, unknown:$carry_out);
- let InOperandList = (ins unknown:$src1, unknown:$src2, unknown:$carry_in);
+ let OutOperandList = (outs type0:$dst, type1:$carry_out);
+ let InOperandList = (ins type0:$src1, type0:$src2, type1:$carry_in);
let hasSideEffects = 0;
}
// Generic signed subtraction producing a carry flag.
def G_SSUBO : Instruction {
- let OutOperandList = (outs unknown:$dst, unknown:$carry_out);
- let InOperandList = (ins unknown:$src1, unknown:$src2);
+ let OutOperandList = (outs type0:$dst, type1:$carry_out);
+ let InOperandList = (ins type0:$src1, type0:$src2);
let hasSideEffects = 0;
}
// Generic unsigned multiplication producing a carry flag.
def G_UMULO : Instruction {
- let OutOperandList = (outs unknown:$dst, unknown:$carry_out);
- let InOperandList = (ins unknown:$src1, unknown:$src2);
+ let OutOperandList = (outs type0:$dst, type1:$carry_out);
+ let InOperandList = (ins type0:$src1, type0:$src2);
let hasSideEffects = 0;
let isCommutable = 1;
}
// Generic signed multiplication producing a carry flag.
def G_SMULO : Instruction {
- let OutOperandList = (outs unknown:$dst, unknown:$carry_out);
- let InOperandList = (ins unknown:$src1, unknown:$src2);
+ let OutOperandList = (outs type0:$dst, type1:$carry_out);
+ let InOperandList = (ins type0:$src1, type0:$src2);
let hasSideEffects = 0;
let isCommutable = 1;
}
//------------------------------------------------------------------------------
def G_FPEXT : Instruction {
- let OutOperandList = (outs unknown:$dst);
- let InOperandList = (ins unknown:$src);
+ let OutOperandList = (outs type0:$dst);
+ let InOperandList = (ins type1:$src);
let hasSideEffects = 0;
}
def G_FPTRUNC : Instruction {
- let OutOperandList = (outs unknown:$dst);
- let InOperandList = (ins unknown:$src);
+ let OutOperandList = (outs type0:$dst);
+ let InOperandList = (ins type1:$src);
let hasSideEffects = 0;
}
def G_FPTOSI : Instruction {
- let OutOperandList = (outs unknown:$dst);
- let InOperandList = (ins unknown:$src);
+ let OutOperandList = (outs type0:$dst);
+ let InOperandList = (ins type1:$src);
let hasSideEffects = 0;
}
def G_FPTOUI : Instruction {
- let OutOperandList = (outs unknown:$dst);
- let InOperandList = (ins unknown:$src);
+ let OutOperandList = (outs type0:$dst);
+ let InOperandList = (ins type1:$src);
let hasSideEffects = 0;
}
def G_SITOFP : Instruction {
- let OutOperandList = (outs unknown:$dst);
- let InOperandList = (ins unknown:$src);
+ let OutOperandList = (outs type0:$dst);
+ let InOperandList = (ins type1:$src);
let hasSideEffects = 0;
}
def G_UITOFP : Instruction {
- let OutOperandList = (outs unknown:$dst);
- let InOperandList = (ins unknown:$src);
+ let OutOperandList = (outs type0:$dst);
+ let InOperandList = (ins type1:$src);
let hasSideEffects = 0;
}
// Generic FP addition.
def G_FADD : Instruction {
- let OutOperandList = (outs unknown:$dst);
- let InOperandList = (ins unknown:$src1, unknown:$src2);
+ let OutOperandList = (outs type0:$dst);
+ let InOperandList = (ins type0:$src1, type0:$src2);
let hasSideEffects = 0;
let isCommutable = 1;
}
// Generic FP subtraction.
def G_FSUB : Instruction {
- let OutOperandList = (outs unknown:$dst);
- let InOperandList = (ins unknown:$src1, unknown:$src2);
+ let OutOperandList = (outs type0:$dst);
+ let InOperandList = (ins type0:$src1, type0:$src2);
let hasSideEffects = 0;
let isCommutable = 0;
}
// Generic FP multiplication.
def G_FMUL : Instruction {
- let OutOperandList = (outs unknown:$dst);
- let InOperandList = (ins unknown:$src1, unknown:$src2);
+ let OutOperandList = (outs type0:$dst);
+ let InOperandList = (ins type0:$src1, type0:$src2);
let hasSideEffects = 0;
let isCommutable = 1;
}
// Generic FP division.
def G_FDIV : Instruction {
- let OutOperandList = (outs unknown:$dst);
- let InOperandList = (ins unknown:$src1, unknown:$src2);
+ let OutOperandList = (outs type0:$dst);
+ let InOperandList = (ins type0:$src1, type0:$src2);
let hasSideEffects = 0;
}
// Generic FP remainder.
def G_FREM : Instruction {
- let OutOperandList = (outs unknown:$dst);
- let InOperandList = (ins unknown:$src1, unknown:$src2);
+ let OutOperandList = (outs type0:$dst);
+ let InOperandList = (ins type0:$src1, type0:$src2);
let hasSideEffects = 0;
}
// Generic load. Expects a MachineMemOperand in addition to explicit operands.
def G_LOAD : Instruction {
- let OutOperandList = (outs unknown:$dst);
- let InOperandList = (ins unknown:$addr);
+ let OutOperandList = (outs type0:$dst);
+ let InOperandList = (ins type1:$addr);
let hasSideEffects = 0;
let mayLoad = 1;
}
// Generic store. Expects a MachineMemOperand in addition to explicit operands.
def G_STORE : Instruction {
let OutOperandList = (outs);
- let InOperandList = (ins unknown:$src, unknown:$addr);
+ let InOperandList = (ins type0:$src, type1:$addr);
let hasSideEffects = 0;
let mayStore = 1;
}
// indices (interleaved with the values in the operand list "op0, bit0, op1,
// bit1, ...")).
def G_INSERT : Instruction {
- let OutOperandList = (outs unknown:$dst);
- let InOperandList = (ins unknown:$src, variable_ops);
+ let OutOperandList = (outs type0:$dst);
+ let InOperandList = (ins type0:$src, variable_ops);
let hasSideEffects = 0;
}
// bit 0). Essentially a G_INSERT where $src is an IMPLICIT_DEF, but it's so
// important to legalization it probably deserves its own instruction.
def G_SEQUENCE : Instruction {
- let OutOperandList = (outs unknown:$dst);
+ let OutOperandList = (outs type0:$dst);
let InOperandList = (ins variable_ops);
let hasSideEffects = 0;
}
// PHI node bearing an LLT.
def G_PHI : Instruction {
- let OutOperandList = (outs unknown:$dst);
+ let OutOperandList = (outs type0:$dst);
let InOperandList = (ins variable_ops);
let hasSideEffects = 0;
}
// Generic conditional branch.
def G_BRCOND : Instruction {
let OutOperandList = (outs);
- let InOperandList = (ins unknown:$tst, unknown:$truebb);
+ let InOperandList = (ins type0:$tst, unknown:$truebb);
let hasSideEffects = 0;
let isBranch = 1;
let isTerminator = 1;
def f64imm : Operand<f64>;
}
+// Register operands for generic instructions don't have an MVT, but do have
+// constraints linking the operands (e.g. all operands of a G_ADD must
+// have the same LLT).
+class TypedOperand<string Ty> : Operand<untyped> {
+ let OperandType = Ty;
+}
+
+def type0 : TypedOperand<"OPERAND_GENERIC_0">;
+def type1 : TypedOperand<"OPERAND_GENERIC_1">;
+def type2 : TypedOperand<"OPERAND_GENERIC_2">;
+def type3 : TypedOperand<"OPERAND_GENERIC_3">;
+def type4 : TypedOperand<"OPERAND_GENERIC_4">;
+def type5 : TypedOperand<"OPERAND_GENERIC_5">;
+
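To make the constraint concrete in C++ terms, a hypothetical checker (not part
of the patch) for the G_ADD example mentioned above:

#include <cassert>
#include "llvm/CodeGen/LowLevelType.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/Target/TargetOpcodes.h"

// All three register operands of a G_ADD are tagged type0, so the LLTs on
// their vregs must agree.
static bool addTypesAgree(const llvm::MachineInstr &MI,
                          const llvm::MachineRegisterInfo &MRI) {
  assert(MI.getOpcode() == llvm::TargetOpcode::G_ADD && "expected G_ADD");
  llvm::LLT Ty = MRI.getType(MI.getOperand(0).getReg());
  return Ty == MRI.getType(MI.getOperand(1).getReg()) &&
         Ty == MRI.getType(MI.getOperand(2).getReg());
}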
/// zero_reg definition - Special node to stand for the zero register.
///
def zero_reg;
// we need to concat together to produce the value.
assert(Val.getType()->isSized() &&
"Don't know how to create an empty vreg");
- unsigned Size = DL->getTypeSizeInBits(Val.getType());
- unsigned VReg = MRI->createGenericVirtualRegister(Size);
+ unsigned VReg = MRI->createGenericVirtualRegister(LLT{*Val.getType(), DL});
ValReg = VReg;
if (auto CV = dyn_cast<Constant>(&Val)) {
unsigned Op0 = getOrCreateVReg(*U.getOperand(0));
unsigned Op1 = getOrCreateVReg(*U.getOperand(1));
unsigned Res = getOrCreateVReg(U);
- MIRBuilder.buildInstr(Opcode, LLT{*U.getType()})
- .addDef(Res)
- .addUse(Op0)
- .addUse(Op1);
+ MIRBuilder.buildInstr(Opcode).addDef(Res).addUse(Op0).addUse(Op1);
return true;
}
cast<ConstantExpr>(U).getPredicate());
if (CmpInst::isIntPredicate(Pred))
- MIRBuilder.buildICmp(
- {LLT{*U.getType()}, LLT{*U.getOperand(0)->getType()}}, Pred, Res, Op0,
- Op1);
+ MIRBuilder.buildICmp(Pred, Res, Op0, Op1);
else
- MIRBuilder.buildFCmp(
- {LLT{*U.getType()}, LLT{*U.getOperand(0)->getType()}}, Pred, Res, Op0,
- Op1);
+ MIRBuilder.buildFCmp(Pred, Res, Op0, Op1);
return true;
}
unsigned Tst = getOrCreateVReg(*BrInst.getCondition());
const BasicBlock &TrueTgt = *cast<BasicBlock>(BrInst.getSuccessor(Succ++));
MachineBasicBlock &TrueBB = getOrCreateBB(TrueTgt);
- MIRBuilder.buildBrCond(LLT{*BrInst.getCondition()->getType()}, Tst, TrueBB);
+ MIRBuilder.buildBrCond(Tst, TrueBB);
}
const BasicBlock &BrTgt = *cast<BasicBlock>(BrInst.getSuccessor(Succ));
LLT VTy{*LI.getType(), DL}, PTy{*LI.getPointerOperand()->getType()};
MIRBuilder.buildLoad(
- VTy, PTy, Res, Addr,
+ Res, Addr,
*MF.getMachineMemOperand(
MachinePointerInfo(LI.getPointerOperand()), MachineMemOperand::MOLoad,
DL->getTypeStoreSize(LI.getType()), getMemOpAlignment(LI)));
PTy{*SI.getPointerOperand()->getType()};
MIRBuilder.buildStore(
- VTy, PTy, Val, Addr,
+ Val, Addr,
*MF.getMachineMemOperand(
MachinePointerInfo(SI.getPointerOperand()),
MachineMemOperand::MOStore,
uint64_t Offset = 8 * DL->getIndexedOffsetInType(Src->getType(), Indices);
unsigned Res = getOrCreateVReg(U);
- MIRBuilder.buildExtract(LLT{*U.getType(), DL}, Res, Offset,
- LLT{*Src->getType(), DL}, getOrCreateVReg(*Src));
+ MIRBuilder.buildExtract(Res, Offset, getOrCreateVReg(*Src));
return true;
}
unsigned Res = getOrCreateVReg(U);
const Value &Inserted = *U.getOperand(1);
- MIRBuilder.buildInsert(LLT{*U.getType(), DL}, Res, getOrCreateVReg(*Src),
- LLT{*Inserted.getType(), DL},
- getOrCreateVReg(Inserted), Offset);
+ MIRBuilder.buildInsert(Res, getOrCreateVReg(*Src), getOrCreateVReg(Inserted),
+ Offset);
return true;
}
bool IRTranslator::translateSelect(const User &U) {
- MIRBuilder.buildSelect(
- LLT{*U.getType()}, getOrCreateVReg(U), getOrCreateVReg(*U.getOperand(0)),
- getOrCreateVReg(*U.getOperand(1)), getOrCreateVReg(*U.getOperand(2)));
+ MIRBuilder.buildSelect(getOrCreateVReg(U), getOrCreateVReg(*U.getOperand(0)),
+ getOrCreateVReg(*U.getOperand(1)),
+ getOrCreateVReg(*U.getOperand(2)));
return true;
}
bool IRTranslator::translateCast(unsigned Opcode, const User &U) {
unsigned Op = getOrCreateVReg(*U.getOperand(0));
unsigned Res = getOrCreateVReg(U);
- MIRBuilder
- .buildInstr(Opcode, {LLT{*U.getType()}, LLT{*U.getOperand(0)->getType()}})
- .addDef(Res)
- .addUse(Op);
+ MIRBuilder.buildInstr(Opcode).addDef(Res).addUse(Op);
return true;
}
LLT Ty{*CI.getOperand(0)->getType()};
LLT s1 = LLT::scalar(1);
unsigned Width = Ty.getSizeInBits();
- unsigned Res = MRI->createGenericVirtualRegister(Width);
- unsigned Overflow = MRI->createGenericVirtualRegister(1);
- auto MIB = MIRBuilder.buildInstr(Op, {Ty, s1})
+ unsigned Res = MRI->createGenericVirtualRegister(Ty);
+ unsigned Overflow = MRI->createGenericVirtualRegister(s1);
+ auto MIB = MIRBuilder.buildInstr(Op)
.addDef(Res)
.addDef(Overflow)
.addUse(getOrCreateVReg(*CI.getOperand(0)))
.addUse(getOrCreateVReg(*CI.getOperand(1)));
if (Op == TargetOpcode::G_UADDE || Op == TargetOpcode::G_USUBE) {
- unsigned Zero = MRI->createGenericVirtualRegister(1);
- EntryBuilder.buildConstant(s1, Zero, 0);
+ unsigned Zero = MRI->createGenericVirtualRegister(s1);
+ EntryBuilder.buildConstant(Zero, 0);
MIB.addUse(Zero);
}
- MIRBuilder.buildSequence(LLT{*CI.getType(), DL}, getOrCreateVReg(CI), Ty, Res,
- 0, s1, Overflow, Width);
+ MIRBuilder.buildSequence(getOrCreateVReg(CI), Res, 0, Overflow, Width);
return true;
}
if (translateKnownIntrinsic(CI, ID))
return true;
- // Need types (starting with return) & args.
- SmallVector<LLT, 4> Tys;
- Tys.emplace_back(*CI.getType());
- for (auto &Arg : CI.arg_operands())
- Tys.emplace_back(*Arg->getType());
-
unsigned Res = CI.getType()->isVoidTy() ? 0 : getOrCreateVReg(CI);
MachineInstrBuilder MIB =
- MIRBuilder.buildIntrinsic(Tys, ID, Res, !CI.doesNotAccessMemory());
+ MIRBuilder.buildIntrinsic(ID, Res, !CI.doesNotAccessMemory());
for (auto &Arg : CI.arg_operands()) {
if (ConstantInt *CI = dyn_cast<ConstantInt>(Arg))
unsigned Res = getOrCreateVReg(AI);
int FI = MF.getFrameInfo().CreateStackObject(Size, Alignment, false, &AI);
- MIRBuilder.buildFrameIndex(LLT::pointer(0), Res, FI);
+ MIRBuilder.buildFrameIndex(Res, FI);
return true;
}
bool IRTranslator::translatePHI(const User &U) {
const PHINode &PI = cast<PHINode>(U);
- auto MIB = MIRBuilder.buildInstr(TargetOpcode::G_PHI, LLT{*U.getType()});
+ auto MIB = MIRBuilder.buildInstr(TargetOpcode::G_PHI);
MIB.addDef(getOrCreateVReg(PI));
PendingPHIs.emplace_back(&PI, MIB.getInstr());
bool IRTranslator::translate(const Constant &C, unsigned Reg) {
if (auto CI = dyn_cast<ConstantInt>(&C))
- EntryBuilder.buildConstant(LLT{*CI->getType()}, Reg, CI->getZExtValue());
+ EntryBuilder.buildConstant(Reg, CI->getZExtValue());
else if (auto CF = dyn_cast<ConstantFP>(&C))
- EntryBuilder.buildFConstant(LLT{*CF->getType()}, Reg, *CF);
+ EntryBuilder.buildFConstant(Reg, *CF);
else if (isa<UndefValue>(C))
EntryBuilder.buildInstr(TargetOpcode::IMPLICIT_DEF).addDef(Reg);
else if (isa<ConstantPointerNull>(C))
- EntryBuilder.buildInstr(TargetOpcode::G_CONSTANT, LLT{*C.getType()})
+ EntryBuilder.buildInstr(TargetOpcode::G_CONSTANT)
.addDef(Reg)
.addImm(0);
else if (auto CE = dyn_cast<ConstantExpr>(&C)) {
// The RegBankSelected property is already checked in the verifier. Note
// that it has the same layering problem, but we only use inline methods so
// end up not needing to link against the GlobalISel library.
+ const MachineRegisterInfo &MRI = MF.getRegInfo();
if (const MachineLegalizer *MLI = MF.getSubtarget().getMachineLegalizer())
for (const MachineBasicBlock &MBB : MF)
for (const MachineInstr &MI : MBB)
- if (isPreISelGenericOpcode(MI.getOpcode()) && !MLI->isLegal(MI))
+ if (isPreISelGenericOpcode(MI.getOpcode()) && !MLI->isLegal(MI, MRI))
reportSelectionError(MI, "Instruction is not legal");
#endif
// the vreg instead, but that's not ideal either, because it's saying that
// vregs have types, which they really don't. But then again, LLT is just
// a size and a "shape": it's probably the same information as regbank info.
- MF.getRegInfo().clearVirtRegSizes();
+ MF.getRegInfo().clearVirtRegTypes();
// FIXME: Should we accurately track changes?
return true;
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
+#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetOpcodes.h"
#include "llvm/Target/TargetSubtargetInfo.h"
void MachineIRBuilder::setMF(MachineFunction &MF) {
this->MF = &MF;
this->MBB = nullptr;
+ this->MRI = &MF.getRegInfo();
this->TII = MF.getSubtarget().getInstrInfo();
this->DL = DebugLoc();
this->MI = nullptr;
// Build instruction variants.
//------------------------------------------------------------------------------
-MachineInstrBuilder MachineIRBuilder::buildInstr(unsigned Opcode,
- ArrayRef<LLT> Tys) {
+MachineInstrBuilder MachineIRBuilder::buildInstr(unsigned Opcode) {
MachineInstrBuilder MIB = BuildMI(getMF(), DL, getTII().get(Opcode));
- if (Tys.size() > 0) {
- assert(isPreISelGenericOpcode(Opcode) &&
- "Only generic instruction can have a type");
- for (unsigned i = 0; i < Tys.size(); ++i)
- MIB->setType(Tys[i], i);
- } else
- assert(!isPreISelGenericOpcode(Opcode) &&
- "Generic instruction must have a type");
getMBB().insert(getInsertPt(), MIB);
if (InsertedInstr)
InsertedInstr(MIB);
return MIB;
}
-MachineInstrBuilder MachineIRBuilder::buildFrameIndex(LLT Ty, unsigned Res,
- int Idx) {
- return buildInstr(TargetOpcode::G_FRAME_INDEX, Ty)
+MachineInstrBuilder MachineIRBuilder::buildFrameIndex(unsigned Res, int Idx) {
+ return buildInstr(TargetOpcode::G_FRAME_INDEX)
.addDef(Res)
.addFrameIndex(Idx);
}
-MachineInstrBuilder MachineIRBuilder::buildAdd(LLT Ty, unsigned Res,
- unsigned Op0, unsigned Op1) {
- return buildInstr(TargetOpcode::G_ADD, Ty)
+MachineInstrBuilder MachineIRBuilder::buildAdd(unsigned Res, unsigned Op0,
+ unsigned Op1) {
+ return buildInstr(TargetOpcode::G_ADD)
.addDef(Res)
.addUse(Op0)
.addUse(Op1);
}
-MachineInstrBuilder MachineIRBuilder::buildSub(LLT Ty, unsigned Res,
- unsigned Op0, unsigned Op1) {
- return buildInstr(TargetOpcode::G_SUB, Ty)
+MachineInstrBuilder MachineIRBuilder::buildSub(unsigned Res, unsigned Op0,
+ unsigned Op1) {
+ return buildInstr(TargetOpcode::G_SUB)
.addDef(Res)
.addUse(Op0)
.addUse(Op1);
}
-MachineInstrBuilder MachineIRBuilder::buildMul(LLT Ty, unsigned Res,
- unsigned Op0, unsigned Op1) {
- return buildInstr(TargetOpcode::G_MUL, Ty)
+MachineInstrBuilder MachineIRBuilder::buildMul(unsigned Res, unsigned Op0,
+ unsigned Op1) {
+ return buildInstr(TargetOpcode::G_MUL)
.addDef(Res)
.addUse(Op0)
.addUse(Op1);
}
MachineInstrBuilder MachineIRBuilder::buildBr(MachineBasicBlock &Dest) {
- return buildInstr(TargetOpcode::G_BR, LLT::unsized()).addMBB(&Dest);
+ return buildInstr(TargetOpcode::G_BR).addMBB(&Dest);
}
MachineInstrBuilder MachineIRBuilder::buildCopy(unsigned Res, unsigned Op) {
return buildInstr(TargetOpcode::COPY).addDef(Res).addUse(Op);
}
-MachineInstrBuilder MachineIRBuilder::buildConstant(LLT Ty, unsigned Res,
- int64_t Val) {
- return buildInstr(TargetOpcode::G_CONSTANT, Ty).addDef(Res).addImm(Val);
+MachineInstrBuilder MachineIRBuilder::buildConstant(unsigned Res, int64_t Val) {
+ return buildInstr(TargetOpcode::G_CONSTANT).addDef(Res).addImm(Val);
}
-MachineInstrBuilder MachineIRBuilder::buildFConstant(LLT Ty, unsigned Res,
- const ConstantFP &Val) {
- return buildInstr(TargetOpcode::G_FCONSTANT, Ty).addDef(Res).addFPImm(&Val);
+MachineInstrBuilder MachineIRBuilder::buildFConstant(unsigned Res,
+ const ConstantFP &Val) {
+ return buildInstr(TargetOpcode::G_FCONSTANT).addDef(Res).addFPImm(&Val);
}
-MachineInstrBuilder MachineIRBuilder::buildBrCond(LLT Ty, unsigned Tst,
+MachineInstrBuilder MachineIRBuilder::buildBrCond(unsigned Tst,
MachineBasicBlock &Dest) {
- return buildInstr(TargetOpcode::G_BRCOND, Ty).addUse(Tst).addMBB(&Dest);
+ return buildInstr(TargetOpcode::G_BRCOND).addUse(Tst).addMBB(&Dest);
}
-
- MachineInstrBuilder MachineIRBuilder::buildLoad(LLT VTy, LLT PTy, unsigned Res,
- unsigned Addr,
- MachineMemOperand &MMO) {
- return buildInstr(TargetOpcode::G_LOAD, {VTy, PTy})
+MachineInstrBuilder MachineIRBuilder::buildLoad(unsigned Res, unsigned Addr,
+ MachineMemOperand &MMO) {
+ return buildInstr(TargetOpcode::G_LOAD)
.addDef(Res)
.addUse(Addr)
.addMemOperand(&MMO);
}
-MachineInstrBuilder MachineIRBuilder::buildStore(LLT VTy, LLT PTy,
- unsigned Val, unsigned Addr,
- MachineMemOperand &MMO) {
- return buildInstr(TargetOpcode::G_STORE, {VTy, PTy})
+MachineInstrBuilder MachineIRBuilder::buildStore(unsigned Val, unsigned Addr,
+ MachineMemOperand &MMO) {
+ return buildInstr(TargetOpcode::G_STORE)
.addUse(Val)
.addUse(Addr)
.addMemOperand(&MMO);
}
-MachineInstrBuilder
-MachineIRBuilder::buildUAdde(ArrayRef<LLT> Tys, unsigned Res, unsigned CarryOut,
- unsigned Op0, unsigned Op1, unsigned CarryIn) {
- return buildInstr(TargetOpcode::G_UADDE, Tys)
+MachineInstrBuilder MachineIRBuilder::buildUAdde(unsigned Res,
+ unsigned CarryOut,
+ unsigned Op0, unsigned Op1,
+ unsigned CarryIn) {
+ return buildInstr(TargetOpcode::G_UADDE)
.addDef(Res)
.addDef(CarryOut)
      .addUse(Op0)
      .addUse(Op1)
      .addUse(CarryIn);
}
-MachineInstrBuilder MachineIRBuilder::buildType(LLT Ty,
- unsigned Res, unsigned Op) {
- return buildInstr(TargetOpcode::G_TYPE, Ty).addDef(Res).addUse(Op);
+MachineInstrBuilder MachineIRBuilder::buildType(unsigned Res, unsigned Op) {
+ return buildInstr(TargetOpcode::G_TYPE).addDef(Res).addUse(Op);
}
-MachineInstrBuilder MachineIRBuilder::buildAnyExt(ArrayRef<LLT> Tys,
- unsigned Res, unsigned Op) {
- validateTruncExt(Tys, true);
- return buildInstr(TargetOpcode::G_ANYEXT, Tys).addDef(Res).addUse(Op);
+MachineInstrBuilder MachineIRBuilder::buildAnyExt(unsigned Res, unsigned Op) {
+ validateTruncExt(Res, Op, true);
+ return buildInstr(TargetOpcode::G_ANYEXT).addDef(Res).addUse(Op);
}
-MachineInstrBuilder MachineIRBuilder::buildSExt(ArrayRef<LLT> Tys, unsigned Res,
- unsigned Op) {
- validateTruncExt(Tys, true);
- return buildInstr(TargetOpcode::G_SEXT, Tys).addDef(Res).addUse(Op);
+MachineInstrBuilder MachineIRBuilder::buildSExt(unsigned Res, unsigned Op) {
+ validateTruncExt(Res, Op, true);
+ return buildInstr(TargetOpcode::G_SEXT).addDef(Res).addUse(Op);
}
-MachineInstrBuilder MachineIRBuilder::buildZExt(ArrayRef<LLT> Tys, unsigned Res,
- unsigned Op) {
- validateTruncExt(Tys, true);
- return buildInstr(TargetOpcode::G_ZEXT, Tys).addDef(Res).addUse(Op);
+MachineInstrBuilder MachineIRBuilder::buildZExt(unsigned Res, unsigned Op) {
+ validateTruncExt(Res, Op, true);
+ return buildInstr(TargetOpcode::G_ZEXT).addDef(Res).addUse(Op);
}
-MachineInstrBuilder MachineIRBuilder::buildExtract(ArrayRef<LLT> ResTys,
- ArrayRef<unsigned> Results,
+MachineInstrBuilder MachineIRBuilder::buildExtract(ArrayRef<unsigned> Results,
ArrayRef<uint64_t> Indices,
- LLT SrcTy, unsigned Src) {
- assert(ResTys.size() == Results.size() && Results.size() == Indices.size() &&
- "inconsistent number of regs");
+ unsigned Src) {
+ assert(Results.size() == Indices.size() && "inconsistent number of regs");
assert(!Results.empty() && "invalid trivial extract");
assert(std::is_sorted(Indices.begin(), Indices.end()) &&
"extract offsets must be in ascending order");
auto MIB = BuildMI(getMF(), DL, getTII().get(TargetOpcode::G_EXTRACT));
- for (unsigned i = 0; i < ResTys.size(); ++i)
- MIB->setType(LLT::scalar(ResTys[i].getSizeInBits()), i);
- MIB->setType(LLT::scalar(SrcTy.getSizeInBits()), ResTys.size());
-
for (auto Res : Results)
MIB.addDef(Res);
}
MachineInstrBuilder
-MachineIRBuilder::buildSequence(LLT ResTy, unsigned Res,
- ArrayRef<LLT> OpTys,
+MachineIRBuilder::buildSequence(unsigned Res,
ArrayRef<unsigned> Ops,
ArrayRef<unsigned> Indices) {
- assert(OpTys.size() == Ops.size() && Ops.size() == Indices.size() &&
- "incompatible args");
+ assert(Ops.size() == Indices.size() && "incompatible args");
assert(!Ops.empty() && "invalid trivial sequence");
assert(std::is_sorted(Indices.begin(), Indices.end()) &&
"sequence offsets must be in ascending order");
- MachineInstrBuilder MIB =
- buildInstr(TargetOpcode::G_SEQUENCE, LLT::scalar(ResTy.getSizeInBits()));
+ MachineInstrBuilder MIB = buildInstr(TargetOpcode::G_SEQUENCE);
MIB.addDef(Res);
for (unsigned i = 0; i < Ops.size(); ++i) {
MIB.addUse(Ops[i]);
MIB.addImm(Indices[i]);
- MIB->setType(LLT::scalar(OpTys[i].getSizeInBits()), MIB->getNumTypes());
}
return MIB;
}
-MachineInstrBuilder MachineIRBuilder::buildIntrinsic(ArrayRef<LLT> Tys,
- Intrinsic::ID ID,
+MachineInstrBuilder MachineIRBuilder::buildIntrinsic(Intrinsic::ID ID,
unsigned Res,
bool HasSideEffects) {
auto MIB =
buildInstr(HasSideEffects ? TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS
- : TargetOpcode::G_INTRINSIC,
- Tys);
+ : TargetOpcode::G_INTRINSIC);
if (Res)
MIB.addDef(Res);
MIB.addIntrinsicID(ID);
return MIB;
}
-MachineInstrBuilder MachineIRBuilder::buildTrunc(ArrayRef<LLT> Tys,
- unsigned Res, unsigned Op) {
- validateTruncExt(Tys, false);
- return buildInstr(TargetOpcode::G_TRUNC, Tys).addDef(Res).addUse(Op);
+MachineInstrBuilder MachineIRBuilder::buildTrunc(unsigned Res, unsigned Op) {
+ validateTruncExt(Res, Op, false);
+ return buildInstr(TargetOpcode::G_TRUNC).addDef(Res).addUse(Op);
}
-MachineInstrBuilder MachineIRBuilder::buildFPTrunc(ArrayRef<LLT> Tys,
- unsigned Res, unsigned Op) {
- validateTruncExt(Tys, false);
- return buildInstr(TargetOpcode::G_FPTRUNC, Tys).addDef(Res).addUse(Op);
+MachineInstrBuilder MachineIRBuilder::buildFPTrunc(unsigned Res, unsigned Op) {
+ validateTruncExt(Res, Op, false);
+ return buildInstr(TargetOpcode::G_FPTRUNC).addDef(Res).addUse(Op);
}
-MachineInstrBuilder MachineIRBuilder::buildICmp(ArrayRef<LLT> Tys,
- CmpInst::Predicate Pred,
+MachineInstrBuilder MachineIRBuilder::buildICmp(CmpInst::Predicate Pred,
unsigned Res, unsigned Op0,
unsigned Op1) {
- return buildInstr(TargetOpcode::G_ICMP, Tys)
+ return buildInstr(TargetOpcode::G_ICMP)
.addDef(Res)
.addPredicate(Pred)
.addUse(Op0)
.addUse(Op1);
}
-MachineInstrBuilder MachineIRBuilder::buildFCmp(ArrayRef<LLT> Tys,
- CmpInst::Predicate Pred,
+MachineInstrBuilder MachineIRBuilder::buildFCmp(CmpInst::Predicate Pred,
unsigned Res, unsigned Op0,
unsigned Op1) {
- return buildInstr(TargetOpcode::G_FCMP, Tys)
+ return buildInstr(TargetOpcode::G_FCMP)
.addDef(Res)
.addPredicate(Pred)
.addUse(Op0)
.addUse(Op1);
}
-MachineInstrBuilder MachineIRBuilder::buildSelect(LLT Ty, unsigned Res,
- unsigned Tst,
+MachineInstrBuilder MachineIRBuilder::buildSelect(unsigned Res, unsigned Tst,
unsigned Op0, unsigned Op1) {
- return buildInstr(TargetOpcode::G_SELECT, {Ty, LLT::scalar(1)})
+ return buildInstr(TargetOpcode::G_SELECT)
.addDef(Res)
.addUse(Tst)
.addUse(Op0)
.addUse(Op1);
}
-void MachineIRBuilder::validateTruncExt(ArrayRef<LLT> Tys, bool IsExtend) {
+void MachineIRBuilder::validateTruncExt(unsigned Dst, unsigned Src,
+ bool IsExtend) {
#ifndef NDEBUG
- assert(Tys.size() == 2 && "cast should have a source and a dest type");
- LLT DstTy{Tys[0]}, SrcTy{Tys[1]};
+ LLT SrcTy = MRI->getType(Src);
+ LLT DstTy = MRI->getType(Dst);
if (DstTy.isVector()) {
assert(SrcTy.isVector() && "mismatched cast between vector and non-vector");
MachineLegalizeHelper::LegalizeResult
MachineLegalizeHelper::legalizeInstrStep(MachineInstr &MI,
const MachineLegalizer &Legalizer) {
- auto Action = Legalizer.getAction(MI);
+ auto Action = Legalizer.getAction(MI, MRI);
switch (std::get<0>(Action)) {
case MachineLegalizer::Legal:
return AlreadyLegal;
SmallVectorImpl<unsigned> &VRegs) {
unsigned Size = Ty.getSizeInBits();
SmallVector<uint64_t, 4> Indexes;
- SmallVector<LLT, 4> ResTys;
for (int i = 0; i < NumParts; ++i) {
- VRegs.push_back(MRI.createGenericVirtualRegister(Size));
+ VRegs.push_back(MRI.createGenericVirtualRegister(Ty));
Indexes.push_back(i * Size);
- ResTys.push_back(Ty);
}
- MIRBuilder.buildExtract(ResTys, VRegs, Indexes,
- LLT::scalar(Ty.getSizeInBits() * NumParts), Reg);
+ MIRBuilder.buildExtract(VRegs, Indexes, Reg);
}
MachineLegalizeHelper::LegalizeResult
MachineLegalizeHelper::libcall(MachineInstr &MI) {
- unsigned Size = MI.getType().getSizeInBits();
+ LLT Ty = MRI.getType(MI.getOperand(0).getReg());
+ unsigned Size = Ty.getSizeInBits();
MIRBuilder.setInstr(MI);
switch (MI.getOpcode()) {
case TargetOpcode::G_ADD: {
// Expand in terms of carry-setting/consuming G_ADDE instructions.
unsigned NarrowSize = NarrowTy.getSizeInBits();
- int NumParts = MI.getType().getSizeInBits() / NarrowSize;
+ int NumParts = MRI.getType(MI.getOperand(0).getReg()).getSizeInBits() /
+ NarrowTy.getSizeInBits();
MIRBuilder.setInstr(MI);
extractParts(MI.getOperand(1).getReg(), NarrowTy, NumParts, Src1Regs);
extractParts(MI.getOperand(2).getReg(), NarrowTy, NumParts, Src2Regs);
- unsigned CarryIn = MRI.createGenericVirtualRegister(1);
- MIRBuilder.buildConstant(LLT::scalar(1), CarryIn, 0);
+ unsigned CarryIn = MRI.createGenericVirtualRegister(LLT::scalar(1));
+ MIRBuilder.buildConstant(CarryIn, 0);
- SmallVector<LLT, 2> DstTys;
for (int i = 0; i < NumParts; ++i) {
- unsigned DstReg = MRI.createGenericVirtualRegister(NarrowSize);
- unsigned CarryOut = MRI.createGenericVirtualRegister(1);
+ unsigned DstReg = MRI.createGenericVirtualRegister(NarrowTy);
+ unsigned CarryOut = MRI.createGenericVirtualRegister(LLT::scalar(1));
- MIRBuilder.buildUAdde(NarrowTy, DstReg, CarryOut, Src1Regs[i],
+ MIRBuilder.buildUAdde(DstReg, CarryOut, Src1Regs[i],
Src2Regs[i], CarryIn);
- DstTys.push_back(NarrowTy);
DstRegs.push_back(DstReg);
Indexes.push_back(i * NarrowSize);
CarryIn = CarryOut;
}
- MIRBuilder.buildSequence(MI.getType(), MI.getOperand(0).getReg(), DstTys,
- DstRegs, Indexes);
+ unsigned DstReg = MI.getOperand(0).getReg();
+ MIRBuilder.buildSequence(DstReg, DstRegs, Indexes);
MI.eraseFromParent();
return Legalized;
}
MachineLegalizeHelper::LegalizeResult
MachineLegalizeHelper::widenScalar(MachineInstr &MI, unsigned TypeIdx,
LLT WideTy) {
- LLT Ty = MI.getType();
+ LLT Ty = MRI.getType(MI.getOperand(0).getReg());
unsigned WideSize = WideTy.getSizeInBits();
MIRBuilder.setInstr(MI);
// Perform operation at larger width (any extension is fine here, high bits
// don't affect the result) and then truncate the result back to the
// original type.
- unsigned Src1Ext = MRI.createGenericVirtualRegister(WideSize);
- unsigned Src2Ext = MRI.createGenericVirtualRegister(WideSize);
- MIRBuilder.buildAnyExt({WideTy, Ty}, Src1Ext, MI.getOperand(1).getReg());
- MIRBuilder.buildAnyExt({WideTy, Ty}, Src2Ext, MI.getOperand(2).getReg());
-
- unsigned DstExt = MRI.createGenericVirtualRegister(WideSize);
- MIRBuilder.buildInstr(MI.getOpcode(), WideTy)
- .addDef(DstExt).addUse(Src1Ext).addUse(Src2Ext);
-
- MIRBuilder.buildTrunc({Ty, WideTy}, MI.getOperand(0).getReg(), DstExt);
+ unsigned Src1Ext = MRI.createGenericVirtualRegister(WideTy);
+ unsigned Src2Ext = MRI.createGenericVirtualRegister(WideTy);
+ MIRBuilder.buildAnyExt(Src1Ext, MI.getOperand(1).getReg());
+ MIRBuilder.buildAnyExt(Src2Ext, MI.getOperand(2).getReg());
+
+ unsigned DstExt = MRI.createGenericVirtualRegister(WideTy);
+ MIRBuilder.buildInstr(MI.getOpcode())
+ .addDef(DstExt)
+ .addUse(Src1Ext)
+ .addUse(Src2Ext);
+
+ MIRBuilder.buildTrunc(MI.getOperand(0).getReg(), DstExt);
MI.eraseFromParent();
return Legalized;
}
? TargetOpcode::G_SEXT
: TargetOpcode::G_ZEXT;
- unsigned LHSExt = MRI.createGenericVirtualRegister(WideSize);
- MIRBuilder.buildInstr(ExtOp, {WideTy, MI.getType()})
- .addDef(LHSExt)
- .addUse(MI.getOperand(1).getReg());
+ unsigned LHSExt = MRI.createGenericVirtualRegister(WideTy);
+ MIRBuilder.buildInstr(ExtOp).addDef(LHSExt).addUse(
+ MI.getOperand(1).getReg());
- unsigned RHSExt = MRI.createGenericVirtualRegister(WideSize);
- MIRBuilder.buildInstr(ExtOp, {WideTy, MI.getType()})
- .addDef(RHSExt)
- .addUse(MI.getOperand(2).getReg());
+ unsigned RHSExt = MRI.createGenericVirtualRegister(WideTy);
+ MIRBuilder.buildInstr(ExtOp).addDef(RHSExt).addUse(
+ MI.getOperand(2).getReg());
- unsigned ResExt = MRI.createGenericVirtualRegister(WideSize);
- MIRBuilder.buildInstr(MI.getOpcode(), WideTy)
+ unsigned ResExt = MRI.createGenericVirtualRegister(WideTy);
+ MIRBuilder.buildInstr(MI.getOpcode())
.addDef(ResExt)
.addUse(LHSExt)
.addUse(RHSExt);
- MIRBuilder.buildTrunc({MI.getType(), WideTy}, MI.getOperand(0).getReg(),
- ResExt);
+ MIRBuilder.buildTrunc(MI.getOperand(0).getReg(), ResExt);
MI.eraseFromParent();
return Legalized;
}
assert(alignTo(Ty.getSizeInBits(), 8) == WideSize &&
"illegal to increase number of bytes loaded");
- unsigned DstExt = MRI.createGenericVirtualRegister(WideSize);
- MIRBuilder.buildLoad(WideTy, MI.getType(1), DstExt,
- MI.getOperand(1).getReg(), **MI.memoperands_begin());
- MIRBuilder.buildTrunc({Ty, WideTy}, MI.getOperand(0).getReg(), DstExt);
+ unsigned DstExt = MRI.createGenericVirtualRegister(WideTy);
+ MIRBuilder.buildLoad(DstExt, MI.getOperand(1).getReg(),
+ **MI.memoperands_begin());
+ MIRBuilder.buildTrunc(MI.getOperand(0).getReg(), DstExt);
MI.eraseFromParent();
return Legalized;
}
assert(alignTo(Ty.getSizeInBits(), 8) == WideSize &&
"illegal to increase number of bytes modified by a store");
- unsigned SrcExt = MRI.createGenericVirtualRegister(WideSize);
- MIRBuilder.buildAnyExt({WideTy, Ty}, SrcExt, MI.getOperand(0).getReg());
- MIRBuilder.buildStore(WideTy, MI.getType(1), SrcExt,
- MI.getOperand(1).getReg(), **MI.memoperands_begin());
+ unsigned SrcExt = MRI.createGenericVirtualRegister(WideTy);
+ MIRBuilder.buildAnyExt(SrcExt, MI.getOperand(0).getReg());
+ MIRBuilder.buildStore(SrcExt, MI.getOperand(1).getReg(),
+ **MI.memoperands_begin());
MI.eraseFromParent();
return Legalized;
}
case TargetOpcode::G_CONSTANT: {
- unsigned DstExt = MRI.createGenericVirtualRegister(WideSize);
- MIRBuilder.buildConstant(WideTy, DstExt, MI.getOperand(1).getImm());
- MIRBuilder.buildTrunc({Ty, WideTy}, MI.getOperand(0).getReg(), DstExt);
+ unsigned DstExt = MRI.createGenericVirtualRegister(WideTy);
+ MIRBuilder.buildConstant(DstExt, MI.getOperand(1).getImm());
+ MIRBuilder.buildTrunc(MI.getOperand(0).getReg(), DstExt);
MI.eraseFromParent();
return Legalized;
}
case TargetOpcode::G_FCONSTANT: {
- unsigned DstExt = MRI.createGenericVirtualRegister(WideSize);
- MIRBuilder.buildFConstant(WideTy, DstExt, *MI.getOperand(1).getFPImm());
- MIRBuilder.buildFPTrunc({Ty, WideTy}, MI.getOperand(0).getReg(), DstExt);
+ unsigned DstExt = MRI.createGenericVirtualRegister(WideTy);
+ MIRBuilder.buildFConstant(DstExt, *MI.getOperand(1).getFPImm());
+ MIRBuilder.buildFPTrunc(MI.getOperand(0).getReg(), DstExt);
MI.eraseFromParent();
return Legalized;
}
case TargetOpcode::G_BRCOND: {
- unsigned TstExt = MRI.createGenericVirtualRegister(WideSize);
- MIRBuilder.buildAnyExt({WideTy, Ty}, TstExt, MI.getOperand(0).getReg());
- MIRBuilder.buildBrCond(WideTy, TstExt, *MI.getOperand(1).getMBB());
+ unsigned TstExt = MRI.createGenericVirtualRegister(WideTy);
+ MIRBuilder.buildAnyExt(TstExt, MI.getOperand(0).getReg());
+ MIRBuilder.buildBrCond(TstExt, *MI.getOperand(1).getMBB());
MI.eraseFromParent();
return Legalized;
}
assert(TypeIdx == 1 && "unable to legalize predicate");
bool IsSigned = CmpInst::isSigned(
static_cast<CmpInst::Predicate>(MI.getOperand(1).getPredicate()));
- unsigned Op0Ext = MRI.createGenericVirtualRegister(WideSize);
- unsigned Op1Ext = MRI.createGenericVirtualRegister(WideSize);
+ unsigned Op0Ext = MRI.createGenericVirtualRegister(WideTy);
+ unsigned Op1Ext = MRI.createGenericVirtualRegister(WideTy);
if (IsSigned) {
- MIRBuilder.buildSExt({WideTy, MI.getType(1)}, Op0Ext,
- MI.getOperand(2).getReg());
- MIRBuilder.buildSExt({WideTy, MI.getType(1)}, Op1Ext,
- MI.getOperand(3).getReg());
+ MIRBuilder.buildSExt(Op0Ext, MI.getOperand(2).getReg());
+ MIRBuilder.buildSExt(Op1Ext, MI.getOperand(3).getReg());
} else {
- MIRBuilder.buildZExt({WideTy, MI.getType(1)}, Op0Ext,
- MI.getOperand(2).getReg());
- MIRBuilder.buildZExt({WideTy, MI.getType(1)}, Op1Ext,
- MI.getOperand(3).getReg());
+ MIRBuilder.buildZExt(Op0Ext, MI.getOperand(2).getReg());
+ MIRBuilder.buildZExt(Op1Ext, MI.getOperand(3).getReg());
}
MIRBuilder.buildICmp(
- {MI.getType(0), WideTy},
static_cast<CmpInst::Predicate>(MI.getOperand(1).getPredicate()),
MI.getOperand(0).getReg(), Op0Ext, Op1Ext);
MI.eraseFromParent();
MachineLegalizeHelper::LegalizeResult
MachineLegalizeHelper::lower(MachineInstr &MI, unsigned TypeIdx, LLT Ty) {
using namespace TargetOpcode;
- unsigned Size = Ty.getSizeInBits();
MIRBuilder.setInstr(MI);
switch(MI.getOpcode()) {
return UnableToLegalize;
case TargetOpcode::G_SREM:
case TargetOpcode::G_UREM: {
- unsigned QuotReg = MRI.createGenericVirtualRegister(Size);
- MIRBuilder.buildInstr(MI.getOpcode() == G_SREM ? G_SDIV : G_UDIV, Ty)
+ unsigned QuotReg = MRI.createGenericVirtualRegister(Ty);
+ MIRBuilder.buildInstr(MI.getOpcode() == G_SREM ? G_SDIV : G_UDIV)
.addDef(QuotReg)
.addUse(MI.getOperand(1).getReg())
.addUse(MI.getOperand(2).getReg());
- unsigned ProdReg = MRI.createGenericVirtualRegister(Size);
- MIRBuilder.buildMul(Ty, ProdReg, QuotReg, MI.getOperand(2).getReg());
- MIRBuilder.buildSub(Ty, MI.getOperand(0).getReg(),
- MI.getOperand(1).getReg(), ProdReg);
+ unsigned ProdReg = MRI.createGenericVirtualRegister(Ty);
+ MIRBuilder.buildMul(ProdReg, QuotReg, MI.getOperand(2).getReg());
+ MIRBuilder.buildSub(MI.getOperand(0).getReg(), MI.getOperand(1).getReg(),
+ ProdReg);
MI.eraseFromParent();
return Legalized;
}
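As a sketch (hypothetical MIR, for an s32 G_SREM), the lowering above expands the remainder into the equivalent divide/multiply/subtract sequence:

  %3(s32) = G_SDIV %1, %2           ; QuotReg
  %4(s32) = G_MUL %3, %2            ; ProdReg
  %0(s32) = G_SUB %1, %4            ; rem = lhs - (lhs / rhs) * rhs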
return UnableToLegalize;
case TargetOpcode::G_ADD: {
unsigned NarrowSize = NarrowTy.getSizeInBits();
- int NumParts = MI.getType().getSizeInBits() / NarrowSize;
+ unsigned DstReg = MI.getOperand(0).getReg();
+ int NumParts = MRI.getType(DstReg).getSizeInBits() / NarrowSize;
MIRBuilder.setInstr(MI);
extractParts(MI.getOperand(1).getReg(), NarrowTy, NumParts, Src1Regs);
extractParts(MI.getOperand(2).getReg(), NarrowTy, NumParts, Src2Regs);
- SmallVector<LLT, 2> DstTys;
for (int i = 0; i < NumParts; ++i) {
- unsigned DstReg = MRI.createGenericVirtualRegister(NarrowSize);
- MIRBuilder.buildAdd(NarrowTy, DstReg, Src1Regs[i], Src2Regs[i]);
- DstTys.push_back(NarrowTy);
+ unsigned DstReg = MRI.createGenericVirtualRegister(NarrowTy);
+ MIRBuilder.buildAdd(DstReg, Src1Regs[i], Src2Regs[i]);
DstRegs.push_back(DstReg);
Indexes.push_back(i * NarrowSize);
}
- MIRBuilder.buildSequence(MI.getType(), MI.getOperand(0).getReg(), DstTys,
- DstRegs, Indexes);
+ MIRBuilder.buildSequence(DstReg, DstRegs, Indexes);
MI.eraseFromParent();
return Legalized;
}
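A sketch (hypothetical MIR, assuming an s64 G_ADD narrowed to two s32 parts; the per-part sources %3/%4 and %5/%6 come from extractParts, elided here): each part is added independently and the results are reassembled with G_SEQUENCE at the recorded bit offsets.

  %7(s32) = G_ADD %3, %5            ; low parts
  %8(s32) = G_ADD %4, %6            ; high parts
  %0(s64) = G_SEQUENCE %7, 0, %8, 32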
SeqI.getOperand(2 * SeqIdx + 2).getImm() < ExtractPos)
++SeqIdx;
- if (SeqIdx == NumSeqSrcs ||
- SeqI.getOperand(2 * SeqIdx + 2).getImm() != ExtractPos ||
- SeqI.getType(SeqIdx + 1) != MI.getType(Idx)) {
+ if (SeqIdx == NumSeqSrcs) {
AllDefsReplaced = false;
continue;
}
unsigned OrigReg = SeqI.getOperand(2 * SeqIdx + 1).getReg();
+ if (SeqI.getOperand(2 * SeqIdx + 2).getImm() != ExtractPos ||
+ MRI.getType(OrigReg) != MRI.getType(ExtractReg)) {
+ AllDefsReplaced = false;
+ continue;
+ }
+
assert(!TargetRegisterInfo::isPhysicalRegister(OrigReg) &&
"unexpected physical register in G_SEQUENCE");
//===----------------------------------------------------------------------===//
#include "llvm/CodeGen/GlobalISel/MachineLegalizer.h"
+
+#include "llvm/ADT/SmallBitVector.h"
#include "llvm/CodeGen/MachineInstr.h"
+#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/ValueTypes.h"
#include "llvm/IR/Type.h"
#include "llvm/Target/TargetOpcodes.h"
}
std::tuple<MachineLegalizer::LegalizeAction, unsigned, LLT>
-MachineLegalizer::getAction(const MachineInstr &MI) const {
- for (unsigned i = 0; i < MI.getNumTypes(); ++i) {
- auto Action = getAction({MI.getOpcode(), i, MI.getType(i)});
+MachineLegalizer::getAction(const MachineInstr &MI,
+ const MachineRegisterInfo &MRI) const {
+ SmallBitVector SeenTypes(8);
+ const MCOperandInfo *OpInfo = MI.getDesc().OpInfo;
+ for (unsigned i = 0; i < MI.getDesc().getNumOperands(); ++i) {
+ if (!OpInfo[i].isGenericType())
+ continue;
+
+    // We don't want to repeatedly check the same type index; that
+    // could get expensive.
+ unsigned TypeIdx = OpInfo[i].getGenericTypeIndex();
+ if (SeenTypes[TypeIdx])
+ continue;
+
+ SeenTypes.set(TypeIdx);
+
+ LLT Ty = MRI.getType(MI.getOperand(i).getReg());
+ auto Action = getAction({MI.getOpcode(), TypeIdx, Ty});
if (Action.first != Legal)
- return std::make_tuple(Action.first, i, Action.second);
+ return std::make_tuple(Action.first, TypeIdx, Action.second);
}
return std::make_tuple(Legal, 0, LLT{});
}
-bool MachineLegalizer::isLegal(const MachineInstr &MI) const {
- return std::get<0>(getAction(MI)) == Legal;
+bool MachineLegalizer::isLegal(const MachineInstr &MI,
+ const MachineRegisterInfo &MRI) const {
+ return std::get<0>(getAction(MI, MRI)) == Legal;
}
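A minimal usage sketch (hypothetical caller code, assuming the surrounding LLVM context): since types now live on virtual registers rather than on the instruction, both queries take the MachineRegisterInfo alongside the MachineInstr.

  MachineLegalizer::LegalizeAction Action;
  unsigned TypeIdx;
  LLT TargetTy;
  std::tie(Action, TypeIdx, TargetTy) = Legalizer.getAction(MI, MRI);
  if (Action != MachineLegalizer::Legal) {
    // TypeIdx identifies the generic type index that must change to TargetTy.
  }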
LLT MachineLegalizer::findLegalType(const InstrAspect &Aspect,
// Legalized property, so it should be.
// FIXME: This should be in the MachineVerifier, but it can't use the
// MachineLegalizer as it's currently in the separate GlobalISel library.
+ const MachineRegisterInfo &MRI = MF.getRegInfo();
if (const MachineLegalizer *MLI = MF.getSubtarget().getMachineLegalizer()) {
for (const MachineBasicBlock &MBB : MF) {
for (const MachineInstr &MI : MBB) {
- if (isPreISelGenericOpcode(MI.getOpcode()) && !MLI->isLegal(MI)) {
+ if (isPreISelGenericOpcode(MI.getOpcode()) && !MLI->isLegal(MI, MRI)) {
if (!TPC->isGlobalISelAbortEnabled()) {
MF.getProperties().set(
MachineFunctionProperties::Property::FailedISel);
// get the size of that register class.
RC = TRI.getMinimalPhysRegClass(Reg);
} else {
- unsigned RegSize = MRI.getSize(Reg);
+ LLT Ty = MRI.getType(Reg);
+ unsigned RegSize = Ty.isSized() ? Ty.getSizeInBits() : 0;
// If Reg is not a generic register, query the register class to
// get its size.
if (RegSize)
for (unsigned &NewVReg : NewVRegsForOpIdx) {
assert(PartMap != PartMapList.end() && "Out-of-bound access");
assert(NewVReg == 0 && "Register has already been created");
- NewVReg = MRI.createGenericVirtualRegister(PartMap->Length);
+ NewVReg = MRI.createGenericVirtualRegister(LLT::scalar(PartMap->Length));
MRI.setRegBank(NewVReg, *PartMap->RegBank);
++PartMap;
}
if (Token.isError() || parseInstruction(OpCode, Flags))
return true;
- SmallVector<LLT, 1> Tys;
- if (isPreISelGenericOpcode(OpCode)) {
- // For generic opcode, at least one type is mandatory.
- auto Loc = Token.location();
- bool ManyTypes = Token.is(MIToken::lbrace);
- if (ManyTypes)
- lex();
-
- // Now actually parse the type(s).
- do {
- Tys.resize(Tys.size() + 1);
- if (parseLowLevelType(Loc, Tys[Tys.size() - 1]))
- return true;
- } while (ManyTypes && consumeIfPresent(MIToken::comma));
-
- if (ManyTypes)
- expectAndConsume(MIToken::rbrace);
- }
-
// Parse the remaining machine operands.
while (!Token.isNewlineOrEOF() && Token.isNot(MIToken::kw_debug_location) &&
Token.isNot(MIToken::coloncolon) && Token.isNot(MIToken::lbrace)) {
// TODO: Check for extraneous machine operands.
MI = MF.CreateMachineInstr(MCID, DebugLocation, /*NoImplicit=*/true);
MI->setFlags(Flags);
- if (Tys.size() > 0) {
- for (unsigned i = 0; i < Tys.size(); ++i)
- MI->setType(Tys[i], i);
- }
for (const auto &Operand : Operands)
MI->addOperand(MF, Operand.Operand);
if (assignRegisterTies(*MI, Operands))
if (MRI.getRegClassOrRegBank(Reg).is<const TargetRegisterClass *>())
return error("unexpected size on non-generic virtual register");
- unsigned Size;
- if (parseSize(Size))
+ LLT Ty;
+ if (parseLowLevelType(Token.location(), Ty))
+ return true;
+
+ if (expectAndConsume(MIToken::rparen))
return true;
- MRI.setSize(Reg, Size);
+ MRI.setType(Reg, Ty);
} else if (PFS.GenericVRegs.count(Reg)) {
-    // Generic virtual registers must have a size.
-    // If we end up here this means the size hasn't been specified and
+    // Generic virtual registers must have a type.
+    // If we end up here this means the type hasn't been specified and
if (StringRef(VReg.Class.Value).equals("_")) {
// This is a generic virtual register.
-    // The size will be set appropriately when we reach the definition.
+    // The type will be set appropriately when we reach the definition.
- Reg = RegInfo.createGenericVirtualRegister(/*Size*/ 1);
+ Reg = RegInfo.createGenericVirtualRegister(LLT::scalar(1));
PFS.GenericVRegs.insert(Reg);
} else {
const auto *RC = getRegClass(MF, VReg.Class.Value);
VReg.Class.SourceRange.Start,
Twine("use of undefined register class or register bank '") +
VReg.Class.Value + "'");
- Reg = RegInfo.createGenericVirtualRegister(/*Size*/ 1);
+ Reg = RegInfo.createGenericVirtualRegister(LLT::scalar(1));
RegInfo.setRegBank(Reg, *RegBank);
PFS.GenericVRegs.insert(Reg);
}
VReg.Class = StringRef(RegInfo.getRegBankOrNull(Reg)->getName()).lower();
else {
VReg.Class = std::string("_");
- assert(RegInfo.getSize(Reg) && "Generic registers must have a size");
+ assert(RegInfo.getType(Reg).isValid() &&
+ "Generic registers must have a valid type");
}
unsigned PreferredReg = RegInfo.getSimpleHint(Reg);
if (PreferredReg)
if (MI.getFlag(MachineInstr::FrameSetup))
OS << "frame-setup ";
OS << TII->getName(MI.getOpcode());
- if (isPreISelGenericOpcode(MI.getOpcode())) {
- assert(MI.getType().isValid() && "Generic instructions must have a type");
- unsigned NumTypes = MI.getNumTypes();
- OS << (NumTypes > 1 ? " {" : "") << ' ';
- for (unsigned i = 0; i < NumTypes; ++i) {
- MI.getType(i).print(OS);
- if (i + 1 != NumTypes)
- OS << ", ";
- }
- OS << (NumTypes > 1 ? " }" : "") << ' ';
- }
if (I < E)
OS << ' ';
if (ShouldPrintRegisterTies && Op.isTied() && !Op.isDef())
OS << "(tied-def " << Op.getParent()->findTiedOperandIdx(I) << ")";
assert((!IsDef || MRI) && "for IsDef, MRI must be provided");
- if (IsDef && MRI->getSize(Op.getReg()))
- OS << '(' << MRI->getSize(Op.getReg()) << ')';
+ if (IsDef && MRI->getType(Op.getReg()).isValid())
+ OS << '(' << MRI->getType(Op.getReg()) << ')';
break;
case MachineOperand::MO_Immediate:
OS << Op.getImm();
DebugLoc dl, bool NoImp)
: MCID(&tid), Parent(nullptr), Operands(nullptr), NumOperands(0), Flags(0),
AsmPrinterFlags(0), NumMemRefs(0), MemRefs(nullptr),
- debugLoc(std::move(dl))
-#ifdef LLVM_BUILD_GLOBAL_ISEL
- ,
- Tys(0)
-#endif
-{
+ debugLoc(std::move(dl)) {
assert(debugLoc.hasTrivialDestructor() && "Expected trivial destructor");
// Reserve space for the expected number of operands.
MachineInstr::MachineInstr(MachineFunction &MF, const MachineInstr &MI)
: MCID(&MI.getDesc()), Parent(nullptr), Operands(nullptr), NumOperands(0),
Flags(0), AsmPrinterFlags(0), NumMemRefs(MI.NumMemRefs),
- MemRefs(MI.MemRefs), debugLoc(MI.getDebugLoc())
-#ifdef LLVM_BUILD_GLOBAL_ISEL
- ,
- Tys(0)
-#endif
-{
+ MemRefs(MI.MemRefs), debugLoc(MI.getDebugLoc()) {
assert(debugLoc.hasTrivialDestructor() && "Expected trivial destructor");
CapOperands = OperandCapacity::get(MI.getNumOperands());
return nullptr;
}
-// Implement dummy setter and getter for type when
-// global-isel is not built.
-// The proper implementation is WIP and is tracked here:
-// PR26576.
-#ifndef LLVM_BUILD_GLOBAL_ISEL
-unsigned MachineInstr::getNumTypes() const { return 0; }
-
-void MachineInstr::setType(LLT Ty, unsigned Idx) {}
-
-LLT MachineInstr::getType(unsigned Idx) const { return LLT{}; }
-
-void MachineInstr::removeTypes() {}
-
-#else
-unsigned MachineInstr::getNumTypes() const { return Tys.size(); }
-
-void MachineInstr::setType(LLT Ty, unsigned Idx) {
- assert((!Ty.isValid() || isPreISelGenericOpcode(getOpcode())) &&
- "Non generic instructions are not supposed to be typed");
- if (Tys.size() < Idx + 1)
- Tys.resize(Idx+1);
- Tys[Idx] = Ty;
-}
-
-LLT MachineInstr::getType(unsigned Idx) const { return Tys[Idx]; }
-
-void MachineInstr::removeTypes() {
- Tys.clear();
-}
-#endif // LLVM_BUILD_GLOBAL_ISEL
-
/// RemoveRegOperandsFromUseLists - Unlink all of the register operands in
/// this instruction from their respective use lists. This requires that the
/// operands already be on their use lists.
unsigned Reg = getOperand(StartOp).getReg();
if (TargetRegisterInfo::isVirtualRegister(Reg)) {
VirtRegs.push_back(Reg);
- unsigned Size;
- if (MRI && (Size = MRI->getSize(Reg)))
- OS << '(' << Size << ')';
+ LLT Ty = MRI ? MRI->getType(Reg) : LLT{};
+ if (Ty.isValid())
+ OS << '(' << Ty << ')';
}
}
else
OS << "UNKNOWN";
- if (getNumTypes() > 0) {
- OS << " { ";
- for (unsigned i = 0; i < getNumTypes(); ++i) {
- getType(i).print(OS);
- if (i + 1 != getNumTypes())
- OS << ", ";
- }
- OS << " } ";
- }
-
if (SkipOpers)
return;
return Reg;
}
-unsigned
-MachineRegisterInfo::getSize(unsigned VReg) const {
- VRegToSizeMap::const_iterator SizeIt = getVRegToSize().find(VReg);
- return SizeIt != getVRegToSize().end() ? SizeIt->second : 0;
+LLT MachineRegisterInfo::getType(unsigned VReg) const {
+ VRegToTypeMap::const_iterator TypeIt = getVRegToType().find(VReg);
+ return TypeIt != getVRegToType().end() ? TypeIt->second : LLT{};
}
-void MachineRegisterInfo::setSize(unsigned VReg, unsigned Size) {
+void MachineRegisterInfo::setType(unsigned VReg, LLT Ty) {
// Check that VReg doesn't have a class.
assert(!getRegClassOrRegBank(VReg).is<const TargetRegisterClass *>() &&
"Can't set the size of a non-generic virtual register");
- getVRegToSize()[VReg] = Size;
+ getVRegToType()[VReg] = Ty;
}
unsigned
-MachineRegisterInfo::createGenericVirtualRegister(unsigned Size) {
- assert(Size && "Cannot create empty virtual register");
+MachineRegisterInfo::createGenericVirtualRegister(LLT Ty) {
+  assert(Ty.isValid() &&
+         "Cannot create a virtual register with an invalid type");
// New virtual register number.
unsigned Reg = TargetRegisterInfo::index2VirtReg(getNumVirtRegs());
VRegInfo.grow(Reg);
// FIXME: Should we use a dummy register bank?
VRegInfo[Reg].first = static_cast<RegisterBank *>(nullptr);
- getVRegToSize()[Reg] = Size;
+ getVRegToType()[Reg] = Ty;
RegAllocHints.grow(Reg);
if (TheDelegate)
TheDelegate->MRI_NoteNewVirtualRegister(Reg);
return Reg;
}
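A short sketch of the typed-vreg API (assuming the usual LLVM headers and an in-scope MachineRegisterInfo &MRI):

  // Generic vregs now carry an LLT instead of a raw size in bits.
  unsigned Scalar = MRI.createGenericVirtualRegister(LLT::scalar(32));
  unsigned Ptr = MRI.createGenericVirtualRegister(LLT::pointer(0));
  assert(MRI.getType(Scalar) == LLT::scalar(32));
  assert(MRI.getType(Ptr).isPointer());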
-void MachineRegisterInfo::clearVirtRegSizes() {
+void MachineRegisterInfo::clearVirtRegTypes() {
#ifndef NDEBUG
-  // Verify that the size of the now-constrained vreg is unchanged.
+  // Verify that the now-constrained vreg still fits in its register class.
- for (auto &VRegToSize : getVRegToSize()) {
- auto *RC = getRegClass(VRegToSize.first);
- if (VRegToSize.second != (RC->getSize() * 8))
+ for (auto &VRegToType : getVRegToType()) {
+ auto *RC = getRegClass(VRegToType.first);
+ if (VRegToType.second.isSized() &&
+ VRegToType.second.getSizeInBits() > (RC->getSize() * 8))
llvm_unreachable(
"Virtual register has explicit size different from its class size");
}
#endif
- getVRegToSize().clear();
+ getVRegToType().clear();
}
/// clearVirtRegs - Remove all virtual registers (after physreg assignment).
}
// Check types.
- const unsigned NumTypes = MI->getNumTypes();
if (isPreISelGenericOpcode(MCID.getOpcode())) {
if (isFunctionSelected)
report("Unexpected generic instruction in a Selected function", MI);
- if (NumTypes == 0)
- report("Generic instruction must have a type", MI);
- } else {
- if (NumTypes != 0)
- report("Non-generic instruction cannot have a type", MI);
+ // Generic instructions specify equality constraints between some
+ // of their operands. Make sure these are consistent.
+ SmallVector<LLT, 4> Types;
+ for (unsigned i = 0; i < MCID.getNumOperands(); ++i) {
+ if (!MCID.OpInfo[i].isGenericType())
+ continue;
+ size_t TypeIdx = MCID.OpInfo[i].getGenericTypeIndex();
+ Types.resize(std::max(TypeIdx + 1, Types.size()));
+
+ LLT OpTy = MRI->getType(MI->getOperand(i).getReg());
+ if (Types[TypeIdx].isValid() && Types[TypeIdx] != OpTy)
+ report("type mismatch in generic instruction", MI);
+ Types[TypeIdx] = OpTy;
+ }
}
// Generic opcodes must not have physical register operands.
}
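For example (hypothetical MIR), the check above flags a generic instruction whose operands share a type index but carry registers of different types:

  %2(s64) = G_ADD %0(s32), %1(s32)  ; reported: "type mismatch in generic instruction"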
-  // The gvreg must have a type and it must not have a SubIdx.
+  // The gvreg must have a type and it must not have a SubIdx.
- unsigned Size = MRI->getSize(Reg);
- if (!Size) {
- report("Generic virtual register must have a size", MO, MONum);
+ LLT Ty = MRI->getType(Reg);
+ if (!Ty.isValid()) {
+ report("Generic virtual register must have a valid type", MO,
+ MONum);
return;
}
}
// Make sure the register fits into its register bank if any.
- if (RegBank && RegBank->getSize() < Size) {
+ if (RegBank && Ty.isSized() &&
+ RegBank->getSize() < Ty.getSizeInBits()) {
report("Register bank is too small for virtual register", MO,
MONum);
errs() << "Register bank " << RegBank->getName() << " too small("
- << RegBank->getSize() << ") to fit " << Size << "-bits\n";
+ << RegBank->getSize() << ") to fit " << Ty.getSizeInBits()
+ << "-bits\n";
return;
}
if (SubIdx) {
-        report("Generic virtual register does not subregister index", MO, MONum);
+        report("Generic virtual register does not allow subregister index",
+               MO, MONum);
return;
}
break;
[](MachineIRBuilder &MIRBuilder, Type *Ty,
unsigned ValReg, unsigned PhysReg) {
MIRBuilder.getMBB().addLiveIn(PhysReg);
- MIRBuilder.buildType(LLT{*Ty}, ValReg, PhysReg);
+ MIRBuilder.buildCopy(ValReg, PhysReg);
});
}
handleAssignments(MIRBuilder, RetAssignFn, ResTys, ResRegs,
[&](MachineIRBuilder &MIRBuilder, Type *Ty,
unsigned ValReg, unsigned PhysReg) {
- MIRBuilder.buildType(LLT{*Ty}, ValReg, PhysReg);
+ MIRBuilder.buildCopy(ValReg, PhysReg);
MIB.addDef(PhysReg, RegState::Implicit);
});
const AArch64RegisterBankInfo &RBI,
const MachineRegisterInfo &MRI,
const AArch64RegisterInfo &TRI) {
- if (!I.getType().isSized()) {
+ LLT Ty = MRI.getType(I.getOperand(0).getReg());
+ if (!Ty.isSized()) {
DEBUG(dbgs() << "Generic binop should be sized\n");
return true;
}
return false;
}
- const LLT Ty = I.getType();
+ const LLT Ty = I.getOperand(0).isReg() ? MRI.getType(I.getOperand(0).getReg())
+ : LLT::unsized();
assert(Ty.isValid() && "Generic instruction doesn't have a type");
switch (I.getOpcode()) {
case TargetOpcode::G_BR: {
I.setDesc(TII.get(AArch64::B));
- I.removeTypes();
return true;
}
case TargetOpcode::G_TYPE: {
I.setDesc(TII.get(TargetOpcode::COPY));
- I.removeTypes();
return true;
}
case TargetOpcode::G_PHI: {
I.setDesc(TII.get(TargetOpcode::PHI));
- I.removeTypes();
return true;
}
case TargetOpcode::G_FRAME_INDEX: {
// allocas and G_FRAME_INDEX are only supported in addrspace(0).
- if (I.getType() != LLT::pointer(0)) {
- DEBUG(dbgs() << "G_FRAME_INDEX pointer has type: " << I.getType()
+ if (Ty != LLT::pointer(0)) {
+ DEBUG(dbgs() << "G_FRAME_INDEX pointer has type: " << Ty
<< ", expected: " << LLT::pointer(0) << '\n');
return false;
}
I.setDesc(TII.get(AArch64::ADDXri));
- I.removeTypes();
// MOs for a #0 shifted immediate.
I.addOperand(MachineOperand::CreateImm(0));
}
case TargetOpcode::G_LOAD:
case TargetOpcode::G_STORE: {
- LLT MemTy = I.getType(0);
- LLT PtrTy = I.getType(1);
+ LLT MemTy = Ty;
+ LLT PtrTy = MRI.getType(I.getOperand(1).getReg());
if (PtrTy != LLT::pointer(0)) {
DEBUG(dbgs() << "Load/Store pointer has type: " << PtrTy
const RegisterBank &PtrRB = *RBI.getRegBank(PtrReg, MRI, TRI);
assert(PtrRB.getID() == AArch64::GPRRegBankID &&
"Load/Store pointer operand isn't a GPR");
- assert(MRI.getSize(PtrReg) == 64 &&
- "Load/Store pointer operand isn't 64-bit");
+ assert(MRI.getType(PtrReg).isPointer() &&
+ "Load/Store pointer operand isn't a pointer");
#endif
const unsigned ValReg = I.getOperand(0).getReg();
return false;
I.setDesc(TII.get(NewOpc));
- I.removeTypes();
I.addOperand(MachineOperand::CreateImm(0));
return constrainSelectedInstRegOperands(I, TII, TRI, RBI);
}
I.setDesc(TII.get(NewOpc));
- I.removeTypes();
I.addOperand(MachineOperand::CreateReg(ZeroReg, /*isDef=*/false));
I.setDesc(TII.get(NewOpc));
-  // FIXME: Should the type be always reset in setDesc?
-  I.removeTypes();
// Now that we selected an opcode, we need to constrain the register
// operands to use appropriate classes.
#include "AArch64RegisterBankInfo.h"
#include "AArch64InstrInfo.h" // For XXXRegClassID.
+#include "llvm/CodeGen/LowLevelType.h"
+#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/GlobalISel/RegisterBank.h"
#include "llvm/CodeGen/GlobalISel/RegisterBankInfo.h"
#include "llvm/Target/TargetRegisterInfo.h"
// As a top-level guess, vectors go in FPRs, scalars in GPRs. Obviously this
// won't work for normal floating-point types (or NZCV). When such
// instructions exist we'll need to look at the MI's opcode.
- LLT Ty = MI.getType();
+ auto &MRI = MI.getParent()->getParent()->getRegInfo();
+ LLT Ty = MRI.getType(MI.getOperand(0).getReg());
unsigned BankID;
if (Ty.isVector())
BankID = AArch64::FPRRegBankID;
target triple = "aarch64-linux-gnu"
; CHECK-LABEL: name: args_i32
-; CHECK: %[[ARG0:[0-9]+]](32) = G_TYPE s32 %w0
-; CHECK: %{{[0-9]+}}(32) = G_TYPE s32 %w1
-; CHECK: %{{[0-9]+}}(32) = G_TYPE s32 %w2
-; CHECK: %{{[0-9]+}}(32) = G_TYPE s32 %w3
-; CHECK: %{{[0-9]+}}(32) = G_TYPE s32 %w4
-; CHECK: %{{[0-9]+}}(32) = G_TYPE s32 %w5
-; CHECK: %{{[0-9]+}}(32) = G_TYPE s32 %w6
-; CHECK: %{{[0-9]+}}(32) = G_TYPE s32 %w7
+; CHECK: %[[ARG0:[0-9]+]](s32) = COPY %w0
+; CHECK: %{{[0-9]+}}(s32) = COPY %w1
+; CHECK: %{{[0-9]+}}(s32) = COPY %w2
+; CHECK: %{{[0-9]+}}(s32) = COPY %w3
+; CHECK: %{{[0-9]+}}(s32) = COPY %w4
+; CHECK: %{{[0-9]+}}(s32) = COPY %w5
+; CHECK: %{{[0-9]+}}(s32) = COPY %w6
+; CHECK: %{{[0-9]+}}(s32) = COPY %w7
; CHECK: %w0 = COPY %[[ARG0]]
define i32 @args_i32(i32 %w0, i32 %w1, i32 %w2, i32 %w3,
}
; CHECK-LABEL: name: args_i64
-; CHECK: %[[ARG0:[0-9]+]](64) = G_TYPE s64 %x0
-; CHECK: %{{[0-9]+}}(64) = G_TYPE s64 %x1
-; CHECK: %{{[0-9]+}}(64) = G_TYPE s64 %x2
-; CHECK: %{{[0-9]+}}(64) = G_TYPE s64 %x3
-; CHECK: %{{[0-9]+}}(64) = G_TYPE s64 %x4
-; CHECK: %{{[0-9]+}}(64) = G_TYPE s64 %x5
-; CHECK: %{{[0-9]+}}(64) = G_TYPE s64 %x6
-; CHECK: %{{[0-9]+}}(64) = G_TYPE s64 %x7
+; CHECK: %[[ARG0:[0-9]+]](s64) = COPY %x0
+; CHECK: %{{[0-9]+}}(s64) = COPY %x1
+; CHECK: %{{[0-9]+}}(s64) = COPY %x2
+; CHECK: %{{[0-9]+}}(s64) = COPY %x3
+; CHECK: %{{[0-9]+}}(s64) = COPY %x4
+; CHECK: %{{[0-9]+}}(s64) = COPY %x5
+; CHECK: %{{[0-9]+}}(s64) = COPY %x6
+; CHECK: %{{[0-9]+}}(s64) = COPY %x7
; CHECK: %x0 = COPY %[[ARG0]]
define i64 @args_i64(i64 %x0, i64 %x1, i64 %x2, i64 %x3,
i64 %x4, i64 %x5, i64 %x6, i64 %x7) {
; CHECK-LABEL: name: args_ptrs
-; CHECK: %[[ARG0:[0-9]+]](64) = G_TYPE p0 %x0
-; CHECK: %{{[0-9]+}}(64) = G_TYPE p0 %x1
-; CHECK: %{{[0-9]+}}(64) = G_TYPE p0 %x2
-; CHECK: %{{[0-9]+}}(64) = G_TYPE p0 %x3
-; CHECK: %{{[0-9]+}}(64) = G_TYPE p0 %x4
-; CHECK: %{{[0-9]+}}(64) = G_TYPE p0 %x5
-; CHECK: %{{[0-9]+}}(64) = G_TYPE p0 %x6
-; CHECK: %{{[0-9]+}}(64) = G_TYPE p0 %x7
+; CHECK: %[[ARG0:[0-9]+]](p0) = COPY %x0
+; CHECK: %{{[0-9]+}}(p0) = COPY %x1
+; CHECK: %{{[0-9]+}}(p0) = COPY %x2
+; CHECK: %{{[0-9]+}}(p0) = COPY %x3
+; CHECK: %{{[0-9]+}}(p0) = COPY %x4
+; CHECK: %{{[0-9]+}}(p0) = COPY %x5
+; CHECK: %{{[0-9]+}}(p0) = COPY %x6
+; CHECK: %{{[0-9]+}}(p0) = COPY %x7
; CHECK: %x0 = COPY %[[ARG0]]
define i8* @args_ptrs(i8* %x0, i16* %x1, <2 x i8>* %x2, {i8, i16, i32}* %x3,
[3 x float]* %x4, double* %x5, i8* %x6, i8* %x7) {
bb.0:
liveins: %w0, %w1
- %0(32) = G_TYPE s32 %w0
- %1(32) = G_TYPE s32 %w1
- %2(32) = G_ADD s32 %0, %1
+ %0(s32) = COPY %w0
+ %1(s32) = COPY %w1
+ %2(s32) = G_ADD %0, %1
...
---
bb.0:
liveins: %x0, %x1
- %0(64) = G_TYPE s64 %x0
- %1(64) = G_TYPE s64 %x1
- %2(64) = G_ADD s64 %0, %1
+ %0(s64) = COPY %x0
+ %1(s64) = COPY %x1
+ %2(s64) = G_ADD %0, %1
...
---
bb.0:
liveins: %w0, %w1
- %0(32) = G_TYPE s32 %w0
- %1(32) = G_TYPE s32 %w1
- %2(32) = G_SUB s32 %0, %1
+ %0(s32) = COPY %w0
+ %1(s32) = COPY %w1
+ %2(s32) = G_SUB %0, %1
...
---
bb.0:
liveins: %x0, %x1
- %0(64) = G_TYPE s64 %x0
- %1(64) = G_TYPE s64 %x1
- %2(64) = G_SUB s64 %0, %1
+ %0(s64) = COPY %x0
+ %1(s64) = COPY %x1
+ %2(s64) = G_SUB %0, %1
...
---
bb.0:
liveins: %w0, %w1
- %0(32) = G_TYPE s32 %w0
- %1(32) = G_TYPE s32 %w1
- %2(32) = G_OR s32 %0, %1
+ %0(s32) = COPY %w0
+ %1(s32) = COPY %w1
+ %2(s32) = G_OR %0, %1
...
---
bb.0:
liveins: %x0, %x1
- %0(64) = G_TYPE s64 %x0
- %1(64) = G_TYPE s64 %x1
- %2(64) = G_OR s64 %0, %1
+ %0(s64) = COPY %x0
+ %1(s64) = COPY %x1
+ %2(s64) = G_OR %0, %1
...
---
bb.0:
liveins: %w0, %w1
- %0(32) = G_TYPE s32 %w0
- %1(32) = G_TYPE s32 %w1
- %2(32) = G_XOR s32 %0, %1
+ %0(s32) = COPY %w0
+ %1(s32) = COPY %w1
+ %2(s32) = G_XOR %0, %1
...
---
bb.0:
liveins: %x0, %x1
- %0(64) = G_TYPE s64 %x0
- %1(64) = G_TYPE s64 %x1
- %2(64) = G_XOR s64 %0, %1
+ %0(s64) = COPY %x0
+ %1(s64) = COPY %x1
+ %2(s64) = G_XOR %0, %1
...
---
bb.0:
liveins: %w0, %w1
- %0(32) = G_TYPE s32 %w0
- %1(32) = G_TYPE s32 %w1
- %2(32) = G_AND s32 %0, %1
+ %0(s32) = COPY %w0
+ %1(s32) = COPY %w1
+ %2(s32) = G_AND %0, %1
...
---
bb.0:
liveins: %x0, %x1
- %0(64) = G_TYPE s64 %x0
- %1(64) = G_TYPE s64 %x1
- %2(64) = G_AND s64 %0, %1
+ %0(s64) = COPY %x0
+ %1(s64) = COPY %x1
+ %2(s64) = G_AND %0, %1
...
---
bb.0:
liveins: %w0, %w1
- %0(32) = G_TYPE s32 %w0
- %1(32) = G_TYPE s32 %w1
- %2(32) = G_SHL s32 %0, %1
+ %0(s32) = COPY %w0
+ %1(s32) = COPY %w1
+ %2(s32) = G_SHL %0, %1
...
---
bb.0:
liveins: %x0, %x1
- %0(64) = G_TYPE s64 %x0
- %1(64) = G_TYPE s64 %x1
- %2(64) = G_SHL s64 %0, %1
+ %0(s64) = COPY %x0
+ %1(s64) = COPY %x1
+ %2(s64) = G_SHL %0, %1
...
---
bb.0:
liveins: %w0, %w1
- %0(32) = G_TYPE s32 %w0
- %1(32) = G_TYPE s32 %w1
- %2(32) = G_LSHR s32 %0, %1
+ %0(s32) = COPY %w0
+ %1(s32) = COPY %w1
+ %2(s32) = G_LSHR %0, %1
...
---
bb.0:
liveins: %x0, %x1
- %0(64) = G_TYPE s64 %x0
- %1(64) = G_TYPE s64 %x1
- %2(64) = G_LSHR s64 %0, %1
+ %0(s64) = COPY %x0
+ %1(s64) = COPY %x1
+ %2(s64) = G_LSHR %0, %1
...
---
bb.0:
liveins: %w0, %w1
- %0(32) = G_TYPE s32 %w0
- %1(32) = G_TYPE s32 %w1
- %2(32) = G_ASHR s32 %0, %1
+ %0(s32) = COPY %w0
+ %1(s32) = COPY %w1
+ %2(s32) = G_ASHR %0, %1
...
---
bb.0:
liveins: %x0, %x1
- %0(64) = G_TYPE s64 %x0
- %1(64) = G_TYPE s64 %x1
- %2(64) = G_ASHR s64 %0, %1
+ %0(s64) = COPY %x0
+ %1(s64) = COPY %x1
+ %2(s64) = G_ASHR %0, %1
...
# Check that we select s32 GPR G_MUL. This is trickier than other binops because
bb.0:
liveins: %w0, %w1
- %0(32) = G_TYPE s32 %w0
- %1(32) = G_TYPE s32 %w1
- %2(32) = G_MUL s32 %0, %1
+ %0(s32) = COPY %w0
+ %1(s32) = COPY %w1
+ %2(s32) = G_MUL %0, %1
...
---
bb.0:
liveins: %x0, %x1
- %0(64) = G_TYPE s64 %x0
- %1(64) = G_TYPE s64 %x1
- %2(64) = G_MUL s64 %0, %1
+ %0(s64) = COPY %x0
+ %1(s64) = COPY %x1
+ %2(s64) = G_MUL %0, %1
...
---
bb.0:
liveins: %w0, %w1
- %0(32) = G_TYPE s32 %w0
- %1(32) = G_TYPE s32 %w1
- %2(32) = G_SDIV s32 %0, %1
+ %0(s32) = COPY %w0
+ %1(s32) = COPY %w1
+ %2(s32) = G_SDIV %0, %1
...
---
bb.0:
liveins: %x0, %x1
- %0(64) = G_TYPE s64 %x0
- %1(64) = G_TYPE s64 %x1
- %2(64) = G_SDIV s64 %0, %1
+ %0(s64) = COPY %x0
+ %1(s64) = COPY %x1
+ %2(s64) = G_SDIV %0, %1
...
---
bb.0:
liveins: %w0, %w1
- %0(32) = G_TYPE s32 %w0
- %1(32) = G_TYPE s32 %w1
- %2(32) = G_UDIV s32 %0, %1
+ %0(s32) = COPY %w0
+ %1(s32) = COPY %w1
+ %2(s32) = G_UDIV %0, %1
...
---
bb.0:
liveins: %x0, %x1
- %0(64) = G_TYPE s64 %x0
- %1(64) = G_TYPE s64 %x1
- %2(64) = G_UDIV s64 %0, %1
+ %0(s64) = COPY %x0
+ %1(s64) = COPY %x1
+ %2(s64) = G_UDIV %0, %1
...
---
bb.0:
liveins: %s0, %s1
- %0(32) = G_TYPE s32 %s0
- %1(32) = G_TYPE s32 %s1
- %2(32) = G_FADD s32 %0, %1
+ %0(s32) = COPY %s0
+ %1(s32) = COPY %s1
+ %2(s32) = G_FADD %0, %1
...
---
bb.0:
liveins: %d0, %d1
- %0(64) = G_TYPE s64 %d0
- %1(64) = G_TYPE s64 %d1
- %2(64) = G_FADD s64 %0, %1
+ %0(s64) = COPY %d0
+ %1(s64) = COPY %d1
+ %2(s64) = G_FADD %0, %1
...
---
bb.0:
liveins: %s0, %s1
- %0(32) = G_TYPE s32 %s0
- %1(32) = G_TYPE s32 %s1
- %2(32) = G_FSUB s32 %0, %1
+ %0(s32) = COPY %s0
+ %1(s32) = COPY %s1
+ %2(s32) = G_FSUB %0, %1
...
---
bb.0:
liveins: %d0, %d1
- %0(64) = G_TYPE s64 %d0
- %1(64) = G_TYPE s64 %d1
- %2(64) = G_FSUB s64 %0, %1
+ %0(s64) = COPY %d0
+ %1(s64) = COPY %d1
+ %2(s64) = G_FSUB %0, %1
...
---
bb.0:
liveins: %s0, %s1
- %0(32) = G_TYPE s32 %s0
- %1(32) = G_TYPE s32 %s1
- %2(32) = G_FMUL s32 %0, %1
+ %0(s32) = COPY %s0
+ %1(s32) = COPY %s1
+ %2(s32) = G_FMUL %0, %1
...
---
bb.0:
liveins: %d0, %d1
- %0(64) = G_TYPE s64 %d0
- %1(64) = G_TYPE s64 %d1
- %2(64) = G_FMUL s64 %0, %1
+ %0(s64) = COPY %d0
+ %1(s64) = COPY %d1
+ %2(s64) = G_FMUL %0, %1
...
---
bb.0:
liveins: %s0, %s1
- %0(32) = G_TYPE s32 %s0
- %1(32) = G_TYPE s32 %s1
- %2(32) = G_FDIV s32 %0, %1
+ %0(s32) = COPY %s0
+ %1(s32) = COPY %s1
+ %2(s32) = G_FDIV %0, %1
...
---
bb.0:
liveins: %d0, %d1
- %0(64) = G_TYPE s64 %d0
- %1(64) = G_TYPE s64 %d1
- %2(64) = G_FDIV s64 %0, %1
+ %0(s64) = COPY %d0
+ %1(s64) = COPY %d1
+ %2(s64) = G_FDIV %0, %1
...
---
bb.0:
successors: %bb.0
- G_BR unsized %bb.0
+ G_BR %bb.0
...
---
bb.0:
liveins: %x0
- %0(64) = G_TYPE s64 %x0
- %1(64) = G_LOAD { s64, p0 } %0 :: (load 8 from %ir.addr)
+ %0(p0) = COPY %x0
+ %1(s64) = G_LOAD %0 :: (load 8 from %ir.addr)
...
bb.0:
liveins: %x0
- %0(64) = G_TYPE s64 %x0
- %1(32) = G_LOAD { s32, p0 } %0 :: (load 4 from %ir.addr)
+ %0(p0) = COPY %x0
+ %1(s32) = G_LOAD %0 :: (load 4 from %ir.addr)
...
bb.0:
liveins: %x0, %x1
- %0(64) = G_TYPE s64 %x0
- %1(64) = G_TYPE s64 %x1
- G_STORE { s64, p0 } %1, %0 :: (store 8 into %ir.addr)
+ %0(p0) = COPY %x0
+ %1(s64) = COPY %x1
+ G_STORE %1, %0 :: (store 8 into %ir.addr)
...
bb.0:
liveins: %x0, %w1
- %0(64) = G_TYPE s64 %x0
- %1(32) = G_TYPE s32 %w1
- G_STORE { s32, p0 } %1, %0 :: (store 4 into %ir.addr)
+ %0(p0) = COPY %x0
+ %1(s32) = COPY %w1
+ G_STORE %1, %0 :: (store 4 into %ir.addr)
...
# CHECK: %0 = ADDXri %stack.0.ptr0, 0, 0
body: |
bb.0:
- %0(64) = G_FRAME_INDEX p0 %stack.0.ptr0
+ %0(p0) = G_FRAME_INDEX %stack.0.ptr0
...
---
; Tests for add.
; CHECK-LABEL: name: addi64
-; CHECK: [[ARG1:%[0-9]+]](64) = G_TYPE s64 %x0
-; CHECK-NEXT: [[ARG2:%[0-9]+]](64) = G_TYPE s64 %x1
-; CHECK-NEXT: [[RES:%[0-9]+]](64) = G_ADD s64 [[ARG1]], [[ARG2]]
+; CHECK: [[ARG1:%[0-9]+]](s64) = COPY %x0
+; CHECK-NEXT: [[ARG2:%[0-9]+]](s64) = COPY %x1
+; CHECK-NEXT: [[RES:%[0-9]+]](s64) = G_ADD [[ARG1]], [[ARG2]]
; CHECK-NEXT: %x0 = COPY [[RES]]
; CHECK-NEXT: RET_ReallyLR implicit %x0
define i64 @addi64(i64 %arg1, i64 %arg2) {
}
; CHECK-LABEL: name: muli64
-; CHECK: [[ARG1:%[0-9]+]](64) = G_TYPE s64 %x0
-; CHECK-NEXT: [[ARG2:%[0-9]+]](64) = G_TYPE s64 %x1
-; CHECK-NEXT: [[RES:%[0-9]+]](64) = G_MUL s64 [[ARG1]], [[ARG2]]
+; CHECK: [[ARG1:%[0-9]+]](s64) = COPY %x0
+; CHECK-NEXT: [[ARG2:%[0-9]+]](s64) = COPY %x1
+; CHECK-NEXT: [[RES:%[0-9]+]](s64) = G_MUL [[ARG1]], [[ARG2]]
; CHECK-NEXT: %x0 = COPY [[RES]]
; CHECK-NEXT: RET_ReallyLR implicit %x0
define i64 @muli64(i64 %arg1, i64 %arg2) {
; CHECK-NEXT: - { id: 1, name: ptr2, offset: 0, size: 8, alignment: 1 }
; CHECK-NEXT: - { id: 2, name: ptr3, offset: 0, size: 128, alignment: 8 }
; CHECK-NEXT: - { id: 3, name: ptr4, offset: 0, size: 1, alignment: 8 }
-; CHECK: %{{[0-9]+}}(64) = G_FRAME_INDEX p0 %stack.0.ptr1
-; CHECK: %{{[0-9]+}}(64) = G_FRAME_INDEX p0 %stack.1.ptr2
-; CHECK: %{{[0-9]+}}(64) = G_FRAME_INDEX p0 %stack.2.ptr3
-; CHECK: %{{[0-9]+}}(64) = G_FRAME_INDEX p0 %stack.3.ptr4
+; CHECK: %{{[0-9]+}}(p0) = G_FRAME_INDEX %stack.0.ptr1
+; CHECK: %{{[0-9]+}}(p0) = G_FRAME_INDEX %stack.1.ptr2
+; CHECK: %{{[0-9]+}}(p0) = G_FRAME_INDEX %stack.2.ptr3
+; CHECK: %{{[0-9]+}}(p0) = G_FRAME_INDEX %stack.3.ptr4
define void @allocai64() {
%ptr1 = alloca i64
%ptr2 = alloca i64, align 1
; CHECK-NEXT: successors: %[[END:[0-9a-zA-Z._-]+]]({{0x[a-f0-9]+ / 0x[a-f0-9]+}} = 100.00%)
;
; Check that we emit the correct branch.
-; CHECK: G_BR unsized %[[END]]
+; CHECK: G_BR %[[END]]
;
; Check that end contains the return instruction.
; CHECK: [[END]]:
; CHECK: %[[FALSE:[0-9a-zA-Z._-]+]]({{0x[a-f0-9]+ / 0x[a-f0-9]+}} = 50.00%)
;
; Check that we emit the correct branch.
-; CHECK: [[ADDR:%.*]](64) = G_TYPE p0 %x0
-; CHECK: [[TST:%.*]](1) = G_LOAD { s1, p0 } [[ADDR]]
-; CHECK: G_BRCOND s1 [[TST]], %[[TRUE]]
-; CHECK: G_BR unsized %[[FALSE]]
+; CHECK: [[ADDR:%.*]](p0) = COPY %x0
+; CHECK: [[TST:%.*]](s1) = G_LOAD [[ADDR]]
+; CHECK: G_BRCOND [[TST]], %[[TRUE]]
+; CHECK: G_BR %[[FALSE]]
;
; Check that each successor contains the return instruction.
; CHECK: [[TRUE]]:
; Tests for or.
; CHECK-LABEL: name: ori64
-; CHECK: [[ARG1:%[0-9]+]](64) = G_TYPE s64 %x0
-; CHECK-NEXT: [[ARG2:%[0-9]+]](64) = G_TYPE s64 %x1
-; CHECK-NEXT: [[RES:%[0-9]+]](64) = G_OR s64 [[ARG1]], [[ARG2]]
+; CHECK: [[ARG1:%[0-9]+]](s64) = COPY %x0
+; CHECK-NEXT: [[ARG2:%[0-9]+]](s64) = COPY %x1
+; CHECK-NEXT: [[RES:%[0-9]+]](s64) = G_OR [[ARG1]], [[ARG2]]
; CHECK-NEXT: %x0 = COPY [[RES]]
; CHECK-NEXT: RET_ReallyLR implicit %x0
define i64 @ori64(i64 %arg1, i64 %arg2) {
}
; CHECK-LABEL: name: ori32
-; CHECK: [[ARG1:%[0-9]+]](32) = G_TYPE s32 %w0
-; CHECK-NEXT: [[ARG2:%[0-9]+]](32) = G_TYPE s32 %w1
-; CHECK-NEXT: [[RES:%[0-9]+]](32) = G_OR s32 [[ARG1]], [[ARG2]]
+; CHECK: [[ARG1:%[0-9]+]](s32) = COPY %w0
+; CHECK-NEXT: [[ARG2:%[0-9]+]](s32) = COPY %w1
+; CHECK-NEXT: [[RES:%[0-9]+]](s32) = G_OR [[ARG1]], [[ARG2]]
; CHECK-NEXT: %w0 = COPY [[RES]]
; CHECK-NEXT: RET_ReallyLR implicit %w0
define i32 @ori32(i32 %arg1, i32 %arg2) {
; Tests for xor.
; CHECK-LABEL: name: xori64
-; CHECK: [[ARG1:%[0-9]+]](64) = G_TYPE s64 %x0
-; CHECK-NEXT: [[ARG2:%[0-9]+]](64) = G_TYPE s64 %x1
-; CHECK-NEXT: [[RES:%[0-9]+]](64) = G_XOR s64 [[ARG1]], [[ARG2]]
+; CHECK: [[ARG1:%[0-9]+]](s64) = COPY %x0
+; CHECK-NEXT: [[ARG2:%[0-9]+]](s64) = COPY %x1
+; CHECK-NEXT: [[RES:%[0-9]+]](s64) = G_XOR [[ARG1]], [[ARG2]]
; CHECK-NEXT: %x0 = COPY [[RES]]
; CHECK-NEXT: RET_ReallyLR implicit %x0
define i64 @xori64(i64 %arg1, i64 %arg2) {
}
; CHECK-LABEL: name: xori32
-; CHECK: [[ARG1:%[0-9]+]](32) = G_TYPE s32 %w0
-; CHECK-NEXT: [[ARG2:%[0-9]+]](32) = G_TYPE s32 %w1
-; CHECK-NEXT: [[RES:%[0-9]+]](32) = G_XOR s32 [[ARG1]], [[ARG2]]
+; CHECK: [[ARG1:%[0-9]+]](s32) = COPY %w0
+; CHECK-NEXT: [[ARG2:%[0-9]+]](s32) = COPY %w1
+; CHECK-NEXT: [[RES:%[0-9]+]](s32) = G_XOR [[ARG1]], [[ARG2]]
; CHECK-NEXT: %w0 = COPY [[RES]]
; CHECK-NEXT: RET_ReallyLR implicit %w0
define i32 @xori32(i32 %arg1, i32 %arg2) {
; Tests for and.
; CHECK-LABEL: name: andi64
-; CHECK: [[ARG1:%[0-9]+]](64) = G_TYPE s64 %x0
-; CHECK-NEXT: [[ARG2:%[0-9]+]](64) = G_TYPE s64 %x1
-; CHECK-NEXT: [[RES:%[0-9]+]](64) = G_AND s64 [[ARG1]], [[ARG2]]
+; CHECK: [[ARG1:%[0-9]+]](s64) = COPY %x0
+; CHECK-NEXT: [[ARG2:%[0-9]+]](s64) = COPY %x1
+; CHECK-NEXT: [[RES:%[0-9]+]](s64) = G_AND [[ARG1]], [[ARG2]]
; CHECK-NEXT: %x0 = COPY [[RES]]
; CHECK-NEXT: RET_ReallyLR implicit %x0
define i64 @andi64(i64 %arg1, i64 %arg2) {
}
; CHECK-LABEL: name: andi32
-; CHECK: [[ARG1:%[0-9]+]](32) = G_TYPE s32 %w0
-; CHECK-NEXT: [[ARG2:%[0-9]+]](32) = G_TYPE s32 %w1
-; CHECK-NEXT: [[RES:%[0-9]+]](32) = G_AND s32 [[ARG1]], [[ARG2]]
+; CHECK: [[ARG1:%[0-9]+]](s32) = COPY %w0
+; CHECK-NEXT: [[ARG2:%[0-9]+]](s32) = COPY %w1
+; CHECK-NEXT: [[RES:%[0-9]+]](s32) = G_AND [[ARG1]], [[ARG2]]
; CHECK-NEXT: %w0 = COPY [[RES]]
; CHECK-NEXT: RET_ReallyLR implicit %w0
define i32 @andi32(i32 %arg1, i32 %arg2) {
; Tests for sub.
; CHECK-LABEL: name: subi64
-; CHECK: [[ARG1:%[0-9]+]](64) = G_TYPE s64 %x0
-; CHECK-NEXT: [[ARG2:%[0-9]+]](64) = G_TYPE s64 %x1
-; CHECK-NEXT: [[RES:%[0-9]+]](64) = G_SUB s64 [[ARG1]], [[ARG2]]
+; CHECK: [[ARG1:%[0-9]+]](s64) = COPY %x0
+; CHECK-NEXT: [[ARG2:%[0-9]+]](s64) = COPY %x1
+; CHECK-NEXT: [[RES:%[0-9]+]](s64) = G_SUB [[ARG1]], [[ARG2]]
; CHECK-NEXT: %x0 = COPY [[RES]]
; CHECK-NEXT: RET_ReallyLR implicit %x0
define i64 @subi64(i64 %arg1, i64 %arg2) {
}
; CHECK-LABEL: name: subi32
-; CHECK: [[ARG1:%[0-9]+]](32) = G_TYPE s32 %w0
-; CHECK-NEXT: [[ARG2:%[0-9]+]](32) = G_TYPE s32 %w1
-; CHECK-NEXT: [[RES:%[0-9]+]](32) = G_SUB s32 [[ARG1]], [[ARG2]]
+; CHECK: [[ARG1:%[0-9]+]](s32) = COPY %w0
+; CHECK-NEXT: [[ARG2:%[0-9]+]](s32) = COPY %w1
+; CHECK-NEXT: [[RES:%[0-9]+]](s32) = G_SUB [[ARG1]], [[ARG2]]
; CHECK-NEXT: %w0 = COPY [[RES]]
; CHECK-NEXT: RET_ReallyLR implicit %w0
define i32 @subi32(i32 %arg1, i32 %arg2) {
}
; CHECK-LABEL: name: ptrtoint
-; CHECK: [[ARG1:%[0-9]+]](64) = G_TYPE p0 %x0
-; CHECK: [[RES:%[0-9]+]](64) = G_PTRTOINT { s64, p0 } [[ARG1]]
+; CHECK: [[ARG1:%[0-9]+]](p0) = COPY %x0
+; CHECK: [[RES:%[0-9]+]](s64) = G_PTRTOINT [[ARG1]]
; CHECK: %x0 = COPY [[RES]]
; CHECK: RET_ReallyLR implicit %x0
define i64 @ptrtoint(i64* %a) {
}
; CHECK-LABEL: name: inttoptr
-; CHECK: [[ARG1:%[0-9]+]](64) = G_TYPE s64 %x0
-; CHECK: [[RES:%[0-9]+]](64) = G_INTTOPTR { p0, s64 } [[ARG1]]
+; CHECK: [[ARG1:%[0-9]+]](s64) = COPY %x0
+; CHECK: [[RES:%[0-9]+]](p0) = G_INTTOPTR [[ARG1]]
; CHECK: %x0 = COPY [[RES]]
; CHECK: RET_ReallyLR implicit %x0
define i64* @inttoptr(i64 %a) {
}
; CHECK-LABEL: name: trivial_bitcast
-; CHECK: [[ARG1:%[0-9]+]](64) = G_TYPE p0 %x0
+; CHECK: [[ARG1:%[0-9]+]](p0) = COPY %x0
; CHECK: %x0 = COPY [[ARG1]]
; CHECK: RET_ReallyLR implicit %x0
define i64* @trivial_bitcast(i8* %a) {
}
; CHECK-LABEL: name: trivial_bitcast_with_copy
-; CHECK: [[A:%[0-9]+]](64) = G_TYPE p0 %x0
-; CHECK: G_BR unsized %[[CAST:bb\.[0-9]+]]
+; CHECK: [[A:%[0-9]+]](p0) = COPY %x0
+; CHECK: G_BR %[[CAST:bb\.[0-9]+]]
; CHECK: [[CAST]]:
-; CHECK: {{%[0-9]+}}(64) = COPY [[A]]
-; CHECK: G_BR unsized %[[END:bb\.[0-9]+]]
+; CHECK: {{%[0-9]+}}(p0) = COPY [[A]]
+; CHECK: G_BR %[[END:bb\.[0-9]+]]
; CHECK: [[END]]:
define i64* @trivial_bitcast_with_copy(i8* %a) {
}
; CHECK-LABEL: name: bitcast
-; CHECK: [[ARG1:%[0-9]+]](64) = G_TYPE s64 %x0
-; CHECK: [[RES1:%[0-9]+]](64) = G_BITCAST { <2 x s32>, s64 } [[ARG1]]
-; CHECK: [[RES2:%[0-9]+]](64) = G_BITCAST { s64, <2 x s32> } [[RES1]]
+; CHECK: [[ARG1:%[0-9]+]](s64) = COPY %x0
+; CHECK: [[RES1:%[0-9]+]](<2 x s32>) = G_BITCAST [[ARG1]]
+; CHECK: [[RES2:%[0-9]+]](s64) = G_BITCAST [[RES1]]
; CHECK: %x0 = COPY [[RES2]]
; CHECK: RET_ReallyLR implicit %x0
define i64 @bitcast(i64 %a) {
}
; CHECK-LABEL: name: trunc
-; CHECK: [[ARG1:%[0-9]+]](64) = G_TYPE s64 %x0
-; CHECK: [[VEC:%[0-9]+]](128) = G_LOAD { <4 x s32>, p0 }
-; CHECK: [[RES1:%[0-9]+]](8) = G_TRUNC { s8, s64 } [[ARG1]]
-; CHECK: [[RES2:%[0-9]+]](64) = G_TRUNC { <4 x s16>, <4 x s32> } [[VEC]]
+; CHECK: [[ARG1:%[0-9]+]](s64) = COPY %x0
+; CHECK: [[VEC:%[0-9]+]](<4 x s32>) = G_LOAD
+; CHECK: [[RES1:%[0-9]+]](s8) = G_TRUNC [[ARG1]]
+; CHECK: [[RES2:%[0-9]+]](<4 x s16>) = G_TRUNC [[VEC]]
define void @trunc(i64 %a) {
%vecptr = alloca <4 x i32>
%vec = load <4 x i32>, <4 x i32>* %vecptr
}
; CHECK-LABEL: name: load
-; CHECK: [[ADDR:%[0-9]+]](64) = G_TYPE p0 %x0
-; CHECK: [[ADDR42:%[0-9]+]](64) = G_TYPE p42 %x1
-; CHECK: [[VAL1:%[0-9]+]](64) = G_LOAD { s64, p0 } [[ADDR]] :: (load 8 from %ir.addr, align 16)
-; CHECK: [[VAL2:%[0-9]+]](64) = G_LOAD { s64, p42 } [[ADDR42]] :: (load 8 from %ir.addr42)
-; CHECK: [[SUM:%.*]](64) = G_ADD s64 [[VAL1]], [[VAL2]]
+; CHECK: [[ADDR:%[0-9]+]](p0) = COPY %x0
+; CHECK: [[ADDR42:%[0-9]+]](p42) = COPY %x1
+; CHECK: [[VAL1:%[0-9]+]](s64) = G_LOAD [[ADDR]] :: (load 8 from %ir.addr, align 16)
+; CHECK: [[VAL2:%[0-9]+]](s64) = G_LOAD [[ADDR42]] :: (load 8 from %ir.addr42)
+; CHECK: [[SUM:%.*]](s64) = G_ADD [[VAL1]], [[VAL2]]
; CHECK: %x0 = COPY [[SUM]]
; CHECK: RET_ReallyLR implicit %x0
define i64 @load(i64* %addr, i64 addrspace(42)* %addr42) {
}
; CHECK-LABEL: name: store
-; CHECK: [[ADDR:%[0-9]+]](64) = G_TYPE p0 %x0
-; CHECK: [[ADDR42:%[0-9]+]](64) = G_TYPE p42 %x1
-; CHECK: [[VAL1:%[0-9]+]](64) = G_TYPE s64 %x2
-; CHECK: [[VAL2:%[0-9]+]](64) = G_TYPE s64 %x3
-; CHECK: G_STORE { s64, p0 } [[VAL1]], [[ADDR]] :: (store 8 into %ir.addr, align 16)
-; CHECK: G_STORE { s64, p42 } [[VAL2]], [[ADDR42]] :: (store 8 into %ir.addr42)
+; CHECK: [[ADDR:%[0-9]+]](p0) = COPY %x0
+; CHECK: [[ADDR42:%[0-9]+]](p42) = COPY %x1
+; CHECK: [[VAL1:%[0-9]+]](s64) = COPY %x2
+; CHECK: [[VAL2:%[0-9]+]](s64) = COPY %x3
+; CHECK: G_STORE [[VAL1]], [[ADDR]] :: (store 8 into %ir.addr, align 16)
+; CHECK: G_STORE [[VAL2]], [[ADDR42]] :: (store 8 into %ir.addr42)
; CHECK: RET_ReallyLR
define void @store(i64* %addr, i64 addrspace(42)* %addr42, i64 %val1, i64 %val2) {
store i64 %val1, i64* %addr, align 16
}
; CHECK-LABEL: name: intrinsics
-; CHECK: [[CUR:%[0-9]+]](32) = G_TYPE s32 %w0
-; CHECK: [[BITS:%[0-9]+]](32) = G_TYPE s32 %w1
-; CHECK: [[PTR:%[0-9]+]](64) = G_INTRINSIC { p0, s32 } intrinsic(@llvm.returnaddress), 0
-; CHECK: [[PTR_VEC:%[0-9]+]](64) = G_FRAME_INDEX p0 %stack.0.ptr.vec
-; CHECK: [[VEC:%[0-9]+]](64) = G_LOAD { <8 x s8>, p0 } [[PTR_VEC]]
-; CHECK: G_INTRINSIC_W_SIDE_EFFECTS { unsized, <8 x s8>, <8 x s8>, p0 } intrinsic(@llvm.aarch64.neon.st2), [[VEC]], [[VEC]], [[PTR]]
+; CHECK: [[CUR:%[0-9]+]](s32) = COPY %w0
+; CHECK: [[BITS:%[0-9]+]](s32) = COPY %w1
+; CHECK: [[PTR:%[0-9]+]](p0) = G_INTRINSIC intrinsic(@llvm.returnaddress), 0
+; CHECK: [[PTR_VEC:%[0-9]+]](p0) = G_FRAME_INDEX %stack.0.ptr.vec
+; CHECK: [[VEC:%[0-9]+]](<8 x s8>) = G_LOAD [[PTR_VEC]]
+; CHECK: G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.aarch64.neon.st2), [[VEC]], [[VEC]], [[PTR]]
; CHECK: RET_ReallyLR
declare i8* @llvm.returnaddress(i32)
declare void @llvm.aarch64.neon.st2.v8i8.p0i8(<8 x i8>, <8 x i8>, i8*)
}
; CHECK-LABEL: name: test_phi
-; CHECK: G_BRCOND s1 {{%.*}}, %[[TRUE:bb\.[0-9]+]]
-; CHECK: G_BR unsized %[[FALSE:bb\.[0-9]+]]
+; CHECK: G_BRCOND {{%.*}}, %[[TRUE:bb\.[0-9]+]]
+; CHECK: G_BR %[[FALSE:bb\.[0-9]+]]
; CHECK: [[TRUE]]:
-; CHECK: [[RES1:%[0-9]+]](32) = G_LOAD { s32, p0 }
+; CHECK: [[RES1:%[0-9]+]](s32) = G_LOAD
; CHECK: [[FALSE]]:
-; CHECK: [[RES2:%[0-9]+]](32) = G_LOAD { s32, p0 }
+; CHECK: [[RES2:%[0-9]+]](s32) = G_LOAD
-; CHECK: [[RES:%[0-9]+]](32) = G_PHI s32 [[RES1]], %[[TRUE]], [[RES2]], %[[FALSE]]
+; CHECK: [[RES:%[0-9]+]](s32) = G_PHI [[RES1]], %[[TRUE]], [[RES2]], %[[FALSE]]
; CHECK: %w0 = COPY [[RES]]
define i32 @test_phi(i32* %addr1, i32* %addr2, i1 %tst) {
br i1 %tst, label %true, label %false
; It's important that constants are after argument passing, but before the
; rest of the entry block.
; CHECK-LABEL: name: constant_int
-; CHECK: [[IN:%[0-9]+]](32) = G_TYPE s32 %w0
-; CHECK: [[ONE:%[0-9]+]](32) = G_CONSTANT s32 1
-; CHECK: G_BR unsized
+; CHECK: [[IN:%[0-9]+]](s32) = COPY %w0
+; CHECK: [[ONE:%[0-9]+]](s32) = G_CONSTANT 1
+; CHECK: G_BR
-; CHECK: [[SUM1:%[0-9]+]](32) = G_ADD s32 [[IN]], [[ONE]]
-; CHECK: [[SUM2:%[0-9]+]](32) = G_ADD s32 [[IN]], [[ONE]]
-; CHECK: [[RES:%[0-9]+]](32) = G_ADD s32 [[SUM1]], [[SUM2]]
+; CHECK: [[SUM1:%[0-9]+]](s32) = G_ADD [[IN]], [[ONE]]
+; CHECK: [[SUM2:%[0-9]+]](s32) = G_ADD [[IN]], [[ONE]]
+; CHECK: [[RES:%[0-9]+]](s32) = G_ADD [[SUM1]], [[SUM2]]
; CHECK: %w0 = COPY [[RES]]
define i32 @constant_int(i32 %in) {
}
; CHECK-LABEL: name: constant_int_start
-; CHECK: [[TWO:%[0-9]+]](32) = G_CONSTANT s32 2
-; CHECK: [[ANSWER:%[0-9]+]](32) = G_CONSTANT s32 42
-; CHECK: [[RES:%[0-9]+]](32) = G_ADD s32 [[TWO]], [[ANSWER]]
+; CHECK: [[TWO:%[0-9]+]](s32) = G_CONSTANT 2
+; CHECK: [[ANSWER:%[0-9]+]](s32) = G_CONSTANT 42
+; CHECK: [[RES:%[0-9]+]](s32) = G_ADD [[TWO]], [[ANSWER]]
define i32 @constant_int_start() {
%res = add i32 2, 42
ret i32 %res
}
; CHECK-LABEL: name: test_undef
-; CHECK: [[UNDEF:%[0-9]+]](32) = IMPLICIT_DEF
+; CHECK: [[UNDEF:%[0-9]+]](s32) = IMPLICIT_DEF
; CHECK: %w0 = COPY [[UNDEF]]
define i32 @test_undef() {
ret i32 undef
}
; CHECK-LABEL: name: test_constant_inttoptr
-; CHECK: [[ONE:%[0-9]+]](64) = G_CONSTANT s64 1
-; CHECK: [[PTR:%[0-9]+]](64) = G_INTTOPTR { p0, s64 } [[ONE]]
+; CHECK: [[ONE:%[0-9]+]](s64) = G_CONSTANT 1
+; CHECK: [[PTR:%[0-9]+]](p0) = G_INTTOPTR [[ONE]]
; CHECK: %x0 = COPY [[PTR]]
define i8* @test_constant_inttoptr() {
ret i8* inttoptr(i64 1 to i8*)
; This failed purely because the Constant -> VReg map was kept across
; functions, so reuse the "i64 1" from above.
; CHECK-LABEL: name: test_reused_constant
-; CHECK: [[ONE:%[0-9]+]](64) = G_CONSTANT s64 1
+; CHECK: [[ONE:%[0-9]+]](s64) = G_CONSTANT 1
; CHECK: %x0 = COPY [[ONE]]
define i64 @test_reused_constant() {
ret i64 1
}
; CHECK-LABEL: name: test_sext
-; CHECK: [[IN:%[0-9]+]](32) = G_TYPE s32 %w0
-; CHECK: [[RES:%[0-9]+]](64) = G_SEXT { s64, s32 } [[IN]]
+; CHECK: [[IN:%[0-9]+]](s32) = COPY %w0
+; CHECK: [[RES:%[0-9]+]](s64) = G_SEXT [[IN]]
; CHECK: %x0 = COPY [[RES]]
define i64 @test_sext(i32 %in) {
%res = sext i32 %in to i64
}
; CHECK-LABEL: name: test_zext
-; CHECK: [[IN:%[0-9]+]](32) = G_TYPE s32 %w0
-; CHECK: [[RES:%[0-9]+]](64) = G_ZEXT { s64, s32 } [[IN]]
+; CHECK: [[IN:%[0-9]+]](s32) = COPY %w0
+; CHECK: [[RES:%[0-9]+]](s64) = G_ZEXT [[IN]]
; CHECK: %x0 = COPY [[RES]]
define i64 @test_zext(i32 %in) {
%res = zext i32 %in to i64
}
; CHECK-LABEL: name: test_shl
-; CHECK: [[ARG1:%[0-9]+]](32) = G_TYPE s32 %w0
-; CHECK-NEXT: [[ARG2:%[0-9]+]](32) = G_TYPE s32 %w1
-; CHECK-NEXT: [[RES:%[0-9]+]](32) = G_SHL s32 [[ARG1]], [[ARG2]]
+; CHECK: [[ARG1:%[0-9]+]](s32) = COPY %w0
+; CHECK-NEXT: [[ARG2:%[0-9]+]](s32) = COPY %w1
+; CHECK-NEXT: [[RES:%[0-9]+]](s32) = G_SHL [[ARG1]], [[ARG2]]
; CHECK-NEXT: %w0 = COPY [[RES]]
; CHECK-NEXT: RET_ReallyLR implicit %w0
define i32 @test_shl(i32 %arg1, i32 %arg2) {
; CHECK-LABEL: name: test_lshr
-; CHECK: [[ARG1:%[0-9]+]](32) = G_TYPE s32 %w0
-; CHECK-NEXT: [[ARG2:%[0-9]+]](32) = G_TYPE s32 %w1
-; CHECK-NEXT: [[RES:%[0-9]+]](32) = G_LSHR s32 [[ARG1]], [[ARG2]]
+; CHECK: [[ARG1:%[0-9]+]](s32) = COPY %w0
+; CHECK-NEXT: [[ARG2:%[0-9]+]](s32) = COPY %w1
+; CHECK-NEXT: [[RES:%[0-9]+]](s32) = G_LSHR [[ARG1]], [[ARG2]]
; CHECK-NEXT: %w0 = COPY [[RES]]
; CHECK-NEXT: RET_ReallyLR implicit %w0
define i32 @test_lshr(i32 %arg1, i32 %arg2) {
}
; CHECK-LABEL: name: test_ashr
-; CHECK: [[ARG1:%[0-9]+]](32) = G_TYPE s32 %w0
-; CHECK-NEXT: [[ARG2:%[0-9]+]](32) = G_TYPE s32 %w1
-; CHECK-NEXT: [[RES:%[0-9]+]](32) = G_ASHR s32 [[ARG1]], [[ARG2]]
+; CHECK: [[ARG1:%[0-9]+]](s32) = COPY %w0
+; CHECK-NEXT: [[ARG2:%[0-9]+]](s32) = COPY %w1
+; CHECK-NEXT: [[RES:%[0-9]+]](s32) = G_ASHR [[ARG1]], [[ARG2]]
; CHECK-NEXT: %w0 = COPY [[RES]]
; CHECK-NEXT: RET_ReallyLR implicit %w0
define i32 @test_ashr(i32 %arg1, i32 %arg2) {
}
; CHECK-LABEL: name: test_sdiv
-; CHECK: [[ARG1:%[0-9]+]](32) = G_TYPE s32 %w0
-; CHECK-NEXT: [[ARG2:%[0-9]+]](32) = G_TYPE s32 %w1
-; CHECK-NEXT: [[RES:%[0-9]+]](32) = G_SDIV s32 [[ARG1]], [[ARG2]]
+; CHECK: [[ARG1:%[0-9]+]](s32) = COPY %w0
+; CHECK-NEXT: [[ARG2:%[0-9]+]](s32) = COPY %w1
+; CHECK-NEXT: [[RES:%[0-9]+]](s32) = G_SDIV [[ARG1]], [[ARG2]]
; CHECK-NEXT: %w0 = COPY [[RES]]
; CHECK-NEXT: RET_ReallyLR implicit %w0
define i32 @test_sdiv(i32 %arg1, i32 %arg2) {
}
; CHECK-LABEL: name: test_udiv
-; CHECK: [[ARG1:%[0-9]+]](32) = G_TYPE s32 %w0
-; CHECK-NEXT: [[ARG2:%[0-9]+]](32) = G_TYPE s32 %w1
-; CHECK-NEXT: [[RES:%[0-9]+]](32) = G_UDIV s32 [[ARG1]], [[ARG2]]
+; CHECK: [[ARG1:%[0-9]+]](s32) = COPY %w0
+; CHECK-NEXT: [[ARG2:%[0-9]+]](s32) = COPY %w1
+; CHECK-NEXT: [[RES:%[0-9]+]](s32) = G_UDIV [[ARG1]], [[ARG2]]
; CHECK-NEXT: %w0 = COPY [[RES]]
; CHECK-NEXT: RET_ReallyLR implicit %w0
define i32 @test_udiv(i32 %arg1, i32 %arg2) {
}
; CHECK-LABEL: name: test_srem
-; CHECK: [[ARG1:%[0-9]+]](32) = G_TYPE s32 %w0
-; CHECK-NEXT: [[ARG2:%[0-9]+]](32) = G_TYPE s32 %w1
-; CHECK-NEXT: [[RES:%[0-9]+]](32) = G_SREM s32 [[ARG1]], [[ARG2]]
+; CHECK: [[ARG1:%[0-9]+]](s32) = COPY %w0
+; CHECK-NEXT: [[ARG2:%[0-9]+]](s32) = COPY %w1
+; CHECK-NEXT: [[RES:%[0-9]+]](s32) = G_SREM [[ARG1]], [[ARG2]]
; CHECK-NEXT: %w0 = COPY [[RES]]
; CHECK-NEXT: RET_ReallyLR implicit %w0
define i32 @test_srem(i32 %arg1, i32 %arg2) {
}
; CHECK-LABEL: name: test_urem
-; CHECK: [[ARG1:%[0-9]+]](32) = G_TYPE s32 %w0
-; CHECK-NEXT: [[ARG2:%[0-9]+]](32) = G_TYPE s32 %w1
-; CHECK-NEXT: [[RES:%[0-9]+]](32) = G_UREM s32 [[ARG1]], [[ARG2]]
+; CHECK: [[ARG1:%[0-9]+]](s32) = COPY %w0
+; CHECK-NEXT: [[ARG2:%[0-9]+]](s32) = COPY %w1
+; CHECK-NEXT: [[RES:%[0-9]+]](s32) = G_UREM [[ARG1]], [[ARG2]]
; CHECK-NEXT: %w0 = COPY [[RES]]
; CHECK-NEXT: RET_ReallyLR implicit %w0
define i32 @test_urem(i32 %arg1, i32 %arg2) {
}
; CHECK-LABEL: name: test_constant_null
-; CHECK: [[NULL:%[0-9]+]](64) = G_CONSTANT p0 0
+; CHECK: [[NULL:%[0-9]+]](p0) = G_CONSTANT 0
; CHECK: %x0 = COPY [[NULL]]
define i8* @test_constant_null() {
ret i8* null
}
; CHECK-LABEL: name: test_struct_memops
-; CHECK: [[ADDR:%[0-9]+]](64) = G_TYPE p0 %x0
-; CHECK: [[VAL:%[0-9]+]](64) = G_LOAD { s64, p0 } [[ADDR]] :: (load 8 from %ir.addr, align 4)
-; CHECK: G_STORE { s64, p0 } [[VAL]], [[ADDR]] :: (store 8 into %ir.addr, align 4)
+; CHECK: [[ADDR:%[0-9]+]](p0) = COPY %x0
+; CHECK: [[VAL:%[0-9]+]](s64) = G_LOAD [[ADDR]] :: (load 8 from %ir.addr, align 4)
+; CHECK: G_STORE [[VAL]], [[ADDR]] :: (store 8 into %ir.addr, align 4)
define void @test_struct_memops({ i8, i32 }* %addr) {
%val = load { i8, i32 }, { i8, i32 }* %addr
store { i8, i32 } %val, { i8, i32 }* %addr
}
; CHECK-LABEL: name: test_i1_memops
-; CHECK: [[ADDR:%[0-9]+]](64) = G_TYPE p0 %x0
-; CHECK: [[VAL:%[0-9]+]](1) = G_LOAD { s1, p0 } [[ADDR]] :: (load 1 from %ir.addr)
-; CHECK: G_STORE { s1, p0 } [[VAL]], [[ADDR]] :: (store 1 into %ir.addr)
+; CHECK: [[ADDR:%[0-9]+]](p0) = COPY %x0
+; CHECK: [[VAL:%[0-9]+]](s1) = G_LOAD [[ADDR]] :: (load 1 from %ir.addr)
+; CHECK: G_STORE [[VAL]], [[ADDR]] :: (store 1 into %ir.addr)
define void @test_i1_memops(i1* %addr) {
%val = load i1, i1* %addr
store i1 %val, i1* %addr
}
; CHECK-LABEL: name: int_comparison
-; CHECK: [[LHS:%[0-9]+]](32) = G_TYPE s32 %w0
-; CHECK: [[RHS:%[0-9]+]](32) = G_TYPE s32 %w1
-; CHECK: [[ADDR:%[0-9]+]](64) = G_TYPE p0 %x2
-; CHECK: [[TST:%[0-9]+]](1) = G_ICMP { s1, s32 } intpred(ne), [[LHS]], [[RHS]]
-; CHECK: G_STORE { s1, p0 } [[TST]], [[ADDR]]
+; CHECK: [[LHS:%[0-9]+]](s32) = COPY %w0
+; CHECK: [[RHS:%[0-9]+]](s32) = COPY %w1
+; CHECK: [[ADDR:%[0-9]+]](p0) = COPY %x2
+; CHECK: [[TST:%[0-9]+]](s1) = G_ICMP intpred(ne), [[LHS]], [[RHS]]
+; CHECK: G_STORE [[TST]], [[ADDR]]
define void @int_comparison(i32 %a, i32 %b, i1* %addr) {
%res = icmp ne i32 %a, %b
store i1 %res, i1* %addr
}
; CHECK-LABEL: name: test_fadd
-; CHECK: [[ARG1:%[0-9]+]](32) = G_TYPE s32 %s0
-; CHECK-NEXT: [[ARG2:%[0-9]+]](32) = G_TYPE s32 %s1
-; CHECK-NEXT: [[RES:%[0-9]+]](32) = G_FADD s32 [[ARG1]], [[ARG2]]
+; CHECK: [[ARG1:%[0-9]+]](s32) = COPY %s0
+; CHECK-NEXT: [[ARG2:%[0-9]+]](s32) = COPY %s1
+; CHECK-NEXT: [[RES:%[0-9]+]](s32) = G_FADD [[ARG1]], [[ARG2]]
; CHECK-NEXT: %s0 = COPY [[RES]]
; CHECK-NEXT: RET_ReallyLR implicit %s0
define float @test_fadd(float %arg1, float %arg2) {
}
; CHECK-LABEL: name: test_fsub
-; CHECK: [[ARG1:%[0-9]+]](32) = G_TYPE s32 %s0
-; CHECK-NEXT: [[ARG2:%[0-9]+]](32) = G_TYPE s32 %s1
-; CHECK-NEXT: [[RES:%[0-9]+]](32) = G_FSUB s32 [[ARG1]], [[ARG2]]
+; CHECK: [[ARG1:%[0-9]+]](s32) = COPY %s0
+; CHECK-NEXT: [[ARG2:%[0-9]+]](s32) = COPY %s1
+; CHECK-NEXT: [[RES:%[0-9]+]](s32) = G_FSUB [[ARG1]], [[ARG2]]
; CHECK-NEXT: %s0 = COPY [[RES]]
; CHECK-NEXT: RET_ReallyLR implicit %s0
define float @test_fsub(float %arg1, float %arg2) {
}
; CHECK-LABEL: name: test_fmul
-; CHECK: [[ARG1:%[0-9]+]](32) = G_TYPE s32 %s0
-; CHECK-NEXT: [[ARG2:%[0-9]+]](32) = G_TYPE s32 %s1
-; CHECK-NEXT: [[RES:%[0-9]+]](32) = G_FMUL s32 [[ARG1]], [[ARG2]]
+; CHECK: [[ARG1:%[0-9]+]](s32) = COPY %s0
+; CHECK-NEXT: [[ARG2:%[0-9]+]](s32) = COPY %s1
+; CHECK-NEXT: [[RES:%[0-9]+]](s32) = G_FMUL [[ARG1]], [[ARG2]]
; CHECK-NEXT: %s0 = COPY [[RES]]
; CHECK-NEXT: RET_ReallyLR implicit %s0
define float @test_fmul(float %arg1, float %arg2) {
}
; CHECK-LABEL: name: test_fdiv
-; CHECK: [[ARG1:%[0-9]+]](32) = G_TYPE s32 %s0
-; CHECK-NEXT: [[ARG2:%[0-9]+]](32) = G_TYPE s32 %s1
-; CHECK-NEXT: [[RES:%[0-9]+]](32) = G_FDIV s32 [[ARG1]], [[ARG2]]
+; CHECK: [[ARG1:%[0-9]+]](s32) = COPY %s0
+; CHECK-NEXT: [[ARG2:%[0-9]+]](s32) = COPY %s1
+; CHECK-NEXT: [[RES:%[0-9]+]](s32) = G_FDIV [[ARG1]], [[ARG2]]
; CHECK-NEXT: %s0 = COPY [[RES]]
; CHECK-NEXT: RET_ReallyLR implicit %s0
define float @test_fdiv(float %arg1, float %arg2) {
}
; CHECK-LABEL: name: test_frem
-; CHECK: [[ARG1:%[0-9]+]](32) = G_TYPE s32 %s0
-; CHECK-NEXT: [[ARG2:%[0-9]+]](32) = G_TYPE s32 %s1
-; CHECK-NEXT: [[RES:%[0-9]+]](32) = G_FREM s32 [[ARG1]], [[ARG2]]
+; CHECK: [[ARG1:%[0-9]+]](s32) = COPY %s0
+; CHECK-NEXT: [[ARG2:%[0-9]+]](s32) = COPY %s1
+; CHECK-NEXT: [[RES:%[0-9]+]](s32) = G_FREM [[ARG1]], [[ARG2]]
; CHECK-NEXT: %s0 = COPY [[RES]]
; CHECK-NEXT: RET_ReallyLR implicit %s0
define float @test_frem(float %arg1, float %arg2) {
}
; CHECK-LABEL: name: test_sadd_overflow
-; CHECK: [[LHS:%[0-9]+]](32) = G_TYPE s32 %w0
-; CHECK: [[RHS:%[0-9]+]](32) = G_TYPE s32 %w1
-; CHECK: [[ADDR:%[0-9]+]](64) = G_TYPE p0 %x2
-; CHECK: [[VAL:%[0-9]+]](32), [[OVERFLOW:%[0-9]+]](1) = G_SADDO { s32, s1 } [[LHS]], [[RHS]]
-; CHECK: [[RES:%[0-9]+]](64) = G_SEQUENCE { s64, s32, s1 } [[VAL]], 0, [[OVERFLOW]], 32
-; CHECK: G_STORE { s64, p0 } [[RES]], [[ADDR]]
+; CHECK: [[LHS:%[0-9]+]](s32) = COPY %w0
+; CHECK: [[RHS:%[0-9]+]](s32) = COPY %w1
+; CHECK: [[ADDR:%[0-9]+]](p0) = COPY %x2
+; CHECK: [[VAL:%[0-9]+]](s32), [[OVERFLOW:%[0-9]+]](s1) = G_SADDO [[LHS]], [[RHS]]
+; CHECK: [[RES:%[0-9]+]](s64) = G_SEQUENCE [[VAL]], 0, [[OVERFLOW]], 32
+; CHECK: G_STORE [[RES]], [[ADDR]]
declare { i32, i1 } @llvm.sadd.with.overflow.i32(i32, i32)
define void @test_sadd_overflow(i32 %lhs, i32 %rhs, { i32, i1 }* %addr) {
%res = call { i32, i1 } @llvm.sadd.with.overflow.i32(i32 %lhs, i32 %rhs)
}
; CHECK-LABEL: name: test_uadd_overflow
-; CHECK: [[LHS:%[0-9]+]](32) = G_TYPE s32 %w0
-; CHECK: [[RHS:%[0-9]+]](32) = G_TYPE s32 %w1
-; CHECK: [[ADDR:%[0-9]+]](64) = G_TYPE p0 %x2
-; CHECK: [[ZERO:%[0-9]+]](1) = G_CONSTANT s1 0
-; CHECK: [[VAL:%[0-9]+]](32), [[OVERFLOW:%[0-9]+]](1) = G_UADDE { s32, s1 } [[LHS]], [[RHS]], [[ZERO]]
-; CHECK: [[RES:%[0-9]+]](64) = G_SEQUENCE { s64, s32, s1 } [[VAL]], 0, [[OVERFLOW]], 32
-; CHECK: G_STORE { s64, p0 } [[RES]], [[ADDR]]
+; CHECK: [[LHS:%[0-9]+]](s32) = COPY %w0
+; CHECK: [[RHS:%[0-9]+]](s32) = COPY %w1
+; CHECK: [[ADDR:%[0-9]+]](p0) = COPY %x2
+; CHECK: [[ZERO:%[0-9]+]](s1) = G_CONSTANT 0
+; CHECK: [[VAL:%[0-9]+]](s32), [[OVERFLOW:%[0-9]+]](s1) = G_UADDE [[LHS]], [[RHS]], [[ZERO]]
+; CHECK: [[RES:%[0-9]+]](s64) = G_SEQUENCE [[VAL]], 0, [[OVERFLOW]], 32
+; CHECK: G_STORE [[RES]], [[ADDR]]
declare { i32, i1 } @llvm.uadd.with.overflow.i32(i32, i32)
define void @test_uadd_overflow(i32 %lhs, i32 %rhs, { i32, i1 }* %addr) {
%res = call { i32, i1 } @llvm.uadd.with.overflow.i32(i32 %lhs, i32 %rhs)
}
; CHECK-LABEL: name: test_ssub_overflow
-; CHECK: [[LHS:%[0-9]+]](32) = G_TYPE s32 %w0
-; CHECK: [[RHS:%[0-9]+]](32) = G_TYPE s32 %w1
-; CHECK: [[SUBR:%[0-9]+]](64) = G_TYPE p0 %x2
-; CHECK: [[VAL:%[0-9]+]](32), [[OVERFLOW:%[0-9]+]](1) = G_SSUBO { s32, s1 } [[LHS]], [[RHS]]
-; CHECK: [[RES:%[0-9]+]](64) = G_SEQUENCE { s64, s32, s1 } [[VAL]], 0, [[OVERFLOW]], 32
-; CHECK: G_STORE { s64, p0 } [[RES]], [[SUBR]]
+; CHECK: [[LHS:%[0-9]+]](s32) = COPY %w0
+; CHECK: [[RHS:%[0-9]+]](s32) = COPY %w1
+; CHECK: [[SUBR:%[0-9]+]](p0) = COPY %x2
+; CHECK: [[VAL:%[0-9]+]](s32), [[OVERFLOW:%[0-9]+]](s1) = G_SSUBO [[LHS]], [[RHS]]
+; CHECK: [[RES:%[0-9]+]](s64) = G_SEQUENCE [[VAL]], 0, [[OVERFLOW]], 32
+; CHECK: G_STORE [[RES]], [[SUBR]]
declare { i32, i1 } @llvm.ssub.with.overflow.i32(i32, i32)
define void @test_ssub_overflow(i32 %lhs, i32 %rhs, { i32, i1 }* %subr) {
%res = call { i32, i1 } @llvm.ssub.with.overflow.i32(i32 %lhs, i32 %rhs)
}
; CHECK-LABEL: name: test_usub_overflow
-; CHECK: [[LHS:%[0-9]+]](32) = G_TYPE s32 %w0
-; CHECK: [[RHS:%[0-9]+]](32) = G_TYPE s32 %w1
-; CHECK: [[SUBR:%[0-9]+]](64) = G_TYPE p0 %x2
-; CHECK: [[ZERO:%[0-9]+]](1) = G_CONSTANT s1 0
-; CHECK: [[VAL:%[0-9]+]](32), [[OVERFLOW:%[0-9]+]](1) = G_USUBE { s32, s1 } [[LHS]], [[RHS]], [[ZERO]]
-; CHECK: [[RES:%[0-9]+]](64) = G_SEQUENCE { s64, s32, s1 } [[VAL]], 0, [[OVERFLOW]], 32
-; CHECK: G_STORE { s64, p0 } [[RES]], [[SUBR]]
+; CHECK: [[LHS:%[0-9]+]](s32) = COPY %w0
+; CHECK: [[RHS:%[0-9]+]](s32) = COPY %w1
+; CHECK: [[SUBR:%[0-9]+]](p0) = COPY %x2
+; CHECK: [[ZERO:%[0-9]+]](s1) = G_CONSTANT 0
+; CHECK: [[VAL:%[0-9]+]](s32), [[OVERFLOW:%[0-9]+]](s1) = G_USUBE [[LHS]], [[RHS]], [[ZERO]]
+; CHECK: [[RES:%[0-9]+]](s64) = G_SEQUENCE [[VAL]], 0, [[OVERFLOW]], 32
+; CHECK: G_STORE [[RES]], [[SUBR]]
declare { i32, i1 } @llvm.usub.with.overflow.i32(i32, i32)
define void @test_usub_overflow(i32 %lhs, i32 %rhs, { i32, i1 }* %subr) {
%res = call { i32, i1 } @llvm.usub.with.overflow.i32(i32 %lhs, i32 %rhs)
}
; CHECK-LABEL: name: test_smul_overflow
-; CHECK: [[LHS:%[0-9]+]](32) = G_TYPE s32 %w0
-; CHECK: [[RHS:%[0-9]+]](32) = G_TYPE s32 %w1
-; CHECK: [[ADDR:%[0-9]+]](64) = G_TYPE p0 %x2
-; CHECK: [[VAL:%[0-9]+]](32), [[OVERFLOW:%[0-9]+]](1) = G_SMULO { s32, s1 } [[LHS]], [[RHS]]
-; CHECK: [[RES:%[0-9]+]](64) = G_SEQUENCE { s64, s32, s1 } [[VAL]], 0, [[OVERFLOW]], 32
-; CHECK: G_STORE { s64, p0 } [[RES]], [[ADDR]]
+; CHECK: [[LHS:%[0-9]+]](s32) = COPY %w0
+; CHECK: [[RHS:%[0-9]+]](s32) = COPY %w1
+; CHECK: [[ADDR:%[0-9]+]](p0) = COPY %x2
+; CHECK: [[VAL:%[0-9]+]](s32), [[OVERFLOW:%[0-9]+]](s1) = G_SMULO [[LHS]], [[RHS]]
+; CHECK: [[RES:%[0-9]+]](s64) = G_SEQUENCE [[VAL]], 0, [[OVERFLOW]], 32
+; CHECK: G_STORE [[RES]], [[ADDR]]
declare { i32, i1 } @llvm.smul.with.overflow.i32(i32, i32)
define void @test_smul_overflow(i32 %lhs, i32 %rhs, { i32, i1 }* %addr) {
%res = call { i32, i1 } @llvm.smul.with.overflow.i32(i32 %lhs, i32 %rhs)
}
; CHECK-LABEL: name: test_umul_overflow
-; CHECK: [[LHS:%[0-9]+]](32) = G_TYPE s32 %w0
-; CHECK: [[RHS:%[0-9]+]](32) = G_TYPE s32 %w1
-; CHECK: [[ADDR:%[0-9]+]](64) = G_TYPE p0 %x2
-; CHECK: [[VAL:%[0-9]+]](32), [[OVERFLOW:%[0-9]+]](1) = G_UMULO { s32, s1 } [[LHS]], [[RHS]]
-; CHECK: [[RES:%[0-9]+]](64) = G_SEQUENCE { s64, s32, s1 } [[VAL]], 0, [[OVERFLOW]], 32
-; CHECK: G_STORE { s64, p0 } [[RES]], [[ADDR]]
+; CHECK: [[LHS:%[0-9]+]](s32) = COPY %w0
+; CHECK: [[RHS:%[0-9]+]](s32) = COPY %w1
+; CHECK: [[ADDR:%[0-9]+]](p0) = COPY %x2
+; CHECK: [[VAL:%[0-9]+]](s32), [[OVERFLOW:%[0-9]+]](s1) = G_UMULO [[LHS]], [[RHS]]
+; CHECK: [[RES:%[0-9]+]](s64) = G_SEQUENCE [[VAL]], 0, [[OVERFLOW]], 32
+; CHECK: G_STORE [[RES]], [[ADDR]]
declare { i32, i1 } @llvm.umul.with.overflow.i32(i32, i32)
define void @test_umul_overflow(i32 %lhs, i32 %rhs, { i32, i1 }* %addr) {
%res = call { i32, i1 } @llvm.umul.with.overflow.i32(i32 %lhs, i32 %rhs)
}
; CHECK-LABEL: name: test_extractvalue
-; CHECK: [[STRUCT:%[0-9]+]](128) = G_LOAD { s128, p0 }
-; CHECK: [[RES:%[0-9]+]](32) = G_EXTRACT { s32, s128 } [[STRUCT]], 64
+; CHECK: [[STRUCT:%[0-9]+]](s128) = G_LOAD
+; CHECK: [[RES:%[0-9]+]](s32) = G_EXTRACT [[STRUCT]], 64
; CHECK: %w0 = COPY [[RES]]
%struct.nested = type {i8, { i8, i32 }, i32}
define i32 @test_extractvalue(%struct.nested* %addr) {
}
; CHECK-LABEL: name: test_extractvalue_agg
-; CHECK: [[STRUCT:%[0-9]+]](128) = G_LOAD { s128, p0 }
-; CHECK: [[RES:%[0-9]+]](64) = G_EXTRACT { s64, s128 } [[STRUCT]], 32
-; CHECK: G_STORE { s64, p0 } [[RES]]
+; CHECK: [[STRUCT:%[0-9]+]](s128) = G_LOAD
+; CHECK: [[RES:%[0-9]+]](s64) = G_EXTRACT [[STRUCT]], 32
+; CHECK: G_STORE [[RES]]
define void @test_extractvalue_agg(%struct.nested* %addr, {i8, i32}* %addr2) {
%struct = load %struct.nested, %struct.nested* %addr
%res = extractvalue %struct.nested %struct, 1
}
; CHECK-LABEL: name: test_insertvalue
-; CHECK: [[VAL:%[0-9]+]](32) = G_TYPE s32 %w1
-; CHECK: [[STRUCT:%[0-9]+]](128) = G_LOAD { s128, p0 }
-; CHECK: [[NEWSTRUCT:%[0-9]+]](128) = G_INSERT { s128, s32 } [[STRUCT]], [[VAL]], 64
-; CHECK: G_STORE { s128, p0 } [[NEWSTRUCT]],
+; CHECK: [[VAL:%[0-9]+]](s32) = COPY %w1
+; CHECK: [[STRUCT:%[0-9]+]](s128) = G_LOAD
+; CHECK: [[NEWSTRUCT:%[0-9]+]](s128) = G_INSERT [[STRUCT]], [[VAL]], 64
+; CHECK: G_STORE [[NEWSTRUCT]],
define void @test_insertvalue(%struct.nested* %addr, i32 %val) {
%struct = load %struct.nested, %struct.nested* %addr
%newstruct = insertvalue %struct.nested %struct, i32 %val, 1, 1
}
; CHECK-LABEL: name: test_insertvalue_agg
-; CHECK: [[SMALLSTRUCT:%[0-9]+]](64) = G_LOAD { s64, p0 }
-; CHECK: [[STRUCT:%[0-9]+]](128) = G_LOAD { s128, p0 }
-; CHECK: [[RES:%[0-9]+]](128) = G_INSERT { s128, s64 } [[STRUCT]], [[SMALLSTRUCT]], 32
-; CHECK: G_STORE { s128, p0 } [[RES]]
+; CHECK: [[SMALLSTRUCT:%[0-9]+]](s64) = G_LOAD
+; CHECK: [[STRUCT:%[0-9]+]](s128) = G_LOAD
+; CHECK: [[RES:%[0-9]+]](s128) = G_INSERT [[STRUCT]], [[SMALLSTRUCT]], 32
+; CHECK: G_STORE [[RES]]
define void @test_insertvalue_agg(%struct.nested* %addr, {i8, i32}* %addr2) {
%smallstruct = load {i8, i32}, {i8, i32}* %addr2
%struct = load %struct.nested, %struct.nested* %addr
}
; CHECK-LABEL: name: test_select
-; CHECK: [[TST:%[0-9]+]](1) = G_TYPE s1 %w0
-; CHECK: [[LHS:%[0-9]+]](32) = G_TYPE s32 %w1
-; CHECK: [[RHS:%[0-9]+]](32) = G_TYPE s32 %w2
-; CHECK: [[RES:%[0-9]+]](32) = G_SELECT { s32, s1 } [[TST]], [[LHS]], [[RHS]]
+; CHECK: [[TST:%[0-9]+]](s1) = COPY %w0
+; CHECK: [[LHS:%[0-9]+]](s32) = COPY %w1
+; CHECK: [[RHS:%[0-9]+]](s32) = COPY %w2
+; CHECK: [[RES:%[0-9]+]](s32) = G_SELECT [[TST]], [[LHS]], [[RHS]]
; CHECK: %w0 = COPY [[RES]]
define i32 @test_select(i1 %tst, i32 %lhs, i32 %rhs) {
%res = select i1 %tst, i32 %lhs, i32 %rhs
}
; CHECK-LABEL: name: test_fptosi
-; CHECK: [[FPADDR:%[0-9]+]](64) = G_TYPE p0 %x0
-; CHECK: [[FP:%[0-9]+]](32) = G_LOAD { s32, p0 } [[FPADDR]]
-; CHECK: [[RES:%[0-9]+]](64) = G_FPTOSI { s64, s32 } [[FP]]
+; CHECK: [[FPADDR:%[0-9]+]](p0) = COPY %x0
+; CHECK: [[FP:%[0-9]+]](s32) = G_LOAD [[FPADDR]]
+; CHECK: [[RES:%[0-9]+]](s64) = G_FPTOSI [[FP]]
; CHECK: %x0 = COPY [[RES]]
define i64 @test_fptosi(float* %fp.addr) {
%fp = load float, float* %fp.addr
}
; CHECK-LABEL: name: test_fptoui
-; CHECK: [[FPADDR:%[0-9]+]](64) = G_TYPE p0 %x0
-; CHECK: [[FP:%[0-9]+]](32) = G_LOAD { s32, p0 } [[FPADDR]]
-; CHECK: [[RES:%[0-9]+]](64) = G_FPTOUI { s64, s32 } [[FP]]
+; CHECK: [[FPADDR:%[0-9]+]](p0) = COPY %x0
+; CHECK: [[FP:%[0-9]+]](s32) = G_LOAD [[FPADDR]]
+; CHECK: [[RES:%[0-9]+]](s64) = G_FPTOUI [[FP]]
; CHECK: %x0 = COPY [[RES]]
define i64 @test_fptoui(float* %fp.addr) {
%fp = load float, float* %fp.addr
}
; CHECK-LABEL: name: test_sitofp
-; CHECK: [[ADDR:%[0-9]+]](64) = G_TYPE p0 %x0
-; CHECK: [[IN:%[0-9]+]](32) = G_TYPE s32 %w1
-; CHECK: [[FP:%[0-9]+]](64) = G_SITOFP { s64, s32 } [[IN]]
-; CHECK: G_STORE { s64, p0 } [[FP]], [[ADDR]]
+; CHECK: [[ADDR:%[0-9]+]](p0) = COPY %x0
+; CHECK: [[IN:%[0-9]+]](s32) = COPY %w1
+; CHECK: [[FP:%[0-9]+]](s64) = G_SITOFP [[IN]]
+; CHECK: G_STORE [[FP]], [[ADDR]]
define void @test_sitofp(double* %addr, i32 %in) {
%fp = sitofp i32 %in to double
store double %fp, double* %addr
}
; CHECK-LABEL: name: test_uitofp
-; CHECK: [[ADDR:%[0-9]+]](64) = G_TYPE p0 %x0
-; CHECK: [[IN:%[0-9]+]](32) = G_TYPE s32 %w1
-; CHECK: [[FP:%[0-9]+]](64) = G_UITOFP { s64, s32 } [[IN]]
-; CHECK: G_STORE { s64, p0 } [[FP]], [[ADDR]]
+; CHECK: [[ADDR:%[0-9]+]](p0) = COPY %x0
+; CHECK: [[IN:%[0-9]+]](s32) = COPY %w1
+; CHECK: [[FP:%[0-9]+]](s64) = G_UITOFP [[IN]]
+; CHECK: G_STORE [[FP]], [[ADDR]]
define void @test_uitofp(double* %addr, i32 %in) {
%fp = uitofp i32 %in to double
store double %fp, double* %addr
}
; CHECK-LABEL: name: test_fpext
-; CHECK: [[IN:%[0-9]+]](32) = G_TYPE s32 %s0
-; CHECK: [[RES:%[0-9]+]](64) = G_FPEXT { s64, s32 } [[IN]]
+; CHECK: [[IN:%[0-9]+]](s32) = COPY %s0
+; CHECK: [[RES:%[0-9]+]](s64) = G_FPEXT [[IN]]
; CHECK: %d0 = COPY [[RES]]
define double @test_fpext(float %in) {
%res = fpext float %in to double
}
; CHECK-LABEL: name: test_fptrunc
-; CHECK: [[IN:%[0-9]+]](64) = G_TYPE s64 %d0
-; CHECK: [[RES:%[0-9]+]](32) = G_FPTRUNC { s32, s64 } [[IN]]
+; CHECK: [[IN:%[0-9]+]](s64) = COPY %d0
+; CHECK: [[RES:%[0-9]+]](s32) = G_FPTRUNC [[IN]]
; CHECK: %s0 = COPY [[RES]]
define float @test_fptrunc(double %in) {
%res = fptrunc double %in to float
}
; CHECK-LABEL: name: test_constant_float
-; CHECK: [[ADDR:%[0-9]+]](64) = G_TYPE p0 %x0
-; CHECK: [[TMP:%[0-9]+]](32) = G_FCONSTANT s32 float 1.500000e+00
-; CHECK: G_STORE { s32, p0 } [[TMP]], [[ADDR]]
+; CHECK: [[ADDR:%[0-9]+]](p0) = COPY %x0
+; CHECK: [[TMP:%[0-9]+]](s32) = G_FCONSTANT float 1.500000e+00
+; CHECK: G_STORE [[TMP]], [[ADDR]]
define void @test_constant_float(float* %addr) {
store float 1.5, float* %addr
ret void
}
; CHECK-LABEL: name: float_comparison
-; CHECK: [[LHSADDR:%[0-9]+]](64) = G_TYPE p0 %x0
-; CHECK: [[RHSADDR:%[0-9]+]](64) = G_TYPE p0 %x1
-; CHECK: [[BOOLADDR:%[0-9]+]](64) = G_TYPE p0 %x2
-; CHECK: [[LHS:%[0-9]+]](32) = G_LOAD { s32, p0 } [[LHSADDR]]
-; CHECK: [[RHS:%[0-9]+]](32) = G_LOAD { s32, p0 } [[RHSADDR]]
-; CHECK: [[TST:%[0-9]+]](1) = G_FCMP { s1, s32 } floatpred(oge), [[LHS]], [[RHS]]
-; CHECK: G_STORE { s1, p0 } [[TST]], [[BOOLADDR]]
+; CHECK: [[LHSADDR:%[0-9]+]](p0) = COPY %x0
+; CHECK: [[RHSADDR:%[0-9]+]](p0) = COPY %x1
+; CHECK: [[BOOLADDR:%[0-9]+]](p0) = COPY %x2
+; CHECK: [[LHS:%[0-9]+]](s32) = G_LOAD [[LHSADDR]]
+; CHECK: [[RHS:%[0-9]+]](s32) = G_LOAD [[RHSADDR]]
+; CHECK: [[TST:%[0-9]+]](s1) = G_FCMP floatpred(oge), [[LHS]], [[RHS]]
+; CHECK: G_STORE [[TST]], [[BOOLADDR]]
define void @float_comparison(float* %a.addr, float* %b.addr, i1* %bool.addr) {
%a = load float, float* %a.addr
%b = load float, float* %b.addr
body: |
bb.0.entry:
liveins: %x0
- ; CHECK: %1(32) = G_ADD s32 %0
- %0(32) = G_TYPE s32 %w0
- %1(32) = G_ADD s32 %0, %0
+ ; CHECK: %1(s32) = G_ADD %0
+ %0(s32) = COPY %w0
+ %1(s32) = G_ADD %0, %0
...
---
body: |
bb.0.entry:
liveins: %d0
- ; CHECK: %0(64) = G_TYPE s64 %d0
- ; CHECK: %1(64) = G_ADD <2 x s32> %0
- %0(64) = G_TYPE s64 %d0
- %1(64) = G_ADD <2 x s32> %0, %0
+ ; CHECK: %0(<2 x s32>) = COPY %d0
+ ; CHECK: %1(<2 x s32>) = G_ADD %0
+ %0(<2 x s32>) = COPY %d0
+ %1(<2 x s32>) = G_ADD %0, %0
...
---
body: |
bb.0.entry:
liveins: %s0, %x0
- ; CHECK: %0(32) = G_TYPE s32 %s0
- ; CHECK-NEXT: %1(32) = G_TYPE s32 %w0
- ; CHECK-NEXT: %3(32) = COPY %0
- ; CHECK-NEXT: %2(32) = G_ADD s32 %3, %1
- %0(32) = G_TYPE s32 %s0
- %1(32) = G_TYPE s32 %w0
- %2(32) = G_ADD s32 %0, %1
+ ; CHECK: %0(s32) = COPY %s0
+ ; CHECK-NEXT: %1(s32) = COPY %w0
+ ; CHECK-NEXT: %3(s32) = COPY %0
+ ; CHECK-NEXT: %2(s32) = G_ADD %3, %1
+ %0(s32) = COPY %s0
+ %1(s32) = COPY %w0
+ %2(s32) = G_ADD %0, %1
...
# Check that we repair the assignment for %0 differently for both uses.
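+ # Each use gets its own cross-bank copy (%2 and %3 in the checks below),
+ # rather than one shared repaired value.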
body: |
bb.0.entry:
liveins: %s0, %x0
- ; CHECK: %0(32) = G_TYPE s32 %s0
- ; CHECK-NEXT: %2(32) = COPY %0
- ; CHECK-NEXT: %3(32) = COPY %0
- ; CHECK-NEXT: %1(32) = G_ADD s32 %2, %3
- %0(32) = G_TYPE s32 %s0
- %1(32) = G_ADD s32 %0, %0
+ ; CHECK: %0(s32) = COPY %s0
+ ; CHECK-NEXT: %2(s32) = COPY %0
+ ; CHECK-NEXT: %3(s32) = COPY %0
+ ; CHECK-NEXT: %1(s32) = G_ADD %2, %3
+ %0(s32) = COPY %s0
+ %1(s32) = G_ADD %0, %0
...
---
body: |
bb.0.entry:
liveins: %w0
- ; CHECK: %0(32) = G_TYPE s32 %w0
- ; CHECK-NEXT: %2(32) = G_ADD s32 %0, %0
- ; CHECK-NEXT: %1(32) = COPY %2
- %0(32) = G_TYPE s32 %w0
- %1(32) = G_ADD s32 %0, %0
+ ; CHECK: %0(s32) = COPY %w0
+ ; CHECK-NEXT: %2(s32) = G_ADD %0, %0
+ ; CHECK-NEXT: %1(s32) = COPY %2
+ %0(s32) = COPY %w0
+ %1(s32) = G_ADD %0, %0
...
---
- { id: 2, class: gpr32 }
- { id: 3, class: _ }
- { id: 4, class: _ }
+ - { id: 5, class: _ }
body: |
bb.0.entry:
successors: %bb.2.end, %bb.1.then
liveins: %x0, %x1, %w2
%0 = LDRWui killed %x0, 0 :: (load 4 from %ir.src)
- %1 = G_TYPE s64 %x1
- %2 = G_TYPE s32 %w2
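+ ; Give the generic G_ADD below a typed operand by copying %0 into %5.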
+ %5(s32) = COPY %0
+ %1 = COPY %x1
+ %2 = COPY %w2
TBNZW killed %2, 0, %bb.2.end
bb.1.then:
successors: %bb.2.end
- %3(32) = G_ADD s32 %0, %0
+ %3(s32) = G_ADD %5, %5
bb.2.end:
- %4(32) = PHI %0, %bb.0.entry, %3, %bb.1.then
+ %4(s32) = PHI %0, %bb.0.entry, %3, %bb.1.then
STRWui killed %4, killed %1, 0 :: (store 4 into %ir.dst)
RET_ReallyLR
...
body: |
bb.0.entry:
liveins: %w0, %s0
- ; CHECK: %0(32) = G_TYPE s32 %w0
- ; CHECK-NEXT: %1(32) = G_TYPE s32 %s0
- ; CHECK-NEXT: %3(32) = COPY %1
- ; CHECK-NEXT: %2(32) = G_ADD s32 %0, %3
- %0(32) = G_TYPE s32 %w0
- %1(32) = G_TYPE s32 %s0
- %2(32) = G_ADD s32 %0, %1
+ ; CHECK: %0(s32) = COPY %w0
+ ; CHECK-NEXT: %1(s32) = COPY %s0
+ ; CHECK-NEXT: %3(s32) = COPY %1
+ ; CHECK-NEXT: %2(s32) = G_ADD %0, %3
+ %0(s32) = COPY %w0
+ %1(s32) = COPY %s0
+ %2(s32) = G_ADD %0, %1
...
---
body: |
bb.0.entry:
liveins: %w0
- ; CHECK: %0(32) = G_TYPE s32 %w0
- ; CHECK-NEXT: %1(32) = G_ADD s32 %0, %0
+ ; CHECK: %0(s32) = COPY %w0
+ ; CHECK-NEXT: %1(s32) = G_ADD %0, %0
; CHECK-NEXT: %s0 = COPY %1
- %0(32) = G_TYPE s32 %w0
- %1(32) = G_ADD s32 %0, %0
+ %0(s32) = COPY %w0
+ %1(s32) = G_ADD %0, %0
%s0 = COPY %1
...
body: |
bb.0.entry:
liveins: %x0, %x1
- ; CHECK: %0(64) = G_TYPE s64 %x0
- ; CHECK-NEXT: %1(64) = G_TYPE s64 %x1
+ ; CHECK: %0(<2 x s32>) = COPY %x0
+ ; CHECK-NEXT: %1(<2 x s32>) = COPY %x1
; Fast mode tries to reuse the source of the copy for the destination.
; Now, the default mapping says that %0 and %1 need to be in FPR.
; The repairing code inserts two copies to materialize that.
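+ ; (%3 and %4 below are those repairing copies, one for each G_OR input.)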
- ; FAST-NEXT: %3(64) = COPY %0
- ; FAST-NEXT: %4(64) = COPY %1
+ ; FAST-NEXT: %3(<2 x s32>) = COPY %0
+ ; FAST-NEXT: %4(<2 x s32>) = COPY %1
; The mapping of G_OR is on FPR.
- ; FAST-NEXT: %2(64) = G_OR <2 x s32> %3, %4
+ ; FAST-NEXT: %2(<2 x s32>) = G_OR %3, %4
; Greedy mode remaps the instruction onto the GPR bank.
- ; GREEDY-NEXT: %2(64) = G_OR <2 x s32> %0, %1
- %0(64) = G_TYPE s64 %x0
- %1(64) = G_TYPE s64 %x1
- %2(64) = G_OR <2 x s32> %0, %1
+ ; GREEDY-NEXT: %2(<2 x s32>) = G_OR %0, %1
+ %0(<2 x s32>) = COPY %x0
+ %1(<2 x s32>) = COPY %x1
+ %2(<2 x s32>) = G_OR %0, %1
...
---
body: |
bb.0.entry:
liveins: %x0, %x1
- ; CHECK: %0(64) = G_TYPE s64 %x0
- ; CHECK-NEXT: %1(64) = G_TYPE s64 %x1
+ ; CHECK: %0(<2 x s32>) = COPY %x0
+ ; CHECK-NEXT: %1(<2 x s32>) = COPY %x1
; Fast mode tries to reuse the source of the copy for the destination.
; Now, the default mapping says that %0 and %1 need to be in FPR.
; The repairing code inserts two copies to materialize that.
- ; FAST-NEXT: %3(64) = COPY %0
- ; FAST-NEXT: %4(64) = COPY %1
+ ; FAST-NEXT: %3(<2 x s32>) = COPY %0
+ ; FAST-NEXT: %4(<2 x s32>) = COPY %1
; The mapping of G_OR is on FPR.
- ; FAST-NEXT: %2(64) = G_OR <2 x s32> %3, %4
+ ; FAST-NEXT: %2(<2 x s32>) = G_OR %3, %4
; Greedy mode remaps the instruction onto the GPR bank.
- ; GREEDY-NEXT: %3(64) = G_OR <2 x s32> %0, %1
+ ; GREEDY-NEXT: %3(<2 x s32>) = G_OR %0, %1
; We need to keep %2 in FPR because we do not know anything about it.
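+ ; The remapped G_OR therefore defines the fresh GPR vreg %3, and a COPY
+ ; moves the result back into %2.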
- ; GREEDY-NEXT: %2(64) = COPY %3
- %0(64) = G_TYPE s64 %x0
- %1(64) = G_TYPE s64 %x1
- %2(64) = G_OR <2 x s32> %0, %1
+ ; GREEDY-NEXT: %2(<2 x s32>) = COPY %3
+ %0(<2 x s32>) = COPY %x0
+ %1(<2 x s32>) = COPY %x1
+ %2(<2 x s32>) = G_OR %0, %1
...
---
bb.0:
liveins: %x0
- ; CHECK: %0 = G_TYPE s64 %x0
+ ; CHECK: %0 = COPY %x0
; CHECK-NEXT: %1 = ADDXrr %0, %0
; CHECK-NEXT: %x0 = COPY %1
; CHECK-NEXT: RET_ReallyLR implicit %x0
- %0 = G_TYPE s64 %x0
+ %0 = COPY %x0
%1 = ADDXrr %0, %0
%x0 = COPY %1
RET_ReallyLR implicit %x0
; CHECK-LABEL: name: test_simple_return
; CHECK: BL @simple_return_callee, csr_aarch64_aapcs, implicit-def %lr, implicit %sp, implicit-def %x0
-; CHECK: [[RES:%[0-9]+]](64) = G_TYPE s64 %x0
+; CHECK: [[RES:%[0-9]+]](s64) = COPY %x0
; CHECK: %x0 = COPY [[RES]]
; CHECK: RET_ReallyLR implicit %x0
declare i64 @simple_return_callee()
}
; CHECK-LABEL: name: test_simple_arg
-; CHECK: [[IN:%[0-9]+]](32) = G_TYPE s32 %w0
+; CHECK: [[IN:%[0-9]+]](s32) = COPY %w0
; CHECK: %w0 = COPY [[IN]]
; CHECK: BL @simple_arg_callee, csr_aarch64_aapcs, implicit-def %lr, implicit %sp, implicit %w0
; CHECK: RET_ReallyLR
}
; CHECK-LABEL: name: test_indirect_call
-; CHECK: [[FUNC:%[0-9]+]](64) = G_TYPE p0 %x0
+; CHECK: [[FUNC:%[0-9]+]](p0) = COPY %x0
; CHECK: BLR [[FUNC]], csr_aarch64_aapcs, implicit-def %lr, implicit %sp
; CHECK: RET_ReallyLR
define void @test_indirect_call(void()* %func) {
}
; CHECK-LABEL: name: test_multiple_args
-; CHECK: [[IN:%[0-9]+]](64) = G_TYPE s64 %x0
-; CHECK: [[ANSWER:%[0-9]+]](32) = G_CONSTANT s32 42
+; CHECK: [[IN:%[0-9]+]](s64) = COPY %x0
+; CHECK: [[ANSWER:%[0-9]+]](s32) = G_CONSTANT 42
; CHECK: %w0 = COPY [[ANSWER]]
; CHECK: %x1 = COPY [[IN]]
; CHECK: BL @multiple_args_callee, csr_aarch64_aapcs, implicit-def %lr, implicit %sp, implicit %w0, implicit %x1
; CHECK-LABEL: name: test_scalar_add_big
; CHECK-NOT: G_EXTRACT
; CHECK-NOT: G_SEQUENCE
- ; CHECK-DAG: [[CARRY0_32:%.*]](32) = G_CONSTANT s32 0
- ; CHECK-DAG: [[CARRY0:%[0-9]+]](1) = G_TRUNC { s1, s32 } [[CARRY0_32]]
- ; CHECK: [[RES_LO:%.*]](64), [[CARRY:%.*]](1) = G_UADDE s64 %0, %2, [[CARRY0]]
- ; CHECK: [[RES_HI:%.*]](64), {{%.*}}(1) = G_UADDE s64 %1, %3, [[CARRY]]
+ ; CHECK-DAG: [[CARRY0_32:%.*]](s32) = G_CONSTANT 0
+ ; CHECK-DAG: [[CARRY0:%[0-9]+]](s1) = G_TRUNC [[CARRY0_32]]
+ ; CHECK: [[RES_LO:%.*]](s64), [[CARRY:%.*]](s1) = G_UADDE %0, %2, [[CARRY0]]
+ ; CHECK: [[RES_HI:%.*]](s64), {{%.*}}(s1) = G_UADDE %1, %3, [[CARRY]]
; CHECK-NOT: G_EXTRACT
; CHECK-NOT: G_SEQUENCE
; CHECK: %x0 = COPY [[RES_LO]]
; CHECK: %x1 = COPY [[RES_HI]]
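+ ; The s128 G_ADD below is narrowed into the two chained s64 G_UADDEs
+ ; checked above, linked through [[CARRY]].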
- %0(64) = G_TYPE s64 %x0
- %1(64) = G_TYPE s64 %x1
- %2(64) = G_TYPE s64 %x2
- %3(64) = G_TYPE s64 %x3
- %4(128) = G_SEQUENCE { s128, s64, s64 } %0, 0, %1, 64
- %5(128) = G_SEQUENCE { s128, s64, s64 } %2, 0, %3, 64
- %6(128) = G_ADD s128 %4, %5
- %7(64), %8(64) = G_EXTRACT { s64, s64, s128 } %6, 0, 64
+ %0(s64) = COPY %x0
+ %1(s64) = COPY %x1
+ %2(s64) = COPY %x2
+ %3(s64) = COPY %x3
+ %4(s128) = G_SEQUENCE %0, 0, %1, 64
+ %5(s128) = G_SEQUENCE %2, 0, %3, 64
+ %6(s128) = G_ADD %4, %5
+ %7(s64), %8(s64) = G_EXTRACT %6, 0, 64
%x0 = COPY %7
%x1 = COPY %8
...
bb.0.entry:
liveins: %x0, %x1, %x2, %x3
; CHECK-LABEL: name: test_scalar_add_small
- ; CHECK: [[RES:%.*]](8) = G_ADD s8 %2, %3
+ ; CHECK: [[RES:%.*]](s8) = G_ADD %2, %3
- %0(64) = G_TYPE s64 %x0
- %1(64) = G_TYPE s64 %x1
- %2(8) = G_TRUNC { s8, s64 } %0
- %3(8) = G_TRUNC { s8, s64 } %1
- %4(8) = G_ADD s8 %2, %3
- %5(64) = G_ANYEXT { s64, s8 } %4
+ %0(s64) = COPY %x0
+ %1(s64) = COPY %x1
+ %2(s8) = G_TRUNC %0
+ %3(s8) = G_TRUNC %1
+ %4(s8) = G_ADD %2, %3
+ %5(s64) = G_ANYEXT %4
%x0 = COPY %5
...
; CHECK-LABEL: name: test_vector_add
; CHECK-NOT: G_EXTRACT
; CHECK-NOT: G_SEQUENCE
- ; CHECK: [[RES_LO:%.*]](128) = G_ADD <2 x s64> %0, %2
- ; CHECK: [[RES_HI:%.*]](128) = G_ADD <2 x s64> %1, %3
+ ; CHECK: [[RES_LO:%.*]](<2 x s64>) = G_ADD %0, %2
+ ; CHECK: [[RES_HI:%.*]](<2 x s64>) = G_ADD %1, %3
; CHECK-NOT: G_EXTRACT
; CHECK-NOT: G_SEQUENCE
; CHECK: %q0 = COPY [[RES_LO]]
; CHECK: %q1 = COPY [[RES_HI]]
- %0(128) = G_TYPE s128 %q0
- %1(128) = G_TYPE s128 %q1
- %2(128) = G_TYPE s128 %q2
- %3(128) = G_TYPE s128 %q3
- %4(256) = G_SEQUENCE { s256, s128, s128 } %0, 0, %1, 128
- %5(256) = G_SEQUENCE { s256, s128, s128 } %2, 0, %3, 128
- %6(256) = G_ADD <4 x s64> %4, %5
- %7(128), %8(128) = G_EXTRACT { s128, s128, s256 } %6, 0, 128
+ %0(<2 x s64>) = COPY %q0
+ %1(<2 x s64>) = COPY %q1
+ %2(<2 x s64>) = COPY %q2
+ %3(<2 x s64>) = COPY %q3
+ %4(<4 x s64>) = G_SEQUENCE %0, 0, %1, 128
+ %5(<4 x s64>) = G_SEQUENCE %2, 0, %3, 128
+ %6(<4 x s64>) = G_ADD %4, %5
+ %7(<2 x s64>), %8(<2 x s64>) = G_EXTRACT %6, 0, 128
%q0 = COPY %7
%q1 = COPY %8
...
bb.0.entry:
liveins: %x0, %x1, %x2, %x3
; CHECK-LABEL: name: test_scalar_and_small
- ; CHECK: %4(8) = G_AND s8 %2, %3
+ ; CHECK: %4(s8) = G_AND %2, %3
- %0(64) = G_TYPE s64 %x0
- %1(64) = G_TYPE s64 %x1
- %2(8) = G_TRUNC { s8, s64 } %0
- %3(8) = G_TRUNC { s8, s64 } %1
- %4(8) = G_AND s8 %2, %3
- %5(64) = G_ANYEXT { s64, s8 } %2
+ %0(s64) = COPY %x0
+ %1(s64) = COPY %x1
+ %2(s8) = G_TRUNC %0
+ %3(s8) = G_TRUNC %1
+ %4(s8) = G_AND %2, %3
+ %5(s64) = G_ANYEXT %2
%x0 = COPY %5
...
body: |
bb.0.entry:
liveins: %x0, %x1, %x2, %x3
- %0(64) = G_TYPE s64 %x0
- %1(64) = G_TYPE s64 %x0
+ %0(s64) = COPY %x0
+ %1(s64) = COPY %x0
- %2(8) = G_TRUNC { s8, s64 } %0
- %3(8) = G_TRUNC { s8, s64 } %1
+ %2(s8) = G_TRUNC %0
+ %3(s8) = G_TRUNC %1
- ; CHECK: %4(1) = G_ICMP { s1, s64 } intpred(sge), %0, %1
- %4(1) = G_ICMP { s1, s64 } intpred(sge), %0, %1
+ ; CHECK: %4(s1) = G_ICMP intpred(sge), %0, %1
+ %4(s1) = G_ICMP intpred(sge), %0, %1
- ; CHECK: [[LHS32:%[0-9]+]](32) = G_ZEXT { s32, s8 } %2
- ; CHECK: [[RHS32:%[0-9]+]](32) = G_ZEXT { s32, s8 } %3
- ; CHECK: %8(1) = G_ICMP { s1, s32 } intpred(ult), [[LHS32]], [[RHS32]]
- %8(1) = G_ICMP { s1, s8 } intpred(ult), %2, %3
+ ; CHECK: [[LHS32:%[0-9]+]](s32) = G_ZEXT %2
+ ; CHECK: [[RHS32:%[0-9]+]](s32) = G_ZEXT %3
+ ; CHECK: %8(s1) = G_ICMP intpred(ult), [[LHS32]], [[RHS32]]
+ %8(s1) = G_ICMP intpred(ult), %2, %3
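+ ; The s8 compare is widened: both sides are zero-extended to s32 first.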
...
bb.0.entry:
liveins: %w0, %w1, %x2, %x3
- %0(32) = G_TYPE s32 %w0
- %1(32) = G_TYPE s32 %w1
- %2(8) = G_TRUNC { s8, s32 } %0
+ %0(s32) = COPY %w0
+ %1(s32) = COPY %w1
+ %2(s8) = G_TRUNC %0
; Only one of these extracts can be eliminated; in the other cases the
; offsets don't match up properly.
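+ ; (%5, the extract at offset 1, lines up with where %2 was sequenced,
+ ; so it folds away and the final G_ZEXT reads %2 directly.)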
; CHECK-LABEL: name: test_combines
- ; CHECK: %3(32) = G_SEQUENCE { s32, s8 } %2, 1
- ; CHECK: %4(8) = G_EXTRACT { s8, s32 } %3, 0
+ ; CHECK: %3(s32) = G_SEQUENCE %2, 1
+ ; CHECK: %4(s8) = G_EXTRACT %3, 0
; CHECK-NOT: G_EXTRACT
- ; CHECK: %6(8) = G_EXTRACT { s8, s32 } %3, 2
- ; CHECK: %7(32) = G_ZEXT { s32, s8 } %2
- %3(32) = G_SEQUENCE { s32, s8 } %2, 1
- %4(8) = G_EXTRACT { s8, s32 } %3, 0
- %5(8) = G_EXTRACT { s8, s32 } %3, 1
- %6(8) = G_EXTRACT { s8, s32 } %3, 2
- %7(32) = G_ZEXT { s32, s8 } %5
+ ; CHECK: %6(s8) = G_EXTRACT %3, 2
+ ; CHECK: %7(s32) = G_ZEXT %2
+ %3(s32) = G_SEQUENCE %2, 1
+ %4(s8) = G_EXTRACT %3, 0
+ %5(s8) = G_EXTRACT %3, 1
+ %6(s8) = G_EXTRACT %3, 2
+ %7(s32) = G_ZEXT %5
; Similarly, here the types don't match.
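+ ; %11 (s1) and %12 (s32) both read offset 0, but neither matches the
+ ; s16 pieces that built %10, so the extracts survive.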
- ; CHECK: %10(32) = G_SEQUENCE { s32, s16, s16 } %8, 0, %9, 16
- ; CHECK: %11(1) = G_EXTRACT { s1, s32 } %10, 0
- ; CHECK: %12(32) = G_EXTRACT { s32, s32 } %10, 0
- %8(16) = G_TRUNC { s16, s32 } %0
- %9(16) = G_ADD s16 %8, %8
- %10(32) = G_SEQUENCE { s32, s16, s16 } %8, 0, %9, 16
- %11(1) = G_EXTRACT { s1, s32 } %10, 0
- %12(32) = G_EXTRACT { s32, s32 } %10, 0
+ ; CHECK: %10(s32) = G_SEQUENCE %8, 0, %9, 16
+ ; CHECK: %11(s1) = G_EXTRACT %10, 0
+ ; CHECK: %12(s32) = G_EXTRACT %10, 0
+ %8(s16) = G_TRUNC %0
+ %9(s16) = G_ADD %8, %8
+ %10(s32) = G_SEQUENCE %8, 0, %9, 16
+ %11(s1) = G_EXTRACT %10, 0
+ %12(s32) = G_EXTRACT %10, 0
; CHECK-NOT: G_EXTRACT
- ; CHECK: %15(16) = G_ADD s16 %8, %9
- %13(16), %14(16) = G_EXTRACT { s16, s16, s32 } %10, 0, 16
- %15(16) = G_ADD s16 %13, %14
+ ; CHECK: %15(s16) = G_ADD %8, %9
+ %13(s16), %14(s16) = G_EXTRACT %10, 0, 16
+ %15(s16) = G_ADD %13, %14
- ; CHECK: %18(64) = G_EXTRACT { <2 x s32>, s128 } %17, 0
- ; CHECK: %19(64) = G_ADD <2 x s32> %18, %18
- %16(64) = G_TYPE s64 %x0
- %17(128) = G_SEQUENCE { s128, s64, s64 } %16, 0, %16, 64
- %18(64) = G_EXTRACT { <2 x s32>, s128 } %17, 0
- %19(64) = G_ADD <2 x s32> %18, %18
+ ; CHECK: %18(<2 x s32>) = G_EXTRACT %17, 0
+ ; CHECK: %19(<2 x s32>) = G_ADD %18, %18
+ %16(s64) = COPY %x0
+ %17(s128) = G_SEQUENCE %16, 0, %16, 64
+ %18(<2 x s32>) = G_EXTRACT %17, 0
+ %19(<2 x s32>) = G_ADD %18, %18
; CHECK-NOT: G_SEQUENCE
; CHECK-NOT: G_EXTRACT
- ; CHECK: %24(32) = G_ADD s32 %0, %20
- %20(32) = G_ADD s32 %0, %0
- %21(64) = G_SEQUENCE { s64, s32, s32 } %0, 0, %20, 32
- %22(32) = G_EXTRACT { s32, s64 } %21, 0
- %23(32) = G_EXTRACT { s32, s64 } %21, 32
- %24(32) = G_ADD s32 %22, %23
+ ; CHECK: %24(s32) = G_ADD %0, %20
+ %20(s32) = G_ADD %0, %0
+ %21(s64) = G_SEQUENCE %0, 0, %20, 32
+ %22(s32) = G_EXTRACT %21, 0
+ %23(s32) = G_EXTRACT %21, 32
+ %24(s32) = G_ADD %22, %23
...
body: |
bb.0.entry:
; CHECK-LABEL: name: test_constant
- ; CHECK: [[TMP:%[0-9]+]](32) = G_CONSTANT s32 0
- ; CHECK: %0(1) = G_TRUNC { s1, s32 } [[TMP]]
- ; CHECK: [[TMP:%[0-9]+]](32) = G_CONSTANT s32 42
- ; CHECK: %1(8) = G_TRUNC { s8, s32 } [[TMP]]
- ; CHECK: [[TMP:%[0-9]+]](32) = G_CONSTANT s32 65535
- ; CHECK: %2(16) = G_TRUNC { s16, s32 } [[TMP]]
- ; CHECK: %3(32) = G_CONSTANT s32 -1
- ; CHECK: %4(64) = G_CONSTANT s64 1
- ; CHECK: %5(64) = G_CONSTANT p0 0
+ ; CHECK: [[TMP:%[0-9]+]](s32) = G_CONSTANT 0
+ ; CHECK: %0(s1) = G_TRUNC [[TMP]]
+ ; CHECK: [[TMP:%[0-9]+]](s32) = G_CONSTANT 42
+ ; CHECK: %1(s8) = G_TRUNC [[TMP]]
+ ; CHECK: [[TMP:%[0-9]+]](s32) = G_CONSTANT 65535
+ ; CHECK: %2(s16) = G_TRUNC [[TMP]]
+ ; CHECK: %3(s32) = G_CONSTANT -1
+ ; CHECK: %4(s64) = G_CONSTANT 1
+ ; CHECK: %5(s64) = G_CONSTANT 0
- %0(1) = G_CONSTANT s1 0
- %1(8) = G_CONSTANT s8 42
- %2(16) = G_CONSTANT s16 65535
- %3(32) = G_CONSTANT s32 -1
- %4(64) = G_CONSTANT s64 1
- %5(64) = G_CONSTANT p0 0
+ %0(s1) = G_CONSTANT 0
+ %1(s8) = G_CONSTANT 42
+ %2(s16) = G_CONSTANT 65535
+ %3(s32) = G_CONSTANT -1
+ %4(s64) = G_CONSTANT 1
+ %5(s64) = G_CONSTANT 0
...
---
body: |
bb.0.entry:
; CHECK-LABEL: name: test_fconstant
- ; CHECK: %0(32) = G_FCONSTANT s32 float 1.000000e+00
- ; CHECK: %1(64) = G_FCONSTANT s64 double 2.000000e+00
- ; CHECK: [[TMP:%[0-9]+]](32) = G_FCONSTANT s32 half 0xH0000
- ; CHECK; %2(16) = G_FPTRUNC { s16, s32 } [[TMP]]
+ ; CHECK: %0(s32) = G_FCONSTANT float 1.000000e+00
+ ; CHECK: %1(s64) = G_FCONSTANT double 2.000000e+00
+ ; CHECK: [[TMP:%[0-9]+]](s32) = G_FCONSTANT half 0xH0000
+ ; CHECK: %2(s16) = G_FPTRUNC [[TMP]]
- %0(32) = G_FCONSTANT s32 float 1.0
- %1(64) = G_FCONSTANT s64 double 2.0
- %2(16) = G_FCONSTANT s16 half 0.0
+ %0(s32) = G_FCONSTANT float 1.0
+ %1(s64) = G_FCONSTANT double 2.0
+ %2(s16) = G_FCONSTANT half 0.0
...
body: |
bb.0.entry:
liveins: %x0, %x1, %x2, %x3
- %0(64) = G_TYPE s64 %x0
- %1(64) = G_TYPE s64 %x1
- %2(32) = G_TRUNC { s8, s64 } %0
- %3(32) = G_TRUNC { s8, s64 } %1
+ %0(s64) = COPY %x0
+ %1(s64) = COPY %x1
+ %2(s8) = G_TRUNC %0
+ %3(s8) = G_TRUNC %1
- ; CHECK: [[LHS32:%[0-9]+]](32) = G_SEXT { s32, s8 } %2
- ; CHECK: [[RHS32:%[0-9]+]](32) = G_SEXT { s32, s8 } %3
- ; CHECK: [[QUOT32:%[0-9]+]](32) = G_SDIV s32 [[LHS32]], [[RHS32]]
- ; CHECK: [[RES:%[0-9]+]](8) = G_TRUNC { s8, s32 } [[QUOT32]]
- %4(8) = G_SDIV s8 %2, %3
+ ; CHECK: [[LHS32:%[0-9]+]](s32) = G_SEXT %2
+ ; CHECK: [[RHS32:%[0-9]+]](s32) = G_SEXT %3
+ ; CHECK: [[QUOT32:%[0-9]+]](s32) = G_SDIV [[LHS32]], [[RHS32]]
+ ; CHECK: [[RES:%[0-9]+]](s8) = G_TRUNC [[QUOT32]]
+ %4(s8) = G_SDIV %2, %3
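+ ; The s8 sdiv is widened: sign-extend both operands, divide at s32, and
+ ; truncate the quotient back to s8.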
- ; CHECK: [[LHS32:%[0-9]+]](32) = G_ZEXT { s32, s8 } %2
- ; CHECK: [[RHS32:%[0-9]+]](32) = G_ZEXT { s32, s8 } %3
- ; CHECK: [[QUOT32:%[0-9]+]](32) = G_UDIV s32 [[LHS32]], [[RHS32]]
- ; CHECK: [[RES:%[0-9]+]](8) = G_TRUNC { s8, s32 } [[QUOT32]]
- %5(8) = G_UDIV s8 %2, %3
+ ; CHECK: [[LHS32:%[0-9]+]](s32) = G_ZEXT %2
+ ; CHECK: [[RHS32:%[0-9]+]](s32) = G_ZEXT %3
+ ; CHECK: [[QUOT32:%[0-9]+]](s32) = G_UDIV [[LHS32]], [[RHS32]]
+ ; CHECK: [[RES:%[0-9]+]](s8) = G_TRUNC [[QUOT32]]
+ %5(s8) = G_UDIV %2, %3
...
body: |
bb.0.entry:
liveins: %x0, %x1, %x2, %x3
- %0(64) = G_TYPE s64 %x0
+ %0(s64) = COPY %x0
- ; CHECK: %1(1) = G_TRUNC { s1, s64 } %0
- ; CHECK: %2(8) = G_TRUNC { s8, s64 } %0
- ; CHECK: %3(16) = G_TRUNC { s16, s64 } %0
- ; CHECK: %4(32) = G_TRUNC { s16, s64 } %0
- %1(1) = G_TRUNC { s1, s64 } %0
- %2(8) = G_TRUNC { s8, s64 } %0
- %3(16) = G_TRUNC { s16, s64 } %0
- %4(32) = G_TRUNC { s16, s64 } %0
+ ; CHECK: %1(s1) = G_TRUNC %0
+ ; CHECK: %2(s8) = G_TRUNC %0
+ ; CHECK: %3(s16) = G_TRUNC %0
+ ; CHECK: %4(s32) = G_TRUNC %0
+ %1(s1) = G_TRUNC %0
+ %2(s8) = G_TRUNC %0
+ %3(s16) = G_TRUNC %0
+ %4(s32) = G_TRUNC %0
- ; CHECK: %5(64) = G_ANYEXT { s64, s1 } %1
- ; CHECK: %6(64) = G_ZEXT { s64, s8 } %2
- ; CHECK: %7(64) = G_ANYEXT { s64, s16 } %3
- ; CHECK: %8(64) = G_SEXT { s64, s32 } %4
- %5(64) = G_ANYEXT { s64, s1 } %1
- %6(64) = G_ZEXT { s64, s8 } %2
- %7(64) = G_ANYEXT { s64, s16 } %3
- %8(64) = G_SEXT { s64, s32 } %4
+ ; CHECK: %5(s64) = G_ANYEXT %1
+ ; CHECK: %6(s64) = G_ZEXT %2
+ ; CHECK: %7(s64) = G_ANYEXT %3
+ ; CHECK: %8(s64) = G_SEXT %4
+ %5(s64) = G_ANYEXT %1
+ %6(s64) = G_ZEXT %2
+ %7(s64) = G_ANYEXT %3
+ %8(s64) = G_SEXT %4
- ; CHECK: %9(32) = G_SEXT { s32, s1 } %1
- ; CHECK: %10(32) = G_ZEXT { s32, s8 } %2
- ; CHECK: %11(32) = G_ANYEXT { s32, s16 } %3
- %9(32) = G_SEXT { s32, s1 } %1
- %10(32) = G_ZEXT { s32, s8 } %2
- %11(32) = G_ANYEXT { s32, s16 } %3
+ ; CHECK: %9(s32) = G_SEXT %1
+ ; CHECK: %10(s32) = G_ZEXT %2
+ ; CHECK: %11(s32) = G_ANYEXT %3
+ %9(s32) = G_SEXT %1
+ %10(s32) = G_ZEXT %2
+ %11(s32) = G_ANYEXT %3
- ; CHECK: %12(32) = G_ZEXT { s32, s1 } %1
- ; CHECK: %13(32) = G_ANYEXT { s32, s8 } %2
- ; CHECK: %14(32) = G_SEXT { s32, s16 } %3
- %12(32) = G_ZEXT { s32, s1 } %1
- %13(32) = G_ANYEXT { s32, s8 } %2
- %14(32) = G_SEXT { s32, s16 } %3
+ ; CHECK: %12(s32) = G_ZEXT %1
+ ; CHECK: %13(s32) = G_ANYEXT %2
+ ; CHECK: %14(s32) = G_SEXT %3
+ %12(s32) = G_ZEXT %1
+ %13(s32) = G_ANYEXT %2
+ %14(s32) = G_SEXT %3
- ; CHECK: %15(8) = G_ZEXT { s8, s1 } %1
- ; CHECK: %16(16) = G_ANYEXT { s16, s8 } %2
- %15(8) = G_ZEXT { s8, s1 } %1
- %16(16) = G_ANYEXT { s16, s8 } %2
+ ; CHECK: %15(s8) = G_ZEXT %1
+ ; CHECK: %16(s16) = G_ANYEXT %2
+ %15(s8) = G_ZEXT %1
+ %16(s16) = G_ANYEXT %2
- ; CHECK: %18(64) = G_FPEXT { s64, s32 } %17
- %17(32) = G_TRUNC { s32, s64 } %0
- %18(64) = G_FPEXT { s64, s32 } %17
+ ; CHECK: %18(s64) = G_FPEXT %17
+ %17(s32) = G_TRUNC %0
+ %18(s64) = G_FPEXT %17
...
body: |
bb.0.entry:
liveins: %x0, %x1, %x2, %x3
- %0(64) = G_TYPE s64 %x0
- %1(64) = G_TYPE s64 %x0
+ %0(s64) = COPY %x0
+ %1(s64) = COPY %x0
- %2(8) = G_TRUNC { s32, s64 } %0
- %3(8) = G_TRUNC { s32, s64 } %1
+ %2(s32) = G_TRUNC %0
+ %3(s32) = G_TRUNC %1
- ; CHECK: %4(1) = G_FCMP { s1, s64 } floatpred(oge), %0, %1
- %4(1) = G_FCMP { s1, s64 } floatpred(oge), %0, %1
+ ; CHECK: %4(s1) = G_FCMP floatpred(oge), %0, %1
+ %4(s1) = G_FCMP floatpred(oge), %0, %1
- ; CHECK: %5(1) = G_FCMP { s1, s32 } floatpred(uno), %2, %3
- %5(1) = G_FCMP { s1, s32 } floatpred(uno), %2, %3
+ ; CHECK: %5(s1) = G_FCMP floatpred(uno), %2, %3
+ %5(s1) = G_FCMP floatpred(uno), %2, %3
...
bb.0:
liveins: %x0
; CHECK-LABEL: name: test_copy
- ; CHECK: %0(64) = G_TYPE s64 %x0
+ ; CHECK: %0(s64) = COPY %x0
; CHECK-NEXT: %x0 = COPY %0
- %0(64) = G_TYPE s64 %x0
+ %0(s64) = COPY %x0
%x0 = COPY %0
...
bb.0.entry:
liveins: %x0, %x1, %x2, %x3
; CHECK-LABEL: name: test_load
- %0(64) = G_TYPE s64 %x0
+ %0(p0) = COPY %x0
- ; CHECK: [[BIT8:%[0-9]+]](8) = G_LOAD { s8, p0 } %0 :: (load 1 from %ir.addr)
- ; CHECK: %1(1) = G_TRUNC { s1, s8 } [[BIT8]]
- %1(1) = G_LOAD { s1, p0 } %0 :: (load 1 from %ir.addr)
+ ; CHECK: [[BIT8:%[0-9]+]](s8) = G_LOAD %0 :: (load 1 from %ir.addr)
+ ; CHECK: %1(s1) = G_TRUNC [[BIT8]]
+ %1(s1) = G_LOAD %0 :: (load 1 from %ir.addr)
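+ ; The s1 load is legalized to an s8 load followed by a G_TRUNC.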
- ; CHECK: %2(8) = G_LOAD { s8, p0 } %0 :: (load 1 from %ir.addr)
- %2(8) = G_LOAD { s8, p0 } %0 :: (load 1 from %ir.addr)
+ ; CHECK: %2(s8) = G_LOAD %0 :: (load 1 from %ir.addr)
+ %2(s8) = G_LOAD %0 :: (load 1 from %ir.addr)
- ; CHECK: %3(16) = G_LOAD { s16, p0 } %0 :: (load 2 from %ir.addr)
- %3(16) = G_LOAD { s16, p0 } %0 :: (load 2 from %ir.addr)
+ ; CHECK: %3(s16) = G_LOAD %0 :: (load 2 from %ir.addr)
+ %3(s16) = G_LOAD %0 :: (load 2 from %ir.addr)
- ; CHECK: %4(32) = G_LOAD { s32, p0 } %0 :: (load 4 from %ir.addr)
- %4(32) = G_LOAD { s32, p0 } %0 :: (load 4 from %ir.addr)
+ ; CHECK: %4(s32) = G_LOAD %0 :: (load 4 from %ir.addr)
+ %4(s32) = G_LOAD %0 :: (load 4 from %ir.addr)
- ; CHECK: %5(64) = G_LOAD { s64, p0 } %0 :: (load 8 from %ir.addr)
- %5(64) = G_LOAD { s64, p0 } %0 :: (load 8 from %ir.addr)
+ ; CHECK: %5(s64) = G_LOAD %0 :: (load 8 from %ir.addr)
+ %5(s64) = G_LOAD %0 :: (load 8 from %ir.addr)
...
---
- { id: 2, class: _ }
- { id: 3, class: _ }
- { id: 4, class: _ }
+ - { id: 5, class: _ }
body: |
bb.0.entry:
liveins: %x0, %x1, %x2, %x3
; CHECK-LABEL: name: test_store
- %0(64) = G_TYPE s64 %x0
- %1(32) = G_TYPE s32 %w1
+ %0(p0) = COPY %x0
+ %1(s32) = COPY %w1
- ; CHECK: [[BIT8:%[0-9]+]](8) = G_ANYEXT { s8, s1 } %2
- ; CHECK: G_STORE { s8, p0 } [[BIT8]], %0 :: (store 1 into %ir.addr)
- %2(1) = G_TRUNC s1 %1
- G_STORE { s1, p0 } %2, %0 :: (store 1 into %ir.addr)
+ ; CHECK: [[BIT8:%[0-9]+]](s8) = G_ANYEXT %2
+ ; CHECK: G_STORE [[BIT8]], %0 :: (store 1 into %ir.addr)
+ %2(s1) = G_TRUNC %1
+ G_STORE %2, %0 :: (store 1 into %ir.addr)
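+ ; The s1 store is widened: the value is anyext'd to s8 before storing.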
- ; CHECK: G_STORE { s8, p0 } %3, %0 :: (store 1 into %ir.addr)
- %3(8) = G_TRUNC s8 %1
- G_STORE { s8, p0 } %3, %0 :: (store 1 into %ir.addr)
+ ; CHECK: G_STORE %3, %0 :: (store 1 into %ir.addr)
+ %3(s8) = G_TRUNC %1
+ G_STORE %3, %0 :: (store 1 into %ir.addr)
- ; CHECK: G_STORE { s16, p0 } %4, %0 :: (store 2 into %ir.addr)
- %4(16) = G_TRUNC s16 %1
- G_STORE { s16, p0 } %4, %0 :: (store 2 into %ir.addr)
+ ; CHECK: G_STORE %4, %0 :: (store 2 into %ir.addr)
+ %4(s16) = G_TRUNC %1
+ G_STORE %4, %0 :: (store 2 into %ir.addr)
- ; CHECK: G_STORE { s32, p0 } %1, %0 :: (store 4 into %ir.addr)
- G_STORE { s32, p0 } %1, %0 :: (store 4 into %ir.addr)
+ ; CHECK: G_STORE %1, %0 :: (store 4 into %ir.addr)
+ G_STORE %1, %0 :: (store 4 into %ir.addr)
- ; CHECK: G_STORE { s64, p0 } %0, %0 :: (store 8 into %ir.addr)
- G_STORE { s64, p0 } %0, %0 :: (store 8 into %ir.addr)
+ ; CHECK: G_STORE %5, %0 :: (store 8 into %ir.addr)
+ %5(s64) = G_PTRTOINT %0
+ G_STORE %5, %0 :: (store 8 into %ir.addr)
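+ ; The stored value now goes through G_PTRTOINT, so the 8-byte store
+ ; takes an s64 value operand rather than the p0 pointer itself.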
...
bb.0.entry:
liveins: %x0, %x1, %x2, %x3
; CHECK-LABEL: name: test_scalar_mul_small
- ; CHECK: %4(8) = G_MUL s8 %2, %3
+ ; CHECK: %4(s8) = G_MUL %2, %3
- %0(64) = G_TYPE s64 %x0
- %1(64) = G_TYPE s64 %x1
- %2(8) = G_TRUNC { s8, s64 } %0
- %3(8) = G_TRUNC { s8, s64 } %1
- %4(8) = G_MUL s8 %2, %3
- %5(64) = G_ANYEXT { s64, s8 } %2
+ %0(s64) = COPY %x0
+ %1(s64) = COPY %x1
+ %2(s8) = G_TRUNC %0
+ %3(s8) = G_TRUNC %1
+ %4(s8) = G_MUL %2, %3
+ %5(s64) = G_ANYEXT %2
%x0 = COPY %5
...
bb.0.entry:
liveins: %x0, %x1, %x2, %x3
; CHECK-LABEL: name: test_scalar_or_small
- ; CHECK: %4(8) = G_OR s8 %2, %3
+ ; CHECK: %4(s8) = G_OR %2, %3
- %0(64) = G_TYPE s64 %x0
- %1(64) = G_TYPE s64 %x1
- %2(8) = G_TRUNC { s8, s64 } %0
- %3(8) = G_TRUNC { s8, s64 } %1
- %4(8) = G_OR s8 %2, %3
- %5(64) = G_ANYEXT { s64, s8 } %2
+ %0(s64) = COPY %x0
+ %1(s64) = COPY %x1
+ %2(s8) = G_TRUNC %0
+ %3(s8) = G_TRUNC %1
+ %4(s8) = G_OR %2, %3
+ %5(s64) = G_ANYEXT %2
%x0 = COPY %5
...
bb.0.entry:
liveins: %x0, %x1, %x2, %x3
- ; CHECK: [[QUOT:%[0-9]+]](64) = G_UDIV s64 %0, %1
- ; CHECK: [[PROD:%[0-9]+]](64) = G_MUL s64 [[QUOT]], %1
- ; CHECK: [[RES:%[0-9]+]](64) = G_SUB s64 %0, [[PROD]]
- %0(64) = G_TYPE s64 %x0
- %1(64) = G_TYPE s64 %x1
- %2(64) = G_UREM s64 %0, %1
+ ; CHECK: [[QUOT:%[0-9]+]](s64) = G_UDIV %0, %1
+ ; CHECK: [[PROD:%[0-9]+]](s64) = G_MUL [[QUOT]], %1
+ ; CHECK: [[RES:%[0-9]+]](s64) = G_SUB %0, [[PROD]]
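+ ; G_UREM is lowered to a divide, a multiply and a subtract, as checked
+ ; above.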
+ %0(s64) = COPY %x0
+ %1(s64) = COPY %x1
+ %2(s64) = G_UREM %0, %1
- ; CHECK: [[QUOT:%[0-9]+]](32) = G_SDIV s32 %3, %4
- ; CHECK: [[PROD:%[0-9]+]](32) = G_MUL s32 [[QUOT]], %4
- ; CHECK: [[RES:%[0-9]+]](32) = G_SUB s32 %3, [[PROD]]
- %3(32) = G_TRUNC { s32, s64 } %0
- %4(32) = G_TRUNC { s32, s64 } %1
- %5(32) = G_SREM s32 %3, %4
+ ; CHECK: [[QUOT:%[0-9]+]](s32) = G_SDIV %3, %4
+ ; CHECK: [[PROD:%[0-9]+]](s32) = G_MUL [[QUOT]], %4
+ ; CHECK: [[RES:%[0-9]+]](s32) = G_SUB %3, [[PROD]]
+ %3(s32) = G_TRUNC %0
+ %4(s32) = G_TRUNC %1
+ %5(s32) = G_SREM %3, %4
- ; CHECK: [[LHS32:%[0-9]+]](32) = G_SEXT { s32, s8 } %6
- ; CHECK: [[RHS32:%[0-9]+]](32) = G_SEXT { s32, s8 } %7
- ; CHECK: [[QUOT32:%[0-9]+]](32) = G_SDIV s32 [[LHS32]], [[RHS32]]
- ; CHECK: [[QUOT:%[0-9]+]](8) = G_TRUNC { s8, s32 } [[QUOT32]]
- ; CHECK: [[PROD:%[0-9]+]](8) = G_MUL s8 [[QUOT]], %7
- ; CHECK: [[RES:%[0-9]+]](8) = G_SUB s8 %6, [[PROD]]
- %6(8) = G_TRUNC { s8, s64 } %0
- %7(8) = G_TRUNC { s8, s64 } %1
- %8(8) = G_SREM s8 %6, %7
+ ; CHECK: [[LHS32:%[0-9]+]](s32) = G_SEXT %6
+ ; CHECK: [[RHS32:%[0-9]+]](s32) = G_SEXT %7
+ ; CHECK: [[QUOT32:%[0-9]+]](s32) = G_SDIV [[LHS32]], [[RHS32]]
+ ; CHECK: [[QUOT:%[0-9]+]](s8) = G_TRUNC [[QUOT32]]
+ ; CHECK: [[PROD:%[0-9]+]](s8) = G_MUL [[QUOT]], %7
+ ; CHECK: [[RES:%[0-9]+]](s8) = G_SUB %6, [[PROD]]
+ %6(s8) = G_TRUNC %0
+ %7(s8) = G_TRUNC %1
+ %8(s8) = G_SREM %6, %7
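+ ; G_FREM is lowered to a libcall to fmod (fmodf for the s32 case below).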
; CHECK: %d0 = COPY %0
; CHECK: %d1 = COPY %1
; CHECK: BL $fmod, csr_aarch64_aapcs, implicit-def %lr, implicit %sp, implicit %d0, implicit %d1, implicit-def %d0
- ; CHECK: %9(64) = G_TYPE s64 %d0
- %9(64) = G_FREM s64 %0, %1
+ ; CHECK: %9(s64) = COPY %d0
+ %9(s64) = G_FREM %0, %1
; CHECK: %s0 = COPY %3
; CHECK: %s1 = COPY %4
; CHECK: BL $fmodf, csr_aarch64_aapcs, implicit-def %lr, implicit %sp, implicit %s0, implicit %s1, implicit-def %s0
- ; CHECK: %10(32) = G_TYPE s32 %s0
- %10(32) = G_FREM s32 %3, %4
+ ; CHECK: %10(s32) = COPY %s0
+ %10(s32) = G_FREM %3, %4
...
body: |
bb.0.entry:
liveins: %x0, %x1, %x2, %x3
- %0(64) = G_TYPE s64 %x0
+ %0(s64) = COPY %x0
; CHECK-LABEL: name: test_simple
- ; CHECK: %1(64) = G_PTRTOINT { s64, p0 } %0
- ; CHECK: %2(64) = G_INTTOPTR { p0, s64 } %1
- %1(64) = G_PTRTOINT { s64, p0 } %0
- %2(64) = G_INTTOPTR { p0, s64 } %1
+ ; CHECK: %1(p0) = G_INTTOPTR %0
+ ; CHECK: %2(s64) = G_PTRTOINT %1
+ %1(p0) = G_INTTOPTR %0
+ %2(s64) = G_PTRTOINT %1
- ; CHECK: [[TST32:%[0-9]+]](32) = G_ANYEXT { s32, s1 } %3
- ; CHECK: G_BRCOND s32 [[TST32]], %bb.1.next
- %3(1) = G_TRUNC { s1, s64 } %0
- G_BRCOND s1 %3, %bb.1.next
+ ; CHECK: [[TST32:%[0-9]+]](s32) = G_ANYEXT %3
+ ; CHECK: G_BRCOND [[TST32]], %bb.1.next
+ %3(s1) = G_TRUNC %0
+ G_BRCOND %3, %bb.1.next
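+ ; The s1 branch condition is widened to s32 with G_ANYEXT.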
bb.1.next:
- %4(32) = G_TRUNC { s32, s64 } %0
+ %4(s32) = G_TRUNC %0
- ; CHECK: %5(1) = G_FPTOSI { s1, s32 } %4
- ; CHECK: %6(8) = G_FPTOUI { s8, s32 } %4
- ; CHECK: %7(16) = G_FPTOSI { s16, s32 } %4
- ; CHECK: %8(32) = G_FPTOUI { s32, s32 } %4
- ; CHECK: %9(64) = G_FPTOSI { s64, s32 } %4
- %5(1) = G_FPTOSI { s1, s32 } %4
- %6(8) = G_FPTOUI { s8, s32 } %4
- %7(16) = G_FPTOSI { s16, s32 } %4
- %8(32) = G_FPTOUI { s32, s32 } %4
- %9(64) = G_FPTOSI { s64, s32 } %4
+ ; CHECK: %5(s1) = G_FPTOSI %4
+ ; CHECK: %6(s8) = G_FPTOUI %4
+ ; CHECK: %7(s16) = G_FPTOSI %4
+ ; CHECK: %8(s32) = G_FPTOUI %4
+ ; CHECK: %9(s64) = G_FPTOSI %4
+ %5(s1) = G_FPTOSI %4
+ %6(s8) = G_FPTOUI %4
+ %7(s16) = G_FPTOSI %4
+ %8(s32) = G_FPTOUI %4
+ %9(s64) = G_FPTOSI %4
- ; CHECK: %10(1) = G_FPTOUI { s1, s64 } %0
- ; CHECK: %11(8) = G_FPTOSI { s8, s64 } %0
- ; CHECK: %12(16) = G_FPTOUI { s16, s64 } %0
- ; CHECK: %13(32) = G_FPTOSI { s32, s64 } %0
- ; CHECK: %14(32) = G_FPTOUI { s64, s64 } %0
- %10(1) = G_FPTOUI { s1, s64 } %0
- %11(8) = G_FPTOSI { s8, s64 } %0
- %12(16) = G_FPTOUI { s16, s64 } %0
- %13(32) = G_FPTOSI { s32, s64 } %0
- %14(32) = G_FPTOUI { s64, s64 } %0
+ ; CHECK: %10(s1) = G_FPTOUI %0
+ ; CHECK: %11(s8) = G_FPTOSI %0
+ ; CHECK: %12(s16) = G_FPTOUI %0
+ ; CHECK: %13(s32) = G_FPTOSI %0
+ ; CHECK: %14(s64) = G_FPTOUI %0
+ %10(s1) = G_FPTOUI %0
+ %11(s8) = G_FPTOSI %0
+ %12(s16) = G_FPTOUI %0
+ %13(s32) = G_FPTOSI %0
+ %14(s64) = G_FPTOUI %0
- ; CHECK: %15(32) = G_UITOFP { s32, s1 } %5
- ; CHECK: %16(32) = G_SITOFP { s32, s8 } %11
- ; CHECK: %17(32) = G_UITOFP { s32, s16 } %7
- ; CHECK: %18(32) = G_SITOFP { s32, s32 } %4
- ; CHECK: %19(32) = G_UITOFP { s32, s64 } %0
- %15(32) = G_UITOFP { s32, s1 } %5
- %16(32) = G_SITOFP { s32, s8 } %11
- %17(32) = G_UITOFP { s32, s16 } %7
- %18(32) = G_SITOFP { s32, s32 } %4
- %19(32) = G_UITOFP { s32, s64 } %0
+ ; CHECK: %15(s32) = G_UITOFP %5
+ ; CHECK: %16(s32) = G_SITOFP %11
+ ; CHECK: %17(s32) = G_UITOFP %7
+ ; CHECK: %18(s32) = G_SITOFP %4
+ ; CHECK: %19(s32) = G_UITOFP %0
+ %15(s32) = G_UITOFP %5
+ %16(s32) = G_SITOFP %11
+ %17(s32) = G_UITOFP %7
+ %18(s32) = G_SITOFP %4
+ %19(s32) = G_UITOFP %0
- ; CHECK: %20(64) = G_SITOFP { s64, s1 } %5
- ; CHECK: %21(64) = G_UITOFP { s64, s8 } %11
- ; CHECK: %22(64) = G_SITOFP { s64, s16 } %7
- ; CHECK: %23(64) = G_UITOFP { s64, s32 } %4
- ; CHECK: %24(64) = G_SITOFP { s64, s64 } %0
- %20(64) = G_SITOFP { s64, s1 } %5
- %21(64) = G_UITOFP { s64, s8 } %11
- %22(64) = G_SITOFP { s64, s16 } %7
- %23(64) = G_UITOFP { s64, s32 } %4
- %24(64) = G_SITOFP { s64, s64 } %0
+ ; CHECK: %20(s64) = G_SITOFP %5
+ ; CHECK: %21(s64) = G_UITOFP %11
+ ; CHECK: %22(s64) = G_SITOFP %7
+ ; CHECK: %23(s64) = G_UITOFP %4
+ ; CHECK: %24(s64) = G_SITOFP %0
+ %20(s64) = G_SITOFP %5
+ %21(s64) = G_UITOFP %11
+ %22(s64) = G_SITOFP %7
+ %23(s64) = G_UITOFP %4
+ %24(s64) = G_SITOFP %0
- ; CHECK: %25(1) = G_SELECT { s1, s1 } %10, %10, %5
- ; CHECK: %26(8) = G_SELECT { s8, s1 } %10, %6, %11
- ; CHECK: %27(16) = G_SELECT { s16, s1 } %10, %12, %7
- ; CHECK: %28(32) = G_SELECT { s32, s1 } %10, %15, %16
- ; CHECK: %29(64) = G_SELECT { s64, s1 } %10, %9, %24
- %25(1) = G_SELECT { s1, s1 } %10, %10, %5
- %26(8) = G_SELECT { s8, s1 } %10, %6, %11
- %27(16) = G_SELECT { s16, s1 } %10, %12, %7
- %28(32) = G_SELECT { s32, s1 } %10, %15, %16
- %29(64) = G_SELECT { s64, s1 } %10, %9, %24
+ ; CHECK: %25(s1) = G_SELECT %10, %10, %5
+ ; CHECK: %26(s8) = G_SELECT %10, %6, %11
+ ; CHECK: %27(s16) = G_SELECT %10, %12, %7
+ ; CHECK: %28(s32) = G_SELECT %10, %15, %16
+ ; CHECK: %29(s64) = G_SELECT %10, %9, %24
+ %25(s1) = G_SELECT %10, %10, %5
+ %26(s8) = G_SELECT %10, %6, %11
+ %27(s16) = G_SELECT %10, %12, %7
+ %28(s32) = G_SELECT %10, %15, %16
+ %29(s64) = G_SELECT %10, %9, %24
...
bb.0.entry:
liveins: %x0, %x1, %x2, %x3
; CHECK-LABEL: name: test_scalar_sub_small
- ; CHECK: [[RES:%.*]](8) = G_SUB s8 %0, %1
+ ; CHECK: [[RES:%.*]](s8) = G_SUB %2, %3
- %0(64) = G_TYPE s64 %x0
- %1(64) = G_TYPE s64 %x1
- %2(8) = G_TRUNC { s8, s64 } %0
- %3(8) = G_TRUNC { s8, s64 } %1
- %4(8) = G_SUB s8 %0, %1
- %5(64) = G_ANYEXT { s64, s8 } %2
+ %0(s64) = COPY %x0
+ %1(s64) = COPY %x1
+ %2(s8) = G_TRUNC %0
+ %3(s8) = G_TRUNC %1
+ %4(s8) = G_SUB %2, %3
+ %5(s64) = G_ANYEXT %2
%x0 = COPY %5
...
bb.0.entry:
liveins: %x0, %x1, %x2, %x3
; CHECK-LABEL: name: test_scalar_xor_small
- ; CHECK: %4(8) = G_XOR s8 %2, %3
+ ; CHECK: %4(s8) = G_XOR %2, %3
- %0(64) = G_TYPE s64 %x0
- %1(64) = G_TYPE s64 %x1
- %2(8) = G_TRUNC { s8, s64 } %0
- %3(8) = G_TRUNC { s8, s64 } %1
- %4(8) = G_XOR s8 %2, %3
- %5(64) = G_ANYEXT { s64, s8 } %2
+ %0(s64) = COPY %x0
+ %1(s64) = COPY %x1
+ %2(s8) = G_TRUNC %0
+ %3(s8) = G_TRUNC %1
+ %4(s8) = G_XOR %2, %3
+ %5(s64) = G_ANYEXT %2
%x0 = COPY %5
...
...
---
# CHECK: *** Bad machine code: Generic virtual register must have a bank in a RegBankSelected function ***
-# CHECK: instruction: %vreg0<def>(64) = G_TYPE
+# CHECK: instruction: %vreg0<def>(s64) = G_TYPE
# CHECK: operand 0: %vreg0<def>
name: test
regBankSelected: true
body: |
bb.0:
liveins: %x0
- %0(64) = G_TYPE s64 %x0
+ %0(s64) = G_TYPE %x0
...
body: |
bb.0:
liveins: %x0
- %0 = G_TYPE s64 %x0
+ %0 = G_TYPE %x0
; CHECK: *** Bad machine code: Unexpected generic instruction in a Selected function ***
- ; CHECK: instruction: %vreg1<def> = G_ADD { s32 }
- %1 = G_ADD s32 %0, %0
+ ; CHECK: instruction: %vreg1<def> = G_ADD
+ %1 = G_ADD %0, %0
; CHECK: *** Bad machine code: Generic virtual register invalid in a Selected function ***
- ; CHECK: instruction: %vreg2<def>(64) = COPY
+ ; CHECK: instruction: %vreg2<def>(s64) = COPY
; CHECK: operand 0: %vreg2<def>
- %2(64) = COPY %x0
+ %2(s64) = COPY %x0
...
; Tests for add.
; CHECK: name: addi32
-; CHECK: G_ADD s32
+; CHECK: {{%[0-9]+}}(s32) = G_ADD
define i32 @addi32(i32 %arg1, i32 %arg2) {
%res = add i32 %arg1, %arg2
ret i32 %res
...
---
# Completely invalid code, but it checks that intrinsics round-trip properly.
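+ # (The parser and printer just need to agree on the intrinsic operand.)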
-# CHECK: %0(64) = COPY intrinsic(@llvm.AMDGPU.bfe.i32)
+# CHECK: %0(s64) = COPY intrinsic(@llvm.AMDGPU.bfe.i32)
name: use_intrin
registers:
- { id: 0, class: _ }
body: |
bb.0:
- %0(64) = COPY intrinsic(@llvm.AMDGPU.bfe.i32)
+ %0(s64) = COPY intrinsic(@llvm.AMDGPU.bfe.i32)
...
- { id: 3, class: _ }
- { id: 4, class: _ }
- { id: 5, class: _ }
+ - { id: 6, class: _ }
+ - { id: 7, class: _ }
+ - { id: 8, class: _ }
body: |
bb.0:
- liveins: %edi
- ; CHECK: %1(32) = G_ADD s32 %0
- %0(32) = COPY %edi
- %1(32) = G_ADD s32 %0, %0
- ; CHECK: %2(64) = G_ADD <2 x s32> %0
- %2(64) = G_ADD <2 x s32> %0, %0
- ; CHECK: %3(64) = G_ADD s64 %0
- %3(64) = G_ADD s64 %0, %0
- ; G_ADD is actually not a valid operand for structure type,
- ; but that is the only one we have for now for testing.
- ; CHECK: %4(64) = G_ADD s64 %0
- %4(64) = G_ADD s64 %0, %0
- ; CHECK: %5(48) = G_ADD s48 %0
- %5(48) = G_ADD s48 %0, %0
+ liveins: %edi, %xmm0, %rdi
+ ; CHECK: %1(s32) = G_ADD %0
+ %0(s32) = COPY %edi
+ %6(<2 x s32>) = COPY %xmm0
+ %7(s64) = COPY %rdi
+
+ %1(s32) = G_ADD %0, %0
+ ; CHECK: %2(<2 x s32>) = G_ADD %6, %6
+ %2(<2 x s32>) = G_ADD %6, %6
+ ; CHECK: %3(s64) = G_ADD %7, %7
+ %3(s64) = G_ADD %7, %7
+
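+ ; Truncate %7 so the odd-sized s48 add has matching operand types.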
+ ; CHECK: %5(s48) = G_ADD %8, %8
+ %8(s48) = G_TRUNC %7
+ %5(s48) = G_ADD %8, %8
...
---
bb.0:
successors: %bb.0
- ; CHECK: G_BR unsized %bb.0
- G_BR unsized %bb.0
+ ; CHECK: G_BR %bb.0
+ G_BR %bb.0
...