///
/// \return true if the lowering succeeded, false otherwise.
virtual bool lowerCall(MachineIRBuilder &MIRBuilder,
- const MachineOperand &Callee, ArrayRef<MVT> ResTys,
- ArrayRef<unsigned> ResRegs, ArrayRef<MVT> ArgTys,
+ const MachineOperand &Callee, ArrayRef<Type *> ResTys,
+ ArrayRef<unsigned> ResRegs, ArrayRef<Type *> ArgTys,
ArrayRef<unsigned> ArgRegs) const {
return false;
}
unsigned CarryOut, unsigned Op0, unsigned Op1,
unsigned CarryIn);
+ /// Build and insert \p Res<def> = G_TYPE \p Ty \p Op.
+ ///
+ /// G_TYPE gives a specified input register a type.
+ ///
+ /// \pre setBasicBlock or setMI must have been called.
+ /// \pre \p Op must be a physical register or a virtual register with a
+ /// register-class already attached (i.e. it cannot be a generic virtual
+ /// register).
+ ///
+ /// \return The newly created instruction.
+ MachineInstrBuilder buildType(LLT Ty, unsigned Res, unsigned Op);
+
/// Build and insert \p Res<def> = G_ANYEXT \p { DstTy, SrcTy } \p Op0
///
/// G_ANYEXT produces a register of the specified width, with bits 0 to
let hasSideEffects = 0;
}
+// G_TYPE annotates an untyped (register-class-bearing or physical) register
+// with a generic LLT type; it behaves like a COPY with a type attached.
+def G_TYPE : Instruction {
+  let OutOperandList = (outs unknown:$dst);
+  // The input is a register source, not an immediate, so name it $src.
+  // (See the MachineVerifier exemption: G_TYPE may take a physical register.)
+  let InOperandList = (ins unknown:$src);
+  let hasSideEffects = 0;
+}
+
//------------------------------------------------------------------------------
// Binary ops.
//------------------------------------------------------------------------------
/// Generic unsigned-int to float conversion
HANDLE_TARGET_OPCODE(G_UITOFP)
+/// Generic type specifier for untyped registers.
+HANDLE_TARGET_OPCODE(G_TYPE)
+
/// Generic BRANCH instruction. This is an unconditional branch.
HANDLE_TARGET_OPCODE(G_BR)
// First step is to marshall all the function's parameters into the correct
// physregs and memory locations. Gather the sequence of argument types that
// we'll pass to the assigner function.
- SmallVector<MVT, 8> ArgTys;
+ SmallVector<Type *, 8> ArgTys;
for (auto &Arg : CI.arg_operands())
- ArgTys.push_back(MVT::getVT(Arg->getType()));
+ ArgTys.push_back(Arg->getType());
MachineOperand Callee = MachineOperand::CreateImm(0);
if (Function *F = CI.getCalledFunction())
else
Callee = MachineOperand::CreateReg(GetCalleeReg(), false);
- return lowerCall(MIRBuilder, Callee, MVT::getVT(CI.getType()),
+ return lowerCall(MIRBuilder, Callee, CI.getType(),
ResReg ? ResReg : ArrayRef<unsigned>(), ArgTys, ArgRegs);
}
.addUse(CarryIn);
}
+// Build and insert Res<def> = G_TYPE Ty Op: attach generic type \p Ty to the
+// value in \p Op (a physical or register-class-constrained register) and copy
+// it into \p Res. See the declaration's \pre conditions for operand rules.
+MachineInstrBuilder MachineIRBuilder::buildType(LLT Ty,
+ unsigned Res, unsigned Op) {
+ return buildInstr(TargetOpcode::G_TYPE, Ty).addDef(Res).addUse(Op);
+}
+
MachineInstrBuilder MachineIRBuilder::buildAnyExt(ArrayRef<LLT> Tys,
unsigned Res, unsigned Op) {
validateTruncExt(Tys, true);
default:
return UnableToLegalize;
case TargetOpcode::G_FREM: {
- MVT Ty = MVT::getFloatingPointVT(MI.getType().getSizeInBits());
+ auto &Ctx = MIRBuilder.getMF().getFunction()->getContext();
+ Type *Ty = Size == 64 ? Type::getDoubleTy(Ctx) : Type::getFloatTy(Ctx);
auto &CLI = *MIRBuilder.getMF().getSubtarget().getCallLowering();
auto &TLI = *MIRBuilder.getMF().getSubtarget().getTargetLowering();
const char *Name =
DefaultActions[TargetOpcode::G_ANYEXT] = Legal;
DefaultActions[TargetOpcode::G_TRUNC] = Legal;
+ // G_TYPE is essentially an annotated COPY so it's always legal.
+ DefaultActions[TargetOpcode::G_TYPE] = Legal;
+
DefaultActions[TargetOpcode::G_INTRINSIC] = Legal;
DefaultActions[TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS] = Legal;
bool CompleteMapping = true;
// For copies we want to walk over the operands and try to find one
// that has a register bank.
- bool isCopyLike = MI.isCopy() || MI.isPHI();
+ bool isCopyLike =
+ MI.isCopy() || MI.isPHI() || MI.getOpcode() == TargetOpcode::G_TYPE;
// Remember the register bank for reuse for copy-like instructions.
const RegisterBank *RegBank = nullptr;
// Remember the size of the register for reuse for copy-like instructions.
}
// Generic opcodes must not have physical register operands.
- if (isPreISelGenericOpcode(MCID.getOpcode())) {
+ if (isPreISelGenericOpcode(MCID.getOpcode()) &&
+ MCID.getOpcode() != TargetOpcode::G_TYPE) {
for (auto &Op : MI->operands()) {
if (Op.isReg() && TargetRegisterInfo::isPhysicalRegister(Op.getReg()))
report("Generic instruction cannot have physical register", MI);
const AArch64TargetLowering &TLI = *getTLI<AArch64TargetLowering>();
CCAssignFn *AssignFn = TLI.CCAssignFnForReturn(F.getCallingConv());
- handleAssignments(
- MIRBuilder, AssignFn, MVT::getVT(Val->getType()), VReg,
- [&](MachineIRBuilder &MIRBuilder, unsigned ValReg, unsigned PhysReg) {
- MIRBuilder.buildCopy(PhysReg, ValReg);
- MIB.addUse(PhysReg, RegState::Implicit);
- });
+ handleAssignments(MIRBuilder, AssignFn, Val->getType(), VReg,
+ [&](MachineIRBuilder &MIRBuilder, Type *Ty,
+ unsigned ValReg, unsigned PhysReg) {
+ MIRBuilder.buildCopy(PhysReg, ValReg);
+ MIB.addUse(PhysReg, RegState::Implicit);
+ });
}
return true;
}
bool AArch64CallLowering::handleAssignments(MachineIRBuilder &MIRBuilder,
CCAssignFn *AssignFn,
- ArrayRef<MVT> ArgTypes,
+ ArrayRef<Type *> ArgTypes,
ArrayRef<unsigned> ArgRegs,
AssignFnTy AssignValToReg) const {
MachineFunction &MF = MIRBuilder.getMF();
CCState CCInfo(F.getCallingConv(), F.isVarArg(), MF, ArgLocs, F.getContext());
unsigned NumArgs = ArgTypes.size();
- auto CurVT = ArgTypes.begin();
- for (unsigned i = 0; i != NumArgs; ++i, ++CurVT) {
- bool Res = AssignFn(i, *CurVT, *CurVT, CCValAssign::Full, ISD::ArgFlagsTy(),
- CCInfo);
- if (Res)
+ auto CurTy = ArgTypes.begin();
+ for (unsigned i = 0; i != NumArgs; ++i, ++CurTy) {
+ MVT CurVT = MVT::getVT(*CurTy);
+ if (AssignFn(i, CurVT, CurVT, CCValAssign::Full, ISD::ArgFlagsTy(), CCInfo))
return false;
}
assert(ArgLocs.size() == ArgTypes.size() &&
// Everything checks out, tell the caller where we've decided this
// parameter/return value should go.
- AssignValToReg(MIRBuilder, ArgRegs[i], VA.getLocReg());
+ AssignValToReg(MIRBuilder, ArgTypes[i], ArgRegs[i], VA.getLocReg());
}
return true;
}
MachineFunction &MF = MIRBuilder.getMF();
const Function &F = *MF.getFunction();
- SmallVector<MVT, 8> ArgTys;
+ SmallVector<Type *, 8> ArgTys;
for (auto &Arg : Args)
- ArgTys.push_back(MVT::getVT(Arg.getType()));
+ ArgTys.push_back(Arg.getType());
const AArch64TargetLowering &TLI = *getTLI<AArch64TargetLowering>();
CCAssignFn *AssignFn =
TLI.CCAssignFnForCall(F.getCallingConv(), /*IsVarArg=*/false);
- return handleAssignments(
- MIRBuilder, AssignFn, ArgTys, VRegs,
- [](MachineIRBuilder &MIRBuilder, unsigned ValReg, unsigned PhysReg) {
- MIRBuilder.getMBB().addLiveIn(PhysReg);
- MIRBuilder.buildCopy(ValReg, PhysReg);
- });
+ return handleAssignments(MIRBuilder, AssignFn, ArgTys, VRegs,
+ [](MachineIRBuilder &MIRBuilder, Type *Ty,
+ unsigned ValReg, unsigned PhysReg) {
+ MIRBuilder.getMBB().addLiveIn(PhysReg);
+ MIRBuilder.buildType(LLT{*Ty}, ValReg, PhysReg);
+ });
}
bool AArch64CallLowering::lowerCall(MachineIRBuilder &MIRBuilder,
const MachineOperand &Callee,
- ArrayRef<MVT> ResTys,
+ ArrayRef<Type *> ResTys,
ArrayRef<unsigned> ResRegs,
- ArrayRef<MVT> ArgTys,
+ ArrayRef<Type *> ArgTys,
ArrayRef<unsigned> ArgRegs) const {
MachineFunction &MF = MIRBuilder.getMF();
const Function &F = *MF.getFunction();
// And finally we can do the actual assignments. For a call we need to keep
// track of the registers used because they'll be implicit uses of the BL.
SmallVector<unsigned, 8> PhysRegs;
- handleAssignments(
- MIRBuilder, CallAssignFn, ArgTys, ArgRegs,
- [&](MachineIRBuilder &MIRBuilder, unsigned ValReg, unsigned PhysReg) {
- MIRBuilder.buildCopy(PhysReg, ValReg);
- PhysRegs.push_back(PhysReg);
- });
+ handleAssignments(MIRBuilder, CallAssignFn, ArgTys, ArgRegs,
+ [&](MachineIRBuilder &MIRBuilder, Type *Ty, unsigned ValReg,
+ unsigned PhysReg) {
+ MIRBuilder.buildCopy(PhysReg, ValReg);
+ PhysRegs.push_back(PhysReg);
+ });
// Now we can build the actual call instruction.
auto MIB = MIRBuilder.buildInstr(Callee.isReg() ? AArch64::BLR : AArch64::BL);
// implicit-define of the call instruction.
CCAssignFn *RetAssignFn = TLI.CCAssignFnForReturn(F.getCallingConv());
if (!ResRegs.empty())
- handleAssignments(
- MIRBuilder, RetAssignFn, ResTys, ResRegs,
- [&](MachineIRBuilder &MIRBuilder, unsigned ValReg, unsigned PhysReg) {
- MIRBuilder.buildCopy(ValReg, PhysReg);
- MIB.addDef(PhysReg, RegState::Implicit);
- });
+ handleAssignments(MIRBuilder, RetAssignFn, ResTys, ResRegs,
+ [&](MachineIRBuilder &MIRBuilder, Type *Ty,
+ unsigned ValReg, unsigned PhysReg) {
+ MIRBuilder.buildType(LLT{*Ty}, ValReg, PhysReg);
+ MIB.addDef(PhysReg, RegState::Implicit);
+ });
return true;
}
ArrayRef<unsigned> VRegs) const override;
bool lowerCall(MachineIRBuilder &MIRBuilder, const MachineOperand &Callee,
- ArrayRef<MVT> ResTys, ArrayRef<unsigned> ResRegs,
- ArrayRef<MVT> ArgTys,
+ ArrayRef<Type *> ResTys, ArrayRef<unsigned> ResRegs,
+ ArrayRef<Type *> ArgTys,
ArrayRef<unsigned> ArgRegs) const override;
private:
- typedef std::function<void(MachineIRBuilder &, unsigned, unsigned)>
+ typedef std::function<void(MachineIRBuilder &, Type *, unsigned, unsigned)>
AssignFnTy;
bool handleAssignments(MachineIRBuilder &MIRBuilder, CCAssignFn *AssignFn,
- ArrayRef<MVT> ArgsTypes, ArrayRef<unsigned> ArgRegs,
+ ArrayRef<Type *> ArgsTypes, ArrayRef<unsigned> ArgRegs,
AssignFnTy AssignValToReg) const;
};
} // End of namespace llvm;
return true;
}
+ case TargetOpcode::G_TYPE: {
+ I.setDesc(TII.get(TargetOpcode::COPY));
+ I.removeTypes();
+ return true;
+ }
+
case TargetOpcode::G_FRAME_INDEX: {
// allocas and G_FRAME_INDEX are only supported in addrspace(0).
if (I.getType() != LLT::pointer(0)) {
return constrainSelectedInstRegOperands(I, TII, TRI, RBI);
}
-
case TargetOpcode::G_LOAD:
case TargetOpcode::G_STORE: {
LLT MemTy = I.getType(0);
target triple = "aarch64-linux-gnu"
; CHECK-LABEL: name: args_i32
-; CHECK: %[[ARG0:[0-9]+]](32) = COPY %w0
-; CHECK: %{{[0-9]+}}(32) = COPY %w1
-; CHECK: %{{[0-9]+}}(32) = COPY %w2
-; CHECK: %{{[0-9]+}}(32) = COPY %w3
-; CHECK: %{{[0-9]+}}(32) = COPY %w4
-; CHECK: %{{[0-9]+}}(32) = COPY %w5
-; CHECK: %{{[0-9]+}}(32) = COPY %w6
-; CHECK: %{{[0-9]+}}(32) = COPY %w7
+; CHECK: %[[ARG0:[0-9]+]](32) = G_TYPE s32 %w0
+; CHECK: %{{[0-9]+}}(32) = G_TYPE s32 %w1
+; CHECK: %{{[0-9]+}}(32) = G_TYPE s32 %w2
+; CHECK: %{{[0-9]+}}(32) = G_TYPE s32 %w3
+; CHECK: %{{[0-9]+}}(32) = G_TYPE s32 %w4
+; CHECK: %{{[0-9]+}}(32) = G_TYPE s32 %w5
+; CHECK: %{{[0-9]+}}(32) = G_TYPE s32 %w6
+; CHECK: %{{[0-9]+}}(32) = G_TYPE s32 %w7
; CHECK: %w0 = COPY %[[ARG0]]
define i32 @args_i32(i32 %w0, i32 %w1, i32 %w2, i32 %w3,
}
; CHECK-LABEL: name: args_i64
-; CHECK: %[[ARG0:[0-9]+]](64) = COPY %x0
-; CHECK: %{{[0-9]+}}(64) = COPY %x1
-; CHECK: %{{[0-9]+}}(64) = COPY %x2
-; CHECK: %{{[0-9]+}}(64) = COPY %x3
-; CHECK: %{{[0-9]+}}(64) = COPY %x4
-; CHECK: %{{[0-9]+}}(64) = COPY %x5
-; CHECK: %{{[0-9]+}}(64) = COPY %x6
-; CHECK: %{{[0-9]+}}(64) = COPY %x7
+; CHECK: %[[ARG0:[0-9]+]](64) = G_TYPE s64 %x0
+; CHECK: %{{[0-9]+}}(64) = G_TYPE s64 %x1
+; CHECK: %{{[0-9]+}}(64) = G_TYPE s64 %x2
+; CHECK: %{{[0-9]+}}(64) = G_TYPE s64 %x3
+; CHECK: %{{[0-9]+}}(64) = G_TYPE s64 %x4
+; CHECK: %{{[0-9]+}}(64) = G_TYPE s64 %x5
+; CHECK: %{{[0-9]+}}(64) = G_TYPE s64 %x6
+; CHECK: %{{[0-9]+}}(64) = G_TYPE s64 %x7
; CHECK: %x0 = COPY %[[ARG0]]
define i64 @args_i64(i64 %x0, i64 %x1, i64 %x2, i64 %x3,
i64 %x4, i64 %x5, i64 %x6, i64 %x7) {
; CHECK-LABEL: name: args_ptrs
-; CHECK: %[[ARG0:[0-9]+]](64) = COPY %x0
-; CHECK: %{{[0-9]+}}(64) = COPY %x1
-; CHECK: %{{[0-9]+}}(64) = COPY %x2
-; CHECK: %{{[0-9]+}}(64) = COPY %x3
-; CHECK: %{{[0-9]+}}(64) = COPY %x4
-; CHECK: %{{[0-9]+}}(64) = COPY %x5
-; CHECK: %{{[0-9]+}}(64) = COPY %x6
-; CHECK: %{{[0-9]+}}(64) = COPY %x7
+; CHECK: %[[ARG0:[0-9]+]](64) = G_TYPE p0 %x0
+; CHECK: %{{[0-9]+}}(64) = G_TYPE p0 %x1
+; CHECK: %{{[0-9]+}}(64) = G_TYPE p0 %x2
+; CHECK: %{{[0-9]+}}(64) = G_TYPE p0 %x3
+; CHECK: %{{[0-9]+}}(64) = G_TYPE p0 %x4
+; CHECK: %{{[0-9]+}}(64) = G_TYPE p0 %x5
+; CHECK: %{{[0-9]+}}(64) = G_TYPE p0 %x6
+; CHECK: %{{[0-9]+}}(64) = G_TYPE p0 %x7
; CHECK: %x0 = COPY %[[ARG0]]
define i8* @args_ptrs(i8* %x0, i16* %x1, <2 x i8>* %x2, {i8, i16, i32}* %x3,
[3 x float]* %x4, double* %x5, i8* %x6, i8* %x7) {
bb.0:
liveins: %w0, %w1
- %0(32) = COPY %w0
- %1(32) = COPY %w1
+ %0(32) = G_TYPE s32 %w0
+ %1(32) = G_TYPE s32 %w1
%2(32) = G_ADD s32 %0, %1
...
bb.0:
liveins: %x0, %x1
- %0(64) = COPY %x0
- %1(64) = COPY %x1
+ %0(64) = G_TYPE s64 %x0
+ %1(64) = G_TYPE s64 %x1
%2(64) = G_ADD s64 %0, %1
...
bb.0:
liveins: %w0, %w1
- %0(32) = COPY %w0
- %1(32) = COPY %w1
+ %0(32) = G_TYPE s32 %w0
+ %1(32) = G_TYPE s32 %w1
%2(32) = G_SUB s32 %0, %1
...
bb.0:
liveins: %x0, %x1
- %0(64) = COPY %x0
- %1(64) = COPY %x1
+ %0(64) = G_TYPE s64 %x0
+ %1(64) = G_TYPE s64 %x1
%2(64) = G_SUB s64 %0, %1
...
bb.0:
liveins: %w0, %w1
- %0(32) = COPY %w0
- %1(32) = COPY %w1
+ %0(32) = G_TYPE s32 %w0
+ %1(32) = G_TYPE s32 %w1
%2(32) = G_OR s32 %0, %1
...
bb.0:
liveins: %x0, %x1
- %0(64) = COPY %x0
- %1(64) = COPY %x1
+ %0(64) = G_TYPE s64 %x0
+ %1(64) = G_TYPE s64 %x1
%2(64) = G_OR s64 %0, %1
...
bb.0:
liveins: %w0, %w1
- %0(32) = COPY %w0
- %1(32) = COPY %w1
+ %0(32) = G_TYPE s32 %w0
+ %1(32) = G_TYPE s32 %w1
%2(32) = G_XOR s32 %0, %1
...
bb.0:
liveins: %x0, %x1
- %0(64) = COPY %x0
- %1(64) = COPY %x1
+ %0(64) = G_TYPE s64 %x0
+ %1(64) = G_TYPE s64 %x1
%2(64) = G_XOR s64 %0, %1
...
bb.0:
liveins: %w0, %w1
- %0(32) = COPY %w0
- %1(32) = COPY %w1
+ %0(32) = G_TYPE s32 %w0
+ %1(32) = G_TYPE s32 %w1
%2(32) = G_AND s32 %0, %1
...
bb.0:
liveins: %x0, %x1
- %0(64) = COPY %x0
- %1(64) = COPY %x1
+ %0(64) = G_TYPE s64 %x0
+ %1(64) = G_TYPE s64 %x1
%2(64) = G_AND s64 %0, %1
...
bb.0:
liveins: %w0, %w1
- %0(32) = COPY %w0
- %1(32) = COPY %w1
+ %0(32) = G_TYPE s32 %w0
+ %1(32) = G_TYPE s32 %w1
%2(32) = G_SHL s32 %0, %1
...
bb.0:
liveins: %x0, %x1
- %0(64) = COPY %x0
- %1(64) = COPY %x1
+ %0(64) = G_TYPE s64 %x0
+ %1(64) = G_TYPE s64 %x1
%2(64) = G_SHL s64 %0, %1
...
bb.0:
liveins: %w0, %w1
- %0(32) = COPY %w0
- %1(32) = COPY %w1
+ %0(32) = G_TYPE s32 %w0
+ %1(32) = G_TYPE s32 %w1
%2(32) = G_LSHR s32 %0, %1
...
bb.0:
liveins: %x0, %x1
- %0(64) = COPY %x0
- %1(64) = COPY %x1
+ %0(64) = G_TYPE s64 %x0
+ %1(64) = G_TYPE s64 %x1
%2(64) = G_LSHR s64 %0, %1
...
bb.0:
liveins: %w0, %w1
- %0(32) = COPY %w0
- %1(32) = COPY %w1
+ %0(32) = G_TYPE s32 %w0
+ %1(32) = G_TYPE s32 %w1
%2(32) = G_ASHR s32 %0, %1
...
bb.0:
liveins: %x0, %x1
- %0(64) = COPY %x0
- %1(64) = COPY %x1
+ %0(64) = G_TYPE s64 %x0
+ %1(64) = G_TYPE s64 %x1
%2(64) = G_ASHR s64 %0, %1
...
bb.0:
liveins: %w0, %w1
- %0(32) = COPY %w0
- %1(32) = COPY %w1
+ %0(32) = G_TYPE s32 %w0
+ %1(32) = G_TYPE s32 %w1
%2(32) = G_MUL s32 %0, %1
...
bb.0:
liveins: %x0, %x1
- %0(64) = COPY %x0
- %1(64) = COPY %x1
+ %0(64) = G_TYPE s64 %x0
+ %1(64) = G_TYPE s64 %x1
%2(64) = G_MUL s64 %0, %1
...
bb.0:
liveins: %w0, %w1
- %0(32) = COPY %w0
- %1(32) = COPY %w1
+ %0(32) = G_TYPE s32 %w0
+ %1(32) = G_TYPE s32 %w1
%2(32) = G_SDIV s32 %0, %1
...
bb.0:
liveins: %x0, %x1
- %0(64) = COPY %x0
- %1(64) = COPY %x1
+ %0(64) = G_TYPE s64 %x0
+ %1(64) = G_TYPE s64 %x1
%2(64) = G_SDIV s64 %0, %1
...
bb.0:
liveins: %w0, %w1
- %0(32) = COPY %w0
- %1(32) = COPY %w1
+ %0(32) = G_TYPE s32 %w0
+ %1(32) = G_TYPE s32 %w1
%2(32) = G_UDIV s32 %0, %1
...
bb.0:
liveins: %x0, %x1
- %0(64) = COPY %x0
- %1(64) = COPY %x1
+ %0(64) = G_TYPE s64 %x0
+ %1(64) = G_TYPE s64 %x1
%2(64) = G_UDIV s64 %0, %1
...
bb.0:
liveins: %s0, %s1
- %0(32) = COPY %s0
- %1(32) = COPY %s1
+ %0(32) = G_TYPE s32 %s0
+ %1(32) = G_TYPE s32 %s1
%2(32) = G_FADD s32 %0, %1
...
bb.0:
liveins: %d0, %d1
- %0(64) = COPY %d0
- %1(64) = COPY %d1
+ %0(64) = G_TYPE s64 %d0
+ %1(64) = G_TYPE s64 %d1
%2(64) = G_FADD s64 %0, %1
...
bb.0:
liveins: %s0, %s1
- %0(32) = COPY %s0
- %1(32) = COPY %s1
+ %0(32) = G_TYPE s32 %s0
+ %1(32) = G_TYPE s32 %s1
%2(32) = G_FSUB s32 %0, %1
...
bb.0:
liveins: %d0, %d1
- %0(64) = COPY %d0
- %1(64) = COPY %d1
+ %0(64) = G_TYPE s64 %d0
+ %1(64) = G_TYPE s64 %d1
%2(64) = G_FSUB s64 %0, %1
...
bb.0:
liveins: %s0, %s1
- %0(32) = COPY %s0
- %1(32) = COPY %s1
+ %0(32) = G_TYPE s32 %s0
+ %1(32) = G_TYPE s32 %s1
%2(32) = G_FMUL s32 %0, %1
...
bb.0:
liveins: %d0, %d1
- %0(64) = COPY %d0
- %1(64) = COPY %d1
+ %0(64) = G_TYPE s64 %d0
+ %1(64) = G_TYPE s64 %d1
%2(64) = G_FMUL s64 %0, %1
...
bb.0:
liveins: %s0, %s1
- %0(32) = COPY %s0
- %1(32) = COPY %s1
+ %0(32) = G_TYPE s32 %s0
+ %1(32) = G_TYPE s32 %s1
%2(32) = G_FDIV s32 %0, %1
...
bb.0:
liveins: %d0, %d1
- %0(64) = COPY %d0
- %1(64) = COPY %d1
+ %0(64) = G_TYPE s64 %d0
+ %1(64) = G_TYPE s64 %d1
%2(64) = G_FDIV s64 %0, %1
...
bb.0:
liveins: %x0
- %0(64) = COPY %x0
+ %0(64) = G_TYPE s64 %x0
%1(64) = G_LOAD { s64, p0 } %0 :: (load 8 from %ir.addr)
...
bb.0:
liveins: %x0
- %0(64) = COPY %x0
+ %0(64) = G_TYPE s64 %x0
%1(32) = G_LOAD { s32, p0 } %0 :: (load 4 from %ir.addr)
...
bb.0:
liveins: %x0, %x1
- %0(64) = COPY %x0
- %1(64) = COPY %x1
+ %0(64) = G_TYPE s64 %x0
+ %1(64) = G_TYPE s64 %x1
G_STORE { s64, p0 } %1, %0 :: (store 8 into %ir.addr)
...
bb.0:
liveins: %x0, %w1
- %0(64) = COPY %x0
- %1(32) = COPY %w1
+ %0(64) = G_TYPE s64 %x0
+ %1(32) = G_TYPE s32 %w1
G_STORE { s32, p0 } %1, %0 :: (store 4 into %ir.addr)
...
; Tests for add.
; CHECK-LABEL: name: addi64
-; CHECK: [[ARG1:%[0-9]+]](64) = COPY %x0
-; CHECK-NEXT: [[ARG2:%[0-9]+]](64) = COPY %x1
+; CHECK: [[ARG1:%[0-9]+]](64) = G_TYPE s64 %x0
+; CHECK-NEXT: [[ARG2:%[0-9]+]](64) = G_TYPE s64 %x1
; CHECK-NEXT: [[RES:%[0-9]+]](64) = G_ADD s64 [[ARG1]], [[ARG2]]
; CHECK-NEXT: %x0 = COPY [[RES]]
; CHECK-NEXT: RET_ReallyLR implicit %x0
}
; CHECK-LABEL: name: muli64
-; CHECK: [[ARG1:%[0-9]+]](64) = COPY %x0
-; CHECK-NEXT: [[ARG2:%[0-9]+]](64) = COPY %x1
+; CHECK: [[ARG1:%[0-9]+]](64) = G_TYPE s64 %x0
+; CHECK-NEXT: [[ARG2:%[0-9]+]](64) = G_TYPE s64 %x1
; CHECK-NEXT: [[RES:%[0-9]+]](64) = G_MUL s64 [[ARG1]], [[ARG2]]
; CHECK-NEXT: %x0 = COPY [[RES]]
; CHECK-NEXT: RET_ReallyLR implicit %x0
; CHECK: %[[FALSE:[0-9a-zA-Z._-]+]]({{0x[a-f0-9]+ / 0x[a-f0-9]+}} = 50.00%)
;
; Check that we emit the correct branch.
-; CHECK: [[ADDR:%.*]](64) = COPY %x0
+; CHECK: [[ADDR:%.*]](64) = G_TYPE p0 %x0
; CHECK: [[TST:%.*]](1) = G_LOAD { s1, p0 } [[ADDR]]
; CHECK: G_BRCOND s1 [[TST]], %[[TRUE]]
; CHECK: G_BR unsized %[[FALSE]]
; Tests for or.
; CHECK-LABEL: name: ori64
-; CHECK: [[ARG1:%[0-9]+]](64) = COPY %x0
-; CHECK-NEXT: [[ARG2:%[0-9]+]](64) = COPY %x1
+; CHECK: [[ARG1:%[0-9]+]](64) = G_TYPE s64 %x0
+; CHECK-NEXT: [[ARG2:%[0-9]+]](64) = G_TYPE s64 %x1
; CHECK-NEXT: [[RES:%[0-9]+]](64) = G_OR s64 [[ARG1]], [[ARG2]]
; CHECK-NEXT: %x0 = COPY [[RES]]
; CHECK-NEXT: RET_ReallyLR implicit %x0
}
; CHECK-LABEL: name: ori32
-; CHECK: [[ARG1:%[0-9]+]](32) = COPY %w0
-; CHECK-NEXT: [[ARG2:%[0-9]+]](32) = COPY %w1
+; CHECK: [[ARG1:%[0-9]+]](32) = G_TYPE s32 %w0
+; CHECK-NEXT: [[ARG2:%[0-9]+]](32) = G_TYPE s32 %w1
; CHECK-NEXT: [[RES:%[0-9]+]](32) = G_OR s32 [[ARG1]], [[ARG2]]
; CHECK-NEXT: %w0 = COPY [[RES]]
; CHECK-NEXT: RET_ReallyLR implicit %w0
; Tests for xor.
; CHECK-LABEL: name: xori64
-; CHECK: [[ARG1:%[0-9]+]](64) = COPY %x0
-; CHECK-NEXT: [[ARG2:%[0-9]+]](64) = COPY %x1
+; CHECK: [[ARG1:%[0-9]+]](64) = G_TYPE s64 %x0
+; CHECK-NEXT: [[ARG2:%[0-9]+]](64) = G_TYPE s64 %x1
; CHECK-NEXT: [[RES:%[0-9]+]](64) = G_XOR s64 [[ARG1]], [[ARG2]]
; CHECK-NEXT: %x0 = COPY [[RES]]
; CHECK-NEXT: RET_ReallyLR implicit %x0
}
; CHECK-LABEL: name: xori32
-; CHECK: [[ARG1:%[0-9]+]](32) = COPY %w0
-; CHECK-NEXT: [[ARG2:%[0-9]+]](32) = COPY %w1
+; CHECK: [[ARG1:%[0-9]+]](32) = G_TYPE s32 %w0
+; CHECK-NEXT: [[ARG2:%[0-9]+]](32) = G_TYPE s32 %w1
; CHECK-NEXT: [[RES:%[0-9]+]](32) = G_XOR s32 [[ARG1]], [[ARG2]]
; CHECK-NEXT: %w0 = COPY [[RES]]
; CHECK-NEXT: RET_ReallyLR implicit %w0
; Tests for and.
; CHECK-LABEL: name: andi64
-; CHECK: [[ARG1:%[0-9]+]](64) = COPY %x0
-; CHECK-NEXT: [[ARG2:%[0-9]+]](64) = COPY %x1
+; CHECK: [[ARG1:%[0-9]+]](64) = G_TYPE s64 %x0
+; CHECK-NEXT: [[ARG2:%[0-9]+]](64) = G_TYPE s64 %x1
; CHECK-NEXT: [[RES:%[0-9]+]](64) = G_AND s64 [[ARG1]], [[ARG2]]
; CHECK-NEXT: %x0 = COPY [[RES]]
; CHECK-NEXT: RET_ReallyLR implicit %x0
}
; CHECK-LABEL: name: andi32
-; CHECK: [[ARG1:%[0-9]+]](32) = COPY %w0
-; CHECK-NEXT: [[ARG2:%[0-9]+]](32) = COPY %w1
+; CHECK: [[ARG1:%[0-9]+]](32) = G_TYPE s32 %w0
+; CHECK-NEXT: [[ARG2:%[0-9]+]](32) = G_TYPE s32 %w1
; CHECK-NEXT: [[RES:%[0-9]+]](32) = G_AND s32 [[ARG1]], [[ARG2]]
; CHECK-NEXT: %w0 = COPY [[RES]]
; CHECK-NEXT: RET_ReallyLR implicit %w0
; Tests for sub.
; CHECK-LABEL: name: subi64
-; CHECK: [[ARG1:%[0-9]+]](64) = COPY %x0
-; CHECK-NEXT: [[ARG2:%[0-9]+]](64) = COPY %x1
+; CHECK: [[ARG1:%[0-9]+]](64) = G_TYPE s64 %x0
+; CHECK-NEXT: [[ARG2:%[0-9]+]](64) = G_TYPE s64 %x1
; CHECK-NEXT: [[RES:%[0-9]+]](64) = G_SUB s64 [[ARG1]], [[ARG2]]
; CHECK-NEXT: %x0 = COPY [[RES]]
; CHECK-NEXT: RET_ReallyLR implicit %x0
}
; CHECK-LABEL: name: subi32
-; CHECK: [[ARG1:%[0-9]+]](32) = COPY %w0
-; CHECK-NEXT: [[ARG2:%[0-9]+]](32) = COPY %w1
+; CHECK: [[ARG1:%[0-9]+]](32) = G_TYPE s32 %w0
+; CHECK-NEXT: [[ARG2:%[0-9]+]](32) = G_TYPE s32 %w1
; CHECK-NEXT: [[RES:%[0-9]+]](32) = G_SUB s32 [[ARG1]], [[ARG2]]
; CHECK-NEXT: %w0 = COPY [[RES]]
; CHECK-NEXT: RET_ReallyLR implicit %w0
}
; CHECK-LABEL: name: ptrtoint
-; CHECK: [[ARG1:%[0-9]+]](64) = COPY %x0
+; CHECK: [[ARG1:%[0-9]+]](64) = G_TYPE p0 %x0
; CHECK: [[RES:%[0-9]+]](64) = G_PTRTOINT { s64, p0 } [[ARG1]]
; CHECK: %x0 = COPY [[RES]]
; CHECK: RET_ReallyLR implicit %x0
}
; CHECK-LABEL: name: inttoptr
-; CHECK: [[ARG1:%[0-9]+]](64) = COPY %x0
+; CHECK: [[ARG1:%[0-9]+]](64) = G_TYPE s64 %x0
; CHECK: [[RES:%[0-9]+]](64) = G_INTTOPTR { p0, s64 } [[ARG1]]
; CHECK: %x0 = COPY [[RES]]
; CHECK: RET_ReallyLR implicit %x0
}
; CHECK-LABEL: name: trivial_bitcast
-; CHECK: [[ARG1:%[0-9]+]](64) = COPY %x0
+; CHECK: [[ARG1:%[0-9]+]](64) = G_TYPE p0 %x0
; CHECK: %x0 = COPY [[ARG1]]
; CHECK: RET_ReallyLR implicit %x0
define i64* @trivial_bitcast(i8* %a) {
}
; CHECK-LABEL: name: trivial_bitcast_with_copy
-; CHECK: [[A:%[0-9]+]](64) = COPY %x0
+; CHECK: [[A:%[0-9]+]](64) = G_TYPE p0 %x0
; CHECK: G_BR unsized %[[CAST:bb\.[0-9]+]]
; CHECK: [[CAST]]:
}
; CHECK-LABEL: name: bitcast
-; CHECK: [[ARG1:%[0-9]+]](64) = COPY %x0
+; CHECK: [[ARG1:%[0-9]+]](64) = G_TYPE s64 %x0
; CHECK: [[RES1:%[0-9]+]](64) = G_BITCAST { <2 x s32>, s64 } [[ARG1]]
; CHECK: [[RES2:%[0-9]+]](64) = G_BITCAST { s64, <2 x s32> } [[RES1]]
; CHECK: %x0 = COPY [[RES2]]
}
; CHECK-LABEL: name: trunc
-; CHECK: [[ARG1:%[0-9]+]](64) = COPY %x0
+; CHECK: [[ARG1:%[0-9]+]](64) = G_TYPE s64 %x0
; CHECK: [[VEC:%[0-9]+]](128) = G_LOAD { <4 x s32>, p0 }
; CHECK: [[RES1:%[0-9]+]](8) = G_TRUNC { s8, s64 } [[ARG1]]
; CHECK: [[RES2:%[0-9]+]](64) = G_TRUNC { <4 x s16>, <4 x s32> } [[VEC]]
}
; CHECK-LABEL: name: load
-; CHECK: [[ADDR:%[0-9]+]](64) = COPY %x0
-; CHECK: [[ADDR42:%[0-9]+]](64) = COPY %x1
+; CHECK: [[ADDR:%[0-9]+]](64) = G_TYPE p0 %x0
+; CHECK: [[ADDR42:%[0-9]+]](64) = G_TYPE p42 %x1
; CHECK: [[VAL1:%[0-9]+]](64) = G_LOAD { s64, p0 } [[ADDR]] :: (load 8 from %ir.addr, align 16)
; CHECK: [[VAL2:%[0-9]+]](64) = G_LOAD { s64, p42 } [[ADDR42]] :: (load 8 from %ir.addr42)
; CHECK: [[SUM:%.*]](64) = G_ADD s64 [[VAL1]], [[VAL2]]
}
; CHECK-LABEL: name: store
-; CHECK: [[ADDR:%[0-9]+]](64) = COPY %x0
-; CHECK: [[ADDR42:%[0-9]+]](64) = COPY %x1
-; CHECK: [[VAL1:%[0-9]+]](64) = COPY %x2
-; CHECK: [[VAL2:%[0-9]+]](64) = COPY %x3
+; CHECK: [[ADDR:%[0-9]+]](64) = G_TYPE p0 %x0
+; CHECK: [[ADDR42:%[0-9]+]](64) = G_TYPE p42 %x1
+; CHECK: [[VAL1:%[0-9]+]](64) = G_TYPE s64 %x2
+; CHECK: [[VAL2:%[0-9]+]](64) = G_TYPE s64 %x3
; CHECK: G_STORE { s64, p0 } [[VAL1]], [[ADDR]] :: (store 8 into %ir.addr, align 16)
; CHECK: G_STORE { s64, p42 } [[VAL2]], [[ADDR42]] :: (store 8 into %ir.addr42)
; CHECK: RET_ReallyLR
}
; CHECK-LABEL: name: intrinsics
-; CHECK: [[CUR:%[0-9]+]](32) = COPY %w0
-; CHECK: [[BITS:%[0-9]+]](32) = COPY %w1
+; CHECK: [[CUR:%[0-9]+]](32) = G_TYPE s32 %w0
+; CHECK: [[BITS:%[0-9]+]](32) = G_TYPE s32 %w1
; CHECK: [[PTR:%[0-9]+]](64) = G_INTRINSIC { p0, s32 } intrinsic(@llvm.returnaddress), 0
; CHECK: [[PTR_VEC:%[0-9]+]](64) = G_FRAME_INDEX p0 %stack.0.ptr.vec
; CHECK: [[VEC:%[0-9]+]](64) = G_LOAD { <8 x s8>, p0 } [[PTR_VEC]]
; It's important that constants are after argument passing, but before the
; rest of the entry block.
; CHECK-LABEL: name: constant_int
-; CHECK: [[IN:%[0-9]+]](32) = COPY %w0
+; CHECK: [[IN:%[0-9]+]](32) = G_TYPE s32 %w0
; CHECK: [[ONE:%[0-9]+]](32) = G_CONSTANT s32 1
; CHECK: G_BR unsized
}
; CHECK-LABEL: name: test_sext
-; CHECK: [[IN:%[0-9]+]](32) = COPY %w0
+; CHECK: [[IN:%[0-9]+]](32) = G_TYPE s32 %w0
; CHECK: [[RES:%[0-9]+]](64) = G_SEXT { s64, s32 } [[IN]]
; CHECK: %x0 = COPY [[RES]]
define i64 @test_sext(i32 %in) {
}
; CHECK-LABEL: name: test_zext
-; CHECK: [[IN:%[0-9]+]](32) = COPY %w0
+; CHECK: [[IN:%[0-9]+]](32) = G_TYPE s32 %w0
; CHECK: [[RES:%[0-9]+]](64) = G_ZEXT { s64, s32 } [[IN]]
; CHECK: %x0 = COPY [[RES]]
define i64 @test_zext(i32 %in) {
}
; CHECK-LABEL: name: test_shl
-; CHECK: [[ARG1:%[0-9]+]](32) = COPY %w0
-; CHECK-NEXT: [[ARG2:%[0-9]+]](32) = COPY %w1
+; CHECK: [[ARG1:%[0-9]+]](32) = G_TYPE s32 %w0
+; CHECK-NEXT: [[ARG2:%[0-9]+]](32) = G_TYPE s32 %w1
; CHECK-NEXT: [[RES:%[0-9]+]](32) = G_SHL s32 [[ARG1]], [[ARG2]]
; CHECK-NEXT: %w0 = COPY [[RES]]
; CHECK-NEXT: RET_ReallyLR implicit %w0
; CHECK-LABEL: name: test_lshr
-; CHECK: [[ARG1:%[0-9]+]](32) = COPY %w0
-; CHECK-NEXT: [[ARG2:%[0-9]+]](32) = COPY %w1
+; CHECK: [[ARG1:%[0-9]+]](32) = G_TYPE s32 %w0
+; CHECK-NEXT: [[ARG2:%[0-9]+]](32) = G_TYPE s32 %w1
; CHECK-NEXT: [[RES:%[0-9]+]](32) = G_LSHR s32 [[ARG1]], [[ARG2]]
; CHECK-NEXT: %w0 = COPY [[RES]]
; CHECK-NEXT: RET_ReallyLR implicit %w0
}
; CHECK-LABEL: name: test_ashr
-; CHECK: [[ARG1:%[0-9]+]](32) = COPY %w0
-; CHECK-NEXT: [[ARG2:%[0-9]+]](32) = COPY %w1
+; CHECK: [[ARG1:%[0-9]+]](32) = G_TYPE s32 %w0
+; CHECK-NEXT: [[ARG2:%[0-9]+]](32) = G_TYPE s32 %w1
; CHECK-NEXT: [[RES:%[0-9]+]](32) = G_ASHR s32 [[ARG1]], [[ARG2]]
; CHECK-NEXT: %w0 = COPY [[RES]]
; CHECK-NEXT: RET_ReallyLR implicit %w0
}
; CHECK-LABEL: name: test_sdiv
-; CHECK: [[ARG1:%[0-9]+]](32) = COPY %w0
-; CHECK-NEXT: [[ARG2:%[0-9]+]](32) = COPY %w1
+; CHECK: [[ARG1:%[0-9]+]](32) = G_TYPE s32 %w0
+; CHECK-NEXT: [[ARG2:%[0-9]+]](32) = G_TYPE s32 %w1
; CHECK-NEXT: [[RES:%[0-9]+]](32) = G_SDIV s32 [[ARG1]], [[ARG2]]
; CHECK-NEXT: %w0 = COPY [[RES]]
; CHECK-NEXT: RET_ReallyLR implicit %w0
}
; CHECK-LABEL: name: test_udiv
-; CHECK: [[ARG1:%[0-9]+]](32) = COPY %w0
-; CHECK-NEXT: [[ARG2:%[0-9]+]](32) = COPY %w1
+; CHECK: [[ARG1:%[0-9]+]](32) = G_TYPE s32 %w0
+; CHECK-NEXT: [[ARG2:%[0-9]+]](32) = G_TYPE s32 %w1
; CHECK-NEXT: [[RES:%[0-9]+]](32) = G_UDIV s32 [[ARG1]], [[ARG2]]
; CHECK-NEXT: %w0 = COPY [[RES]]
; CHECK-NEXT: RET_ReallyLR implicit %w0
}
; CHECK-LABEL: name: test_srem
-; CHECK: [[ARG1:%[0-9]+]](32) = COPY %w0
-; CHECK-NEXT: [[ARG2:%[0-9]+]](32) = COPY %w1
+; CHECK: [[ARG1:%[0-9]+]](32) = G_TYPE s32 %w0
+; CHECK-NEXT: [[ARG2:%[0-9]+]](32) = G_TYPE s32 %w1
; CHECK-NEXT: [[RES:%[0-9]+]](32) = G_SREM s32 [[ARG1]], [[ARG2]]
; CHECK-NEXT: %w0 = COPY [[RES]]
; CHECK-NEXT: RET_ReallyLR implicit %w0
}
; CHECK-LABEL: name: test_urem
-; CHECK: [[ARG1:%[0-9]+]](32) = COPY %w0
-; CHECK-NEXT: [[ARG2:%[0-9]+]](32) = COPY %w1
+; CHECK: [[ARG1:%[0-9]+]](32) = G_TYPE s32 %w0
+; CHECK-NEXT: [[ARG2:%[0-9]+]](32) = G_TYPE s32 %w1
; CHECK-NEXT: [[RES:%[0-9]+]](32) = G_UREM s32 [[ARG1]], [[ARG2]]
; CHECK-NEXT: %w0 = COPY [[RES]]
; CHECK-NEXT: RET_ReallyLR implicit %w0
}
; CHECK-LABEL: name: test_struct_memops
-; CHECK: [[ADDR:%[0-9]+]](64) = COPY %x0
+; CHECK: [[ADDR:%[0-9]+]](64) = G_TYPE p0 %x0
; CHECK: [[VAL:%[0-9]+]](64) = G_LOAD { s64, p0 } [[ADDR]] :: (load 8 from %ir.addr, align 4)
; CHECK: G_STORE { s64, p0 } [[VAL]], [[ADDR]] :: (store 8 into %ir.addr, align 4)
define void @test_struct_memops({ i8, i32 }* %addr) {
}
; CHECK-LABEL: name: test_i1_memops
-; CHECK: [[ADDR:%[0-9]+]](64) = COPY %x0
+; CHECK: [[ADDR:%[0-9]+]](64) = G_TYPE p0 %x0
; CHECK: [[VAL:%[0-9]+]](1) = G_LOAD { s1, p0 } [[ADDR]] :: (load 1 from %ir.addr)
; CHECK: G_STORE { s1, p0 } [[VAL]], [[ADDR]] :: (store 1 into %ir.addr)
define void @test_i1_memops(i1* %addr) {
}
; CHECK-LABEL: name: int_comparison
-; CHECK: [[LHS:%[0-9]+]](32) = COPY %w0
-; CHECK: [[RHS:%[0-9]+]](32) = COPY %w1
-; CHECK: [[ADDR:%[0-9]+]](64) = COPY %x2
+; CHECK: [[LHS:%[0-9]+]](32) = G_TYPE s32 %w0
+; CHECK: [[RHS:%[0-9]+]](32) = G_TYPE s32 %w1
+; CHECK: [[ADDR:%[0-9]+]](64) = G_TYPE p0 %x2
; CHECK: [[TST:%[0-9]+]](1) = G_ICMP { s1, s32 } intpred(ne), [[LHS]], [[RHS]]
; CHECK: G_STORE { s1, p0 } [[TST]], [[ADDR]]
define void @int_comparison(i32 %a, i32 %b, i1* %addr) {
}
; CHECK-LABEL: name: test_fadd
-; CHECK: [[ARG1:%[0-9]+]](32) = COPY %s0
-; CHECK-NEXT: [[ARG2:%[0-9]+]](32) = COPY %s1
+; CHECK: [[ARG1:%[0-9]+]](32) = G_TYPE s32 %s0
+; CHECK-NEXT: [[ARG2:%[0-9]+]](32) = G_TYPE s32 %s1
; CHECK-NEXT: [[RES:%[0-9]+]](32) = G_FADD s32 [[ARG1]], [[ARG2]]
; CHECK-NEXT: %s0 = COPY [[RES]]
; CHECK-NEXT: RET_ReallyLR implicit %s0
}
; CHECK-LABEL: name: test_fsub
-; CHECK: [[ARG1:%[0-9]+]](32) = COPY %s0
-; CHECK-NEXT: [[ARG2:%[0-9]+]](32) = COPY %s1
+; CHECK: [[ARG1:%[0-9]+]](32) = G_TYPE s32 %s0
+; CHECK-NEXT: [[ARG2:%[0-9]+]](32) = G_TYPE s32 %s1
; CHECK-NEXT: [[RES:%[0-9]+]](32) = G_FSUB s32 [[ARG1]], [[ARG2]]
; CHECK-NEXT: %s0 = COPY [[RES]]
; CHECK-NEXT: RET_ReallyLR implicit %s0
}
; CHECK-LABEL: name: test_fmul
-; CHECK: [[ARG1:%[0-9]+]](32) = COPY %s0
-; CHECK-NEXT: [[ARG2:%[0-9]+]](32) = COPY %s1
+; CHECK: [[ARG1:%[0-9]+]](32) = G_TYPE s32 %s0
+; CHECK-NEXT: [[ARG2:%[0-9]+]](32) = G_TYPE s32 %s1
; CHECK-NEXT: [[RES:%[0-9]+]](32) = G_FMUL s32 [[ARG1]], [[ARG2]]
; CHECK-NEXT: %s0 = COPY [[RES]]
; CHECK-NEXT: RET_ReallyLR implicit %s0
}
; CHECK-LABEL: name: test_fdiv
-; CHECK: [[ARG1:%[0-9]+]](32) = COPY %s0
-; CHECK-NEXT: [[ARG2:%[0-9]+]](32) = COPY %s1
+; CHECK: [[ARG1:%[0-9]+]](32) = G_TYPE s32 %s0
+; CHECK-NEXT: [[ARG2:%[0-9]+]](32) = G_TYPE s32 %s1
; CHECK-NEXT: [[RES:%[0-9]+]](32) = G_FDIV s32 [[ARG1]], [[ARG2]]
; CHECK-NEXT: %s0 = COPY [[RES]]
; CHECK-NEXT: RET_ReallyLR implicit %s0
}
; CHECK-LABEL: name: test_frem
-; CHECK: [[ARG1:%[0-9]+]](32) = COPY %s0
-; CHECK-NEXT: [[ARG2:%[0-9]+]](32) = COPY %s1
+; CHECK: [[ARG1:%[0-9]+]](32) = G_TYPE s32 %s0
+; CHECK-NEXT: [[ARG2:%[0-9]+]](32) = G_TYPE s32 %s1
; CHECK-NEXT: [[RES:%[0-9]+]](32) = G_FREM s32 [[ARG1]], [[ARG2]]
; CHECK-NEXT: %s0 = COPY [[RES]]
; CHECK-NEXT: RET_ReallyLR implicit %s0
}
; CHECK-LABEL: name: test_sadd_overflow
-; CHECK: [[LHS:%[0-9]+]](32) = COPY %w0
-; CHECK: [[RHS:%[0-9]+]](32) = COPY %w1
-; CHECK: [[ADDR:%[0-9]+]](64) = COPY %x2
+; CHECK: [[LHS:%[0-9]+]](32) = G_TYPE s32 %w0
+; CHECK: [[RHS:%[0-9]+]](32) = G_TYPE s32 %w1
+; CHECK: [[ADDR:%[0-9]+]](64) = G_TYPE p0 %x2
; CHECK: [[VAL:%[0-9]+]](32), [[OVERFLOW:%[0-9]+]](1) = G_SADDO { s32, s1 } [[LHS]], [[RHS]]
; CHECK: [[RES:%[0-9]+]](64) = G_SEQUENCE { s64, s32, s1 } [[VAL]], 0, [[OVERFLOW]], 32
; CHECK: G_STORE { s64, p0 } [[RES]], [[ADDR]]
}
; CHECK-LABEL: name: test_uadd_overflow
-; CHECK: [[LHS:%[0-9]+]](32) = COPY %w0
-; CHECK: [[RHS:%[0-9]+]](32) = COPY %w1
-; CHECK: [[ADDR:%[0-9]+]](64) = COPY %x2
+; CHECK: [[LHS:%[0-9]+]](32) = G_TYPE s32 %w0
+; CHECK: [[RHS:%[0-9]+]](32) = G_TYPE s32 %w1
+; CHECK: [[ADDR:%[0-9]+]](64) = G_TYPE p0 %x2
; CHECK: [[ZERO:%[0-9]+]](1) = G_CONSTANT s1 0
; CHECK: [[VAL:%[0-9]+]](32), [[OVERFLOW:%[0-9]+]](1) = G_UADDE { s32, s1 } [[LHS]], [[RHS]], [[ZERO]]
; CHECK: [[RES:%[0-9]+]](64) = G_SEQUENCE { s64, s32, s1 } [[VAL]], 0, [[OVERFLOW]], 32
}
; CHECK-LABEL: name: test_ssub_overflow
-; CHECK: [[LHS:%[0-9]+]](32) = COPY %w0
-; CHECK: [[RHS:%[0-9]+]](32) = COPY %w1
-; CHECK: [[SUBR:%[0-9]+]](64) = COPY %x2
+; CHECK: [[LHS:%[0-9]+]](32) = G_TYPE s32 %w0
+; CHECK: [[RHS:%[0-9]+]](32) = G_TYPE s32 %w1
+; CHECK: [[SUBR:%[0-9]+]](64) = G_TYPE p0 %x2
; CHECK: [[VAL:%[0-9]+]](32), [[OVERFLOW:%[0-9]+]](1) = G_SSUBO { s32, s1 } [[LHS]], [[RHS]]
; CHECK: [[RES:%[0-9]+]](64) = G_SEQUENCE { s64, s32, s1 } [[VAL]], 0, [[OVERFLOW]], 32
; CHECK: G_STORE { s64, p0 } [[RES]], [[SUBR]]
}
; CHECK-LABEL: name: test_usub_overflow
-; CHECK: [[LHS:%[0-9]+]](32) = COPY %w0
-; CHECK: [[RHS:%[0-9]+]](32) = COPY %w1
-; CHECK: [[SUBR:%[0-9]+]](64) = COPY %x2
+; CHECK: [[LHS:%[0-9]+]](32) = G_TYPE s32 %w0
+; CHECK: [[RHS:%[0-9]+]](32) = G_TYPE s32 %w1
+; CHECK: [[SUBR:%[0-9]+]](64) = G_TYPE p0 %x2
; CHECK: [[ZERO:%[0-9]+]](1) = G_CONSTANT s1 0
; CHECK: [[VAL:%[0-9]+]](32), [[OVERFLOW:%[0-9]+]](1) = G_USUBE { s32, s1 } [[LHS]], [[RHS]], [[ZERO]]
; CHECK: [[RES:%[0-9]+]](64) = G_SEQUENCE { s64, s32, s1 } [[VAL]], 0, [[OVERFLOW]], 32
}
; CHECK-LABEL: name: test_smul_overflow
-; CHECK: [[LHS:%[0-9]+]](32) = COPY %w0
-; CHECK: [[RHS:%[0-9]+]](32) = COPY %w1
-; CHECK: [[ADDR:%[0-9]+]](64) = COPY %x2
+; CHECK: [[LHS:%[0-9]+]](32) = G_TYPE s32 %w0
+; CHECK: [[RHS:%[0-9]+]](32) = G_TYPE s32 %w1
+; CHECK: [[ADDR:%[0-9]+]](64) = G_TYPE p0 %x2
; CHECK: [[VAL:%[0-9]+]](32), [[OVERFLOW:%[0-9]+]](1) = G_SMULO { s32, s1 } [[LHS]], [[RHS]]
; CHECK: [[RES:%[0-9]+]](64) = G_SEQUENCE { s64, s32, s1 } [[VAL]], 0, [[OVERFLOW]], 32
; CHECK: G_STORE { s64, p0 } [[RES]], [[ADDR]]
}
; CHECK-LABEL: name: test_umul_overflow
-; CHECK: [[LHS:%[0-9]+]](32) = COPY %w0
-; CHECK: [[RHS:%[0-9]+]](32) = COPY %w1
-; CHECK: [[ADDR:%[0-9]+]](64) = COPY %x2
+; CHECK: [[LHS:%[0-9]+]](32) = G_TYPE s32 %w0
+; CHECK: [[RHS:%[0-9]+]](32) = G_TYPE s32 %w1
+; CHECK: [[ADDR:%[0-9]+]](64) = G_TYPE p0 %x2
; CHECK: [[VAL:%[0-9]+]](32), [[OVERFLOW:%[0-9]+]](1) = G_UMULO { s32, s1 } [[LHS]], [[RHS]]
; CHECK: [[RES:%[0-9]+]](64) = G_SEQUENCE { s64, s32, s1 } [[VAL]], 0, [[OVERFLOW]], 32
; CHECK: G_STORE { s64, p0 } [[RES]], [[ADDR]]
}
; CHECK-LABEL: name: test_insertvalue
-; CHECK: [[VAL:%[0-9]+]](32) = COPY %w1
+; CHECK: [[VAL:%[0-9]+]](32) = G_TYPE s32 %w1
; CHECK: [[STRUCT:%[0-9]+]](128) = G_LOAD { s128, p0 }
; CHECK: [[NEWSTRUCT:%[0-9]+]](128) = G_INSERT { s128, s32 } [[STRUCT]], [[VAL]], 64
; CHECK: G_STORE { s128, p0 } [[NEWSTRUCT]],
}
; CHECK-LABEL: name: test_select
-; CHECK: [[TST:%[0-9]+]](1) = COPY %w0
-; CHECK: [[LHS:%[0-9]+]](32) = COPY %w1
-; CHECK: [[RHS:%[0-9]+]](32) = COPY %w2
+; CHECK: [[TST:%[0-9]+]](1) = G_TYPE s1 %w0
+; CHECK: [[LHS:%[0-9]+]](32) = G_TYPE s32 %w1
+; CHECK: [[RHS:%[0-9]+]](32) = G_TYPE s32 %w2
; CHECK: [[RES:%[0-9]+]](32) = G_SELECT { s32, s1 } [[TST]], [[LHS]], [[RHS]]
; CHECK: %w0 = COPY [[RES]]
define i32 @test_select(i1 %tst, i32 %lhs, i32 %rhs) {
}
; CHECK-LABEL: name: test_fptosi
-; CHECK: [[FPADDR:%[0-9]+]](64) = COPY %x0
+; CHECK: [[FPADDR:%[0-9]+]](64) = G_TYPE p0 %x0
; CHECK: [[FP:%[0-9]+]](32) = G_LOAD { s32, p0 } [[FPADDR]]
; CHECK: [[RES:%[0-9]+]](64) = G_FPTOSI { s64, s32 } [[FP]]
; CHECK: %x0 = COPY [[RES]]
}
; CHECK-LABEL: name: test_fptoui
-; CHECK: [[FPADDR:%[0-9]+]](64) = COPY %x0
+; CHECK: [[FPADDR:%[0-9]+]](64) = G_TYPE p0 %x0
; CHECK: [[FP:%[0-9]+]](32) = G_LOAD { s32, p0 } [[FPADDR]]
; CHECK: [[RES:%[0-9]+]](64) = G_FPTOUI { s64, s32 } [[FP]]
; CHECK: %x0 = COPY [[RES]]
}
; CHECK-LABEL: name: test_sitofp
-; CHECK: [[ADDR:%[0-9]+]](64) = COPY %x0
-; CHECK: [[IN:%[0-9]+]](32) = COPY %w1
+; CHECK: [[ADDR:%[0-9]+]](64) = G_TYPE p0 %x0
+; CHECK: [[IN:%[0-9]+]](32) = G_TYPE s32 %w1
; CHECK: [[FP:%[0-9]+]](64) = G_SITOFP { s64, s32 } [[IN]]
; CHECK: G_STORE { s64, p0 } [[FP]], [[ADDR]]
define void @test_sitofp(double* %addr, i32 %in) {
}
; CHECK-LABEL: name: test_uitofp
-; CHECK: [[ADDR:%[0-9]+]](64) = COPY %x0
-; CHECK: [[IN:%[0-9]+]](32) = COPY %w1
+; CHECK: [[ADDR:%[0-9]+]](64) = G_TYPE p0 %x0
+; CHECK: [[IN:%[0-9]+]](32) = G_TYPE s32 %w1
; CHECK: [[FP:%[0-9]+]](64) = G_UITOFP { s64, s32 } [[IN]]
; CHECK: G_STORE { s64, p0 } [[FP]], [[ADDR]]
define void @test_uitofp(double* %addr, i32 %in) {
}
; CHECK-LABEL: name: test_fpext
-; CHECK: [[IN:%[0-9]+]](32) = COPY %s0
+; CHECK: [[IN:%[0-9]+]](32) = G_TYPE s32 %s0
; CHECK: [[RES:%[0-9]+]](64) = G_FPEXT { s64, s32 } [[IN]]
; CHECK: %d0 = COPY [[RES]]
define double @test_fpext(float %in) {
}
; CHECK-LABEL: name: test_fptrunc
-; CHECK: [[IN:%[0-9]+]](64) = COPY %d0
+; CHECK: [[IN:%[0-9]+]](64) = G_TYPE s64 %d0
; CHECK: [[RES:%[0-9]+]](32) = G_FPTRUNC { s32, s64 } [[IN]]
; CHECK: %s0 = COPY [[RES]]
define float @test_fptrunc(double %in) {
}
; CHECK-LABEL: name: test_constant_float
-; CHECK: [[ADDR:%[0-9]+]](64) = COPY %x0
+; CHECK: [[ADDR:%[0-9]+]](64) = G_TYPE p0 %x0
; CHECK: [[TMP:%[0-9]+]](32) = G_FCONSTANT s32 float 1.500000e+00
; CHECK: G_STORE { s32, p0 } [[TMP]], [[ADDR]]
define void @test_constant_float(float* %addr) {
}
; CHECK-LABEL: name: float_comparison
-; CHECK: [[LHSADDR:%[0-9]+]](64) = COPY %x0
-; CHECK: [[RHSADDR:%[0-9]+]](64) = COPY %x1
-; CHECK: [[BOOLADDR:%[0-9]+]](64) = COPY %x2
+; CHECK: [[LHSADDR:%[0-9]+]](64) = G_TYPE p0 %x0
+; CHECK: [[RHSADDR:%[0-9]+]](64) = G_TYPE p0 %x1
+; CHECK: [[BOOLADDR:%[0-9]+]](64) = G_TYPE p0 %x2
; CHECK: [[LHS:%[0-9]+]](32) = G_LOAD { s32, p0 } [[LHSADDR]]
; CHECK: [[RHS:%[0-9]+]](32) = G_LOAD { s32, p0 } [[RHSADDR]]
; CHECK: [[TST:%[0-9]+]](1) = G_FCMP { s1, s32 } floatpred(oge), [[LHS]], [[RHS]]
bb.0.entry:
liveins: %x0
; CHECK: %1(32) = G_ADD s32 %0
- %0(32) = COPY %w0
+ %0(32) = G_TYPE s32 %w0
%1(32) = G_ADD s32 %0, %0
...
body: |
bb.0.entry:
liveins: %d0
- ; CHECK: %0(64) = COPY %d0
+ ; CHECK: %0(64) = G_TYPE s64 %d0
; CHECK: %1(64) = G_ADD <2 x s32> %0
- %0(64) = COPY %d0
+ %0(64) = G_TYPE s64 %d0
%1(64) = G_ADD <2 x s32> %0, %0
...
body: |
bb.0.entry:
liveins: %s0, %x0
- ; CHECK: %0(32) = COPY %s0
- ; CHECK-NEXT: %1(32) = COPY %w0
+ ; CHECK: %0(32) = G_TYPE s32 %s0
+ ; CHECK-NEXT: %1(32) = G_TYPE s32 %w0
; CHECK-NEXT: %3(32) = COPY %0
; CHECK-NEXT: %2(32) = G_ADD s32 %3, %1
- %0(32) = COPY %s0
- %1(32) = COPY %w0
+ %0(32) = G_TYPE s32 %s0
+ %1(32) = G_TYPE s32 %w0
%2(32) = G_ADD s32 %0, %1
...
body: |
bb.0.entry:
liveins: %s0, %x0
- ; CHECK: %0(32) = COPY %s0
+ ; CHECK: %0(32) = G_TYPE s32 %s0
; CHECK-NEXT: %2(32) = COPY %0
; CHECK-NEXT: %3(32) = COPY %0
; CHECK-NEXT: %1(32) = G_ADD s32 %2, %3
- %0(32) = COPY %s0
+ %0(32) = G_TYPE s32 %s0
%1(32) = G_ADD s32 %0, %0
...
body: |
bb.0.entry:
liveins: %w0
- ; CHECK: %0(32) = COPY %w0
+ ; CHECK: %0(32) = G_TYPE s32 %w0
; CHECK-NEXT: %2(32) = G_ADD s32 %0, %0
; CHECK-NEXT: %1(32) = COPY %2
- %0(32) = COPY %w0
+ %0(32) = G_TYPE s32 %w0
%1(32) = G_ADD s32 %0, %0
...
liveins: %x0, %x1, %w2
%0 = LDRWui killed %x0, 0 :: (load 4 from %ir.src)
- %1 = COPY %x1
- %2 = COPY %w2
+ %1 = G_TYPE s64 %x1
+ %2 = G_TYPE s32 %w2
TBNZW killed %2, 0, %bb.2.end
bb.1.then:
body: |
bb.0.entry:
liveins: %w0, %s0
- ; CHECK: %0(32) = COPY %w0
- ; CHECK-NEXT: %1(32) = COPY %s0
+ ; CHECK: %0(32) = G_TYPE s32 %w0
+ ; CHECK-NEXT: %1(32) = G_TYPE s32 %s0
; CHECK-NEXT: %3(32) = COPY %1
; CHECK-NEXT: %2(32) = G_ADD s32 %0, %3
- %0(32) = COPY %w0
- %1(32) = COPY %s0
+ %0(32) = G_TYPE s32 %w0
+ %1(32) = G_TYPE s32 %s0
%2(32) = G_ADD s32 %0, %1
...
body: |
bb.0.entry:
liveins: %w0
- ; CHECK: %0(32) = COPY %w0
+ ; CHECK: %0(32) = G_TYPE s32 %w0
; CHECK-NEXT: %1(32) = G_ADD s32 %0, %0
; CHECK-NEXT: %s0 = COPY %1
- %0(32) = COPY %w0
+ %0(32) = G_TYPE s32 %w0
%1(32) = G_ADD s32 %0, %0
%s0 = COPY %1
...
body: |
bb.0.entry:
liveins: %x0, %x1
- ; CHECK: %0(64) = COPY %x0
- ; CHECK-NEXT: %1(64) = COPY %x1
+ ; CHECK: %0(64) = G_TYPE s64 %x0
+ ; CHECK-NEXT: %1(64) = G_TYPE s64 %x1
; Fast mode tries to reuse the source of the copy for the destination.
; Greedy mode remapped the instruction on the GPR bank.
; GREEDY-NEXT: %2(64) = G_OR <2 x s32> %0, %1
- %0(64) = COPY %x0
- %1(64) = COPY %x1
+ %0(64) = G_TYPE s64 %x0
+ %1(64) = G_TYPE s64 %x1
%2(64) = G_OR <2 x s32> %0, %1
...
body: |
bb.0.entry:
liveins: %x0, %x1
- ; CHECK: %0(64) = COPY %x0
- ; CHECK-NEXT: %1(64) = COPY %x1
+ ; CHECK: %0(64) = G_TYPE s64 %x0
+ ; CHECK-NEXT: %1(64) = G_TYPE s64 %x1
; Fast mode tries to reuse the source of the copy for the destination.
; GREEDY-NEXT: %3(64) = G_OR <2 x s32> %0, %1
; We need to keep %2 into FPR because we do not know anything about it.
; GREEDY-NEXT: %2(64) = COPY %3
- %0(64) = COPY %x0
- %1(64) = COPY %x1
+ %0(64) = G_TYPE s64 %x0
+ %1(64) = G_TYPE s64 %x1
%2(64) = G_OR <2 x s32> %0, %1
...
bb.0:
liveins: %x0
- ; CHECK: %0 = COPY %x0
+ ; CHECK: %0 = G_TYPE s64 %x0
; CHECK-NEXT: %1 = ADDXrr %0, %0
; CHECK-NEXT: %x0 = COPY %1
; CHECK-NEXT: RET_ReallyLR implicit %x0
- %0 = COPY %x0
+ %0 = G_TYPE s64 %x0
%1 = ADDXrr %0, %0
%x0 = COPY %1
RET_ReallyLR implicit %x0
; CHECK-LABEL: name: test_simple_return
; CHECK: BL @simple_return_callee, csr_aarch64_aapcs, implicit-def %lr, implicit %sp, implicit-def %x0
-; CHECK: [[RES:%[0-9]+]](64) = COPY %x0
+; CHECK: [[RES:%[0-9]+]](64) = G_TYPE s64 %x0
; CHECK: %x0 = COPY [[RES]]
; CHECK: RET_ReallyLR implicit %x0
declare i64 @simple_return_callee()
}
; CHECK-LABEL: name: test_simple_arg
-; CHECK: [[IN:%[0-9]+]](32) = COPY %w0
+; CHECK: [[IN:%[0-9]+]](32) = G_TYPE s32 %w0
; CHECK: %w0 = COPY [[IN]]
; CHECK: BL @simple_arg_callee, csr_aarch64_aapcs, implicit-def %lr, implicit %sp, implicit %w0
; CHECK: RET_ReallyLR
}
; CHECK-LABEL: name: test_indirect_call
-; CHECK: [[FUNC:%[0-9]+]](64) = COPY %x0
+; CHECK: [[FUNC:%[0-9]+]](64) = G_TYPE p0 %x0
; CHECK: BLR [[FUNC]], csr_aarch64_aapcs, implicit-def %lr, implicit %sp
; CHECK: RET_ReallyLR
define void @test_indirect_call(void()* %func) {
}
; CHECK-LABEL: name: test_multiple_args
-; CHECK: [[IN:%[0-9]+]](64) = COPY %x0
+; CHECK: [[IN:%[0-9]+]](64) = G_TYPE s64 %x0
; CHECK: [[ANSWER:%[0-9]+]](32) = G_CONSTANT s32 42
; CHECK: %w0 = COPY [[ANSWER]]
; CHECK: %x1 = COPY [[IN]]
; CHECK: %x0 = COPY [[RES_LO]]
; CHECK: %x1 = COPY [[RES_HI]]
- %0(64) = COPY %x0
- %1(64) = COPY %x1
- %2(64) = COPY %x2
- %3(64) = COPY %x3
+ %0(64) = G_TYPE s64 %x0
+ %1(64) = G_TYPE s64 %x1
+ %2(64) = G_TYPE s64 %x2
+ %3(64) = G_TYPE s64 %x3
%4(128) = G_SEQUENCE { s128, s64, s64 } %0, 0, %1, 64
%5(128) = G_SEQUENCE { s128, s64, s64 } %2, 0, %3, 64
%6(128) = G_ADD s128 %4, %5
; CHECK-LABEL: name: test_scalar_add_small
; CHECK: [[RES:%.*]](8) = G_ADD s8 %2, %3
- %0(64) = COPY %x0
- %1(64) = COPY %x1
+ %0(64) = G_TYPE s64 %x0
+ %1(64) = G_TYPE s64 %x1
%2(8) = G_TRUNC { s8, s64 } %0
%3(8) = G_TRUNC { s8, s64 } %1
%4(8) = G_ADD s8 %2, %3
; CHECK: %q0 = COPY [[RES_LO]]
; CHECK: %q1 = COPY [[RES_HI]]
- %0(128) = COPY %q0
- %1(128) = COPY %q1
- %2(128) = COPY %q2
- %3(128) = COPY %q3
+ %0(128) = G_TYPE s128 %q0
+ %1(128) = G_TYPE s128 %q1
+ %2(128) = G_TYPE s128 %q2
+ %3(128) = G_TYPE s128 %q3
%4(256) = G_SEQUENCE { s256, s128, s128 } %0, 0, %1, 128
%5(256) = G_SEQUENCE { s256, s128, s128 } %2, 0, %3, 128
%6(256) = G_ADD <4 x s64> %4, %5
; CHECK-LABEL: name: test_scalar_and_small
; CHECK: %4(8) = G_AND s8 %2, %3
- %0(64) = COPY %x0
- %1(64) = COPY %x1
+ %0(64) = G_TYPE s64 %x0
+ %1(64) = G_TYPE s64 %x1
%2(8) = G_TRUNC { s8, s64 } %0
%3(8) = G_TRUNC { s8, s64 } %1
%4(8) = G_AND s8 %2, %3
body: |
bb.0.entry:
liveins: %x0, %x1, %x2, %x3
- %0(64) = COPY %x0
- %1(64) = COPY %x0
+ %0(64) = G_TYPE s64 %x0
+ %1(64) = G_TYPE s64 %x0
%2(8) = G_TRUNC { s8, s64 } %0
%3(8) = G_TRUNC { s8, s64 } %1
bb.0.entry:
liveins: %w0, %w1, %x2, %x3
- %0(32) = COPY %w0
- %1(32) = COPY %w1
+ %0(32) = G_TYPE s32 %w0
+ %1(32) = G_TYPE s32 %w1
%2(8) = G_TRUNC { s8, s32 } %0
; Only one of these extracts can be eliminated, the offsets don't match
; CHECK: %18(64) = G_EXTRACT { <2 x s32>, s128 } %17, 0
; CHECK: %19(64) = G_ADD <2 x s32> %18, %18
- %16(64) = COPY %x0
+ %16(64) = G_TYPE s64 %x0
%17(128) = G_SEQUENCE { s128, s64, s64 } %16, 0, %16, 64
%18(64) = G_EXTRACT { <2 x s32>, s128 } %17, 0
%19(64) = G_ADD <2 x s32> %18, %18
body: |
bb.0.entry:
liveins: %x0, %x1, %x2, %x3
- %0(64) = COPY %x0
- %1(64) = COPY %x1
+ %0(64) = G_TYPE s64 %x0
+ %1(64) = G_TYPE s64 %x1
%2(32) = G_TRUNC { s8, s64 } %0
%3(32) = G_TRUNC { s8, s64 } %1
body: |
bb.0.entry:
liveins: %x0, %x1, %x2, %x3
- %0(64) = COPY %x0
+ %0(64) = G_TYPE s64 %x0
; CHECK: %1(1) = G_TRUNC { s1, s64 } %0
; CHECK: %2(8) = G_TRUNC { s8, s64 } %0
body: |
bb.0.entry:
liveins: %x0, %x1, %x2, %x3
- %0(64) = COPY %x0
- %1(64) = COPY %x0
+ %0(64) = G_TYPE s64 %x0
+ %1(64) = G_TYPE s64 %x0
%2(8) = G_TRUNC { s32, s64 } %0
%3(8) = G_TRUNC { s32, s64 } %1
bb.0:
liveins: %x0
; CHECK-LABEL: name: test_copy
- ; CHECK: %0(64) = COPY %x0
+ ; CHECK: %0(64) = G_TYPE s64 %x0
; CHECK-NEXT: %x0 = COPY %0
- %0(64) = COPY %x0
+ %0(64) = G_TYPE s64 %x0
%x0 = COPY %0
...
bb.0.entry:
liveins: %x0, %x1, %x2, %x3
; CHECK-LABEL: name: test_load
- %0(64) = COPY %x0
+    %0(64) = G_TYPE p0 %x0
; CHECK: [[BIT8:%[0-9]+]](8) = G_LOAD { s8, p0 } %0 :: (load 1 from %ir.addr)
; CHECK: %1(1) = G_TRUNC { s1, s8 } [[BIT8]]
liveins: %x0, %x1, %x2, %x3
; CHECK-LABEL: name: test_store
- %0(64) = COPY %x0
- %1(32) = COPY %w1
+    %0(64) = G_TYPE p0 %x0
+ %1(32) = G_TYPE s32 %w1
; CHECK: [[BIT8:%[0-9]+]](8) = G_ANYEXT { s8, s1 } %2
; CHECK: G_STORE { s8, p0 } [[BIT8]], %0 :: (store 1 into %ir.addr)
; CHECK-LABEL: name: test_scalar_mul_small
; CHECK: %4(8) = G_MUL s8 %2, %3
- %0(64) = COPY %x0
- %1(64) = COPY %x1
+ %0(64) = G_TYPE s64 %x0
+ %1(64) = G_TYPE s64 %x1
%2(8) = G_TRUNC { s8, s64 } %0
%3(8) = G_TRUNC { s8, s64 } %1
%4(8) = G_MUL s8 %2, %3
; CHECK-LABEL: name: test_scalar_or_small
; CHECK: %4(8) = G_OR s8 %2, %3
- %0(64) = COPY %x0
- %1(64) = COPY %x1
+ %0(64) = G_TYPE s64 %x0
+ %1(64) = G_TYPE s64 %x1
%2(8) = G_TRUNC { s8, s64 } %0
%3(8) = G_TRUNC { s8, s64 } %1
%4(8) = G_OR s8 %2, %3
; CHECK: [[QUOT:%[0-9]+]](64) = G_UDIV s64 %0, %1
; CHECK: [[PROD:%[0-9]+]](64) = G_MUL s64 [[QUOT]], %1
; CHECK: [[RES:%[0-9]+]](64) = G_SUB s64 %0, [[PROD]]
- %0(64) = COPY %x0
- %1(64) = COPY %x1
+ %0(64) = G_TYPE s64 %x0
+ %1(64) = G_TYPE s64 %x1
%2(64) = G_UREM s64 %0, %1
; CHECK: [[QUOT:%[0-9]+]](32) = G_SDIV s32 %3, %4
; CHECK: %d0 = COPY %0
; CHECK: %d1 = COPY %1
; CHECK: BL $fmod, csr_aarch64_aapcs, implicit-def %lr, implicit %sp, implicit %d0, implicit %d1, implicit-def %d0
- ; CHECK: %9(64) = COPY %d0
+ ; CHECK: %9(64) = G_TYPE s64 %d0
%9(64) = G_FREM s64 %0, %1
; CHECK: %s0 = COPY %3
; CHECK: %s1 = COPY %4
; CHECK: BL $fmodf, csr_aarch64_aapcs, implicit-def %lr, implicit %sp, implicit %s0, implicit %s1, implicit-def %s0
- ; CHECK: %10(32) = COPY %s0
+ ; CHECK: %10(32) = G_TYPE s32 %s0
%10(32) = G_FREM s32 %3, %4
...
body: |
bb.0.entry:
liveins: %x0, %x1, %x2, %x3
- %0(64) = COPY %x0
+    %0(64) = G_TYPE p0 %x0
; CHECK-LABEL: name: test_simple
; CHECK: %1(64) = G_PTRTOINT { s64, p0 } %0
; CHECK-LABEL: name: test_scalar_sub_small
; CHECK: [[RES:%.*]](8) = G_SUB s8 %0, %1
- %0(64) = COPY %x0
- %1(64) = COPY %x1
+ %0(64) = G_TYPE s64 %x0
+ %1(64) = G_TYPE s64 %x1
%2(8) = G_TRUNC { s8, s64 } %0
%3(8) = G_TRUNC { s8, s64 } %1
%4(8) = G_SUB s8 %0, %1
; CHECK-LABEL: name: test_scalar_xor_small
; CHECK: %4(8) = G_XOR s8 %2, %3
- %0(64) = COPY %x0
- %1(64) = COPY %x1
+ %0(64) = G_TYPE s64 %x0
+ %1(64) = G_TYPE s64 %x1
%2(8) = G_TRUNC { s8, s64 } %0
%3(8) = G_TRUNC { s8, s64 } %1
%4(8) = G_XOR s8 %2, %3
...
---
# CHECK: *** Bad machine code: Generic virtual register must have a bank in a RegBankSelected function ***
-# CHECK: instruction: %vreg0<def>(64) = COPY
+# CHECK: instruction: %vreg0<def>(64) = G_TYPE
# CHECK: operand 0: %vreg0<def>
name: test
regBankSelected: true
body: |
bb.0:
liveins: %x0
- %0(64) = COPY %x0
+ %0(64) = G_TYPE s64 %x0
...
body: |
bb.0:
liveins: %x0
- %0 = COPY %x0
+ %0 = G_TYPE s64 %x0
; CHECK: *** Bad machine code: Unexpected generic instruction in a Selected function ***
; CHECK: instruction: %vreg1<def> = G_ADD { s32 }
liveins: %edi
; CHECK: PATCHABLE_FUNCTION_ENTER
RETQ
- ; CHECK-NEXT: PATCHABLE_RET 2508
+ ; CHECK-NEXT: PATCHABLE_RET
RETQ
- ; CHECK-NEXT: PATCHABLE_RET 2508
+ ; CHECK-NEXT: PATCHABLE_RET
...