return true;
}
+static bool tryToSimplifyUADDO(MachineInstr &MI, MachineIRBuilder &B,
+ CombinerHelper &Helper,
+ GISelChangeObserver &Observer) {
+ // Try to simplify a G_UADDO with 8- or 16-bit operands to a wide G_ADD
+ // and a TBNZ if the result is only used in the no-overflow case. This is
+ // restricted to cases where we know that the high bits of the operands
+ // are 0. If there's an overflow, then the 9th or 17th bit must be set,
+ // which can be checked using TBNZ.
+ //
+ // Change (for UADDOs on 8 and 16 bits):
+ //
+ // %z0 = G_ASSERT_ZEXT _
+ // %op0 = G_TRUNC %z0
+ // %z1 = G_ASSERT_ZEXT _
+ // %op1 = G_TRUNC %z1
+ // %val, %cond = G_UADDO %op0, %op1
+ // G_BRCOND %cond, %error.bb
+ //
+ // error.bb:
+ // (no successors and no uses of %val)
+ //
+ // To:
+ //
+ // %z0 = G_ASSERT_ZEXT _
+ // %z1 = G_ASSERT_ZEXT _
+ // %add = G_ADD %z0, %z1
+ // %val = G_TRUNC %add
+ // %bit = G_AND %add, 1 << scalar-size-in-bits(%op1)
+ // %cond = G_ICMP NE, %bit, 0
+ // G_BRCOND %cond, %error.bb
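+ //
+ // As a motivating example (hypothetical source, shown only for
+ // illustration), a guarded narrow add such as
+ //
+ //   uint8_t r;
+ //   if (__builtin_add_overflow(a, b, &r)) // a, b unsigned 8-bit
+ //     __builtin_trap();
+ //
+ // lowers to the G_UADDO pattern above, and after this combine the
+ // overflow check can be selected as a single TBNZ on bit 8 of the wide
+ // add.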
+
+ auto &MRI = *B.getMRI();
+
+ MachineOperand *DefOp0 = MRI.getOneDef(MI.getOperand(2).getReg());
+ MachineOperand *DefOp1 = MRI.getOneDef(MI.getOperand(3).getReg());
+ // Bail out if either operand lacks a unique definition.
+ if (!DefOp0 || !DefOp1)
+ return false;
+ Register Op0Wide;
+ Register Op1Wide;
+ if (!mi_match(DefOp0->getParent(), MRI, m_GTrunc(m_Reg(Op0Wide))) ||
+ !mi_match(DefOp1->getParent(), MRI, m_GTrunc(m_Reg(Op1Wide))))
+ return false;
+ LLT WideTy0 = MRI.getType(Op0Wide);
+ LLT WideTy1 = MRI.getType(Op1Wide);
+ Register ResVal = MI.getOperand(0).getReg();
+ LLT OpTy = MRI.getType(ResVal);
+ MachineInstr *Op0WideDef = MRI.getVRegDef(Op0Wide);
+ MachineInstr *Op1WideDef = MRI.getVRegDef(Op1Wide);
+
+ unsigned OpTySize = OpTy.getScalarSizeInBits();
+ // First check that the G_TRUNCs feeding the G_UADDO are no-ops because
+ // the inputs have already been zero-extended.
+ if (Op0WideDef->getOpcode() != TargetOpcode::G_ASSERT_ZEXT ||
+ Op1WideDef->getOpcode() != TargetOpcode::G_ASSERT_ZEXT ||
+ OpTySize != Op0WideDef->getOperand(2).getImm() ||
+ OpTySize != Op1WideDef->getOperand(2).getImm())
+ return false;
+
+ // Only scalar G_UADDOs with 8- or 16-bit operands are handled.
+ if (!WideTy0.isScalar() || !WideTy1.isScalar() || WideTy0 != WideTy1 ||
+ OpTySize >= WideTy0.getScalarSizeInBits() ||
+ (OpTySize != 8 && OpTySize != 16))
+ return false;
+
+ // The overflow-status result must be used by a branch only.
+ Register ResStatus = MI.getOperand(1).getReg();
+ if (!MRI.hasOneNonDBGUse(ResStatus))
+ return false;
+ MachineInstr *CondUser = &*MRI.use_instr_nodbg_begin(ResStatus);
+ if (CondUser->getOpcode() != TargetOpcode::G_BRCOND)
+ return false;
+
+ // Make sure the computed result is only used on the no-overflow path,
+ // i.e. neither in the error block nor in the current block.
+ MachineBasicBlock *CurrentMBB = MI.getParent();
+ MachineBasicBlock *FailMBB = CondUser->getOperand(1).getMBB();
+ if (!FailMBB->succ_empty() || CondUser->getParent() != CurrentMBB)
+ return false;
+ if (any_of(MRI.use_nodbg_instructions(ResVal),
+ [&MI, FailMBB, CurrentMBB](MachineInstr &I) {
+ return &MI != &I &&
+ (I.getParent() == FailMBB || I.getParent() == CurrentMBB);
+ }))
+ return false;
+
+ // Remove the G_UADDO.
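+ // Position the builder at (and take the debug location from) the
+ // instruction following the G_UADDO before erasing it, so the
+ // replacement instructions are built at a valid insertion point.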
+ B.setInstrAndDebugLoc(*MI.getNextNode());
+ MI.eraseFromParent();
+
+ // Emit wide add.
+ Register AddDst = MRI.cloneVirtualRegister(Op0Wide);
+ B.buildInstr(TargetOpcode::G_ADD, {AddDst}, {Op0Wide, Op1Wide});
+
+ // Emit a check of the 9th or 17th bit and update its user (the branch).
+ // This will later be folded to a TBNZ.
+ Register CondBit = MRI.cloneVirtualRegister(Op0Wide);
+ B.buildAnd(
+ CondBit, AddDst,
+ B.buildConstant(LLT::scalar(32), OpTySize == 8 ? 1 << 8 : 1 << 16));
+ B.buildICmp(CmpInst::ICMP_NE, ResStatus, CondBit,
+ B.buildConstant(LLT::scalar(32), 0));
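+ // After selection, the single-bit AND feeding the compare-and-branch is
+ // expected to become a test-bit-and-branch, e.g. (illustrative only):
+ //   add w8, w0, w1
+ //   tbnz w8, #8, <error block>
+ // (bit #16 instead for 16-bit operands).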
+
+ // Update G_ZEXT users of the result value. Because all uses are in the
+ // no-overflow case, we know that the top bits are 0, so each G_ZEXT of
+ // the narrow result can be replaced by the wide add directly.
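+ // ResVal keeps its narrow (s8/s16) type, so this emits a G_TRUNC of the
+ // wide add for any remaining narrow users of the result.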
+ B.buildZExtOrTrunc(ResVal, AddDst);
+ for (MachineOperand &U : make_early_inc_range(MRI.use_operands(ResVal))) {
+ Register WideReg;
+ if (mi_match(U.getParent(), MRI, m_GZExt(m_Reg(WideReg)))) {
+ auto OldR = U.getParent()->getOperand(0).getReg();
+ Observer.erasingInstr(*U.getParent());
+ U.getParent()->eraseFromParent();
+ Helper.replaceRegWith(MRI, OldR, AddDst);
+ }
+ }
+
+ return true;
+}
+
class AArch64PreLegalizerCombinerHelperState {
protected:
CombinerHelper &Helper;
return Helper.tryCombineConcatVectors(MI);
case TargetOpcode::G_SHUFFLE_VECTOR:
return Helper.tryCombineShuffleVector(MI);
+ case TargetOpcode::G_UADDO:
+ return tryToSimplifyUADDO(MI, B, Helper, Observer);
case TargetOpcode::G_MEMCPY_INLINE:
return Helper.tryEmitMemcpyInline(MI);
case TargetOpcode::G_MEMCPY:
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $w0
; CHECK-NEXT: [[ASSERT_ZEXT:%[0-9]+]]:_(s32) = G_ASSERT_ZEXT [[COPY]], 8
- ; CHECK-NEXT: [[TRUNC:%[0-9]+]]:_(s8) = G_TRUNC [[ASSERT_ZEXT]](s32)
; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $w1
; CHECK-NEXT: [[ASSERT_ZEXT1:%[0-9]+]]:_(s32) = G_ASSERT_ZEXT [[COPY1]], 8
- ; CHECK-NEXT: [[TRUNC1:%[0-9]+]]:_(s8) = G_TRUNC [[ASSERT_ZEXT1]](s32)
- ; CHECK-NEXT: [[UADDO:%[0-9]+]]:_(s8), [[UADDO1:%[0-9]+]]:_(s1) = G_UADDO [[TRUNC]], [[TRUNC1]]
- ; CHECK-NEXT: [[C:%[0-9]+]]:_(s1) = G_CONSTANT i1 true
- ; CHECK-NEXT: [[XOR:%[0-9]+]]:_(s1) = G_XOR [[UADDO1]], [[C]]
- ; CHECK-NEXT: G_BRCOND [[XOR]](s1), %bb.2
+ ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(s32) = G_ADD [[ASSERT_ZEXT]], [[ASSERT_ZEXT1]]
+ ; CHECK-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 256
+ ; CHECK-NEXT: [[AND:%[0-9]+]]:_(s32) = G_AND [[ADD]], [[C]]
+ ; CHECK-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; CHECK-NEXT: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(eq), [[AND]](s32), [[C1]]
+ ; CHECK-NEXT: G_BRCOND [[ICMP]](s1), %bb.2
; CHECK-NEXT: G_BR %bb.1
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: bb.1:
; CHECK-NEXT: G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.trap)
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: bb.2:
- ; CHECK-NEXT: [[ZEXT:%[0-9]+]]:_(s32) = G_ZEXT [[UADDO]](s8)
- ; CHECK-NEXT: $w0 = COPY [[ZEXT]](s32)
+ ; CHECK-NEXT: $w0 = COPY [[ADD]](s32)
; CHECK-NEXT: RET_ReallyLR implicit $w0
bb.1.entry:
successors: %bb.2(0x00000800), %bb.3(0x7ffff800)
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $w0
; CHECK-NEXT: [[ASSERT_ZEXT:%[0-9]+]]:_(s32) = G_ASSERT_ZEXT [[COPY]], 16
- ; CHECK-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[ASSERT_ZEXT]](s32)
; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $w1
; CHECK-NEXT: [[ASSERT_ZEXT1:%[0-9]+]]:_(s32) = G_ASSERT_ZEXT [[COPY1]], 16
- ; CHECK-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[ASSERT_ZEXT1]](s32)
- ; CHECK-NEXT: [[UADDO:%[0-9]+]]:_(s16), [[UADDO1:%[0-9]+]]:_(s1) = G_UADDO [[TRUNC]], [[TRUNC1]]
- ; CHECK-NEXT: [[C:%[0-9]+]]:_(s1) = G_CONSTANT i1 true
- ; CHECK-NEXT: [[XOR:%[0-9]+]]:_(s1) = G_XOR [[UADDO1]], [[C]]
- ; CHECK-NEXT: G_BRCOND [[XOR]](s1), %bb.2
+ ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(s32) = G_ADD [[ASSERT_ZEXT]], [[ASSERT_ZEXT1]]
+ ; CHECK-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 65536
+ ; CHECK-NEXT: [[AND:%[0-9]+]]:_(s32) = G_AND [[ADD]], [[C]]
+ ; CHECK-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; CHECK-NEXT: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(eq), [[AND]](s32), [[C1]]
+ ; CHECK-NEXT: G_BRCOND [[ICMP]](s1), %bb.2
; CHECK-NEXT: G_BR %bb.1
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: bb.1:
; CHECK-NEXT: G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.trap)
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: bb.2:
- ; CHECK-NEXT: [[ZEXT:%[0-9]+]]:_(s32) = G_ZEXT [[UADDO]](s16)
- ; CHECK-NEXT: $w0 = COPY [[ZEXT]](s32)
+ ; CHECK-NEXT: $w0 = COPY [[ADD]](s32)
; CHECK-NEXT: RET_ReallyLR implicit $w0
bb.1:
successors: %bb.2(0x00000800), %bb.3(0x7ffff800)
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $w0
; CHECK-NEXT: [[ASSERT_ZEXT:%[0-9]+]]:_(s32) = G_ASSERT_ZEXT [[COPY]], 16
- ; CHECK-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[ASSERT_ZEXT]](s32)
; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $w1
; CHECK-NEXT: [[ASSERT_ZEXT1:%[0-9]+]]:_(s32) = G_ASSERT_ZEXT [[COPY1]], 16
- ; CHECK-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[ASSERT_ZEXT1]](s32)
- ; CHECK-NEXT: [[UADDO:%[0-9]+]]:_(s16), [[UADDO1:%[0-9]+]]:_(s1) = G_UADDO [[TRUNC]], [[TRUNC1]]
- ; CHECK-NEXT: [[C:%[0-9]+]]:_(s1) = G_CONSTANT i1 true
- ; CHECK-NEXT: [[XOR:%[0-9]+]]:_(s1) = G_XOR [[UADDO1]], [[C]]
- ; CHECK-NEXT: G_BRCOND [[XOR]](s1), %bb.2
+ ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(s32) = G_ADD [[ASSERT_ZEXT]], [[ASSERT_ZEXT1]]
+ ; CHECK-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 65536
+ ; CHECK-NEXT: [[AND:%[0-9]+]]:_(s32) = G_AND [[ADD]], [[C]]
+ ; CHECK-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; CHECK-NEXT: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(eq), [[AND]](s32), [[C1]]
+ ; CHECK-NEXT: G_BRCOND [[ICMP]](s1), %bb.2
; CHECK-NEXT: G_BR %bb.1
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: bb.1:
; CHECK-NEXT: liveins: $x2
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: ADJCALLSTACKDOWN 0, 0, implicit-def $sp, implicit $sp
- ; CHECK-NEXT: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[UADDO]](s16)
- ; CHECK-NEXT: $w0 = COPY [[ANYEXT]](s32)
+ ; CHECK-NEXT: $w0 = COPY [[ADD]](s32)
; CHECK-NEXT: BLR renamable $x2, implicit-def dead $lr, implicit $sp, implicit $w0
; CHECK-NEXT: ADJCALLSTACKUP 0, 0, implicit-def $sp, implicit $sp
; CHECK-NEXT: ADJCALLSTACKDOWN 0, 0, implicit-def $sp, implicit $sp
- ; CHECK-NEXT: $w0 = COPY [[ANYEXT]](s32)
+ ; CHECK-NEXT: $w0 = COPY [[ADD]](s32)
; CHECK-NEXT: BLR killed renamable $x2, implicit-def dead $lr, implicit $sp, implicit $w0
; CHECK-NEXT: ADJCALLSTACKUP 0, 0, implicit-def $sp, implicit $sp
; CHECK-NEXT: RET_ReallyLR
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $w0
; CHECK-NEXT: [[ASSERT_ZEXT:%[0-9]+]]:_(s32) = G_ASSERT_ZEXT [[COPY]], 16
- ; CHECK-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[ASSERT_ZEXT]](s32)
; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $w1
; CHECK-NEXT: [[ASSERT_ZEXT1:%[0-9]+]]:_(s32) = G_ASSERT_ZEXT [[COPY1]], 16
- ; CHECK-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[ASSERT_ZEXT1]](s32)
- ; CHECK-NEXT: [[UADDO:%[0-9]+]]:_(s16), [[UADDO1:%[0-9]+]]:_(s1) = G_UADDO [[TRUNC]], [[TRUNC1]]
- ; CHECK-NEXT: [[C:%[0-9]+]]:_(s1) = G_CONSTANT i1 true
- ; CHECK-NEXT: [[XOR:%[0-9]+]]:_(s1) = G_XOR [[UADDO1]], [[C]]
- ; CHECK-NEXT: G_BRCOND [[XOR]](s1), %bb.2
+ ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(s32) = G_ADD [[ASSERT_ZEXT]], [[ASSERT_ZEXT1]]
+ ; CHECK-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 65536
+ ; CHECK-NEXT: [[AND:%[0-9]+]]:_(s32) = G_AND [[ADD]], [[C]]
+ ; CHECK-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; CHECK-NEXT: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(eq), [[AND]](s32), [[C1]]
+ ; CHECK-NEXT: G_BRCOND [[ICMP]](s1), %bb.2
; CHECK-NEXT: G_BR %bb.1
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: bb.1:
; CHECK-NEXT: G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.trap)
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: bb.2:
- ; CHECK-NEXT: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[UADDO]](s16)
- ; CHECK-NEXT: $w0 = COPY [[ANYEXT]](s32)
+ ; CHECK-NEXT: $w0 = COPY [[ADD]](s32)
; CHECK-NEXT: RET_ReallyLR implicit $w0
bb.1:
successors: %bb.2(0x00000800), %bb.3(0x7ffff800)
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $w0
; CHECK-NEXT: [[ASSERT_ZEXT:%[0-9]+]]:_(s32) = G_ASSERT_ZEXT [[COPY]], 16
- ; CHECK-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[ASSERT_ZEXT]](s32)
; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $w1
; CHECK-NEXT: [[ASSERT_ZEXT1:%[0-9]+]]:_(s32) = G_ASSERT_ZEXT [[COPY1]], 16
- ; CHECK-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[ASSERT_ZEXT1]](s32)
- ; CHECK-NEXT: [[UADDO:%[0-9]+]]:_(s16), [[UADDO1:%[0-9]+]]:_(s1) = G_UADDO [[TRUNC]], [[TRUNC1]]
- ; CHECK-NEXT: DBG_VALUE [[UADDO1]](s1)
- ; CHECK-NEXT: [[C:%[0-9]+]]:_(s1) = G_CONSTANT i1 true
- ; CHECK-NEXT: [[XOR:%[0-9]+]]:_(s1) = G_XOR [[UADDO1]], [[C]]
- ; CHECK-NEXT: G_BRCOND [[XOR]](s1), %bb.2
+ ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(s32) = G_ADD [[ASSERT_ZEXT]], [[ASSERT_ZEXT1]]
+ ; CHECK-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 65536
+ ; CHECK-NEXT: [[AND:%[0-9]+]]:_(s32) = G_AND [[ADD]], [[C]]
+ ; CHECK-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; CHECK-NEXT: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(eq), [[AND]](s32), [[C1]]
+ ; CHECK-NEXT: DBG_VALUE [[ICMP]](s1)
+ ; CHECK-NEXT: G_BRCOND [[ICMP]](s1), %bb.2
; CHECK-NEXT: G_BR %bb.1
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: bb.1:
; CHECK-NEXT: G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.trap)
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: bb.2:
- ; CHECK-NEXT: [[ZEXT:%[0-9]+]]:_(s32) = G_ZEXT [[UADDO]](s16)
- ; CHECK-NEXT: $w0 = COPY [[ZEXT]](s32)
+ ; CHECK-NEXT: $w0 = COPY [[ADD]](s32)
; CHECK-NEXT: RET_ReallyLR implicit $w0
bb.1:
successors: %bb.2(0x00000800), %bb.3(0x7ffff800)
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $w0
; CHECK-NEXT: [[ASSERT_ZEXT:%[0-9]+]]:_(s32) = G_ASSERT_ZEXT [[COPY]], 16
- ; CHECK-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[ASSERT_ZEXT]](s32)
; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $w1
; CHECK-NEXT: [[ASSERT_ZEXT1:%[0-9]+]]:_(s32) = G_ASSERT_ZEXT [[COPY1]], 16
- ; CHECK-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[ASSERT_ZEXT1]](s32)
- ; CHECK-NEXT: [[UADDO:%[0-9]+]]:_(s16), [[UADDO1:%[0-9]+]]:_(s1) = G_UADDO [[TRUNC]], [[TRUNC1]]
- ; CHECK-NEXT: DBG_VALUE [[UADDO]](s16)
- ; CHECK-NEXT: [[C:%[0-9]+]]:_(s1) = G_CONSTANT i1 true
- ; CHECK-NEXT: [[XOR:%[0-9]+]]:_(s1) = G_XOR [[UADDO1]], [[C]]
- ; CHECK-NEXT: G_BRCOND [[XOR]](s1), %bb.2
+ ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(s32) = G_ADD [[ASSERT_ZEXT]], [[ASSERT_ZEXT1]]
+ ; CHECK-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 65536
+ ; CHECK-NEXT: [[AND:%[0-9]+]]:_(s32) = G_AND [[ADD]], [[C]]
+ ; CHECK-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; CHECK-NEXT: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(eq), [[AND]](s32), [[C1]]
+ ; CHECK-NEXT: DBG_VALUE $noreg
+ ; CHECK-NEXT: G_BRCOND [[ICMP]](s1), %bb.2
; CHECK-NEXT: G_BR %bb.1
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: bb.1:
; CHECK-NEXT: G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.trap)
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: bb.2:
- ; CHECK-NEXT: [[ZEXT:%[0-9]+]]:_(s32) = G_ZEXT [[UADDO]](s16)
- ; CHECK-NEXT: $w0 = COPY [[ZEXT]](s32)
+ ; CHECK-NEXT: $w0 = COPY [[ADD]](s32)
; CHECK-NEXT: RET_ReallyLR implicit $w0
bb.1:
successors: %bb.2(0x00000800), %bb.3(0x7ffff800)