Register CarryInReg = I.getOperand(4).getReg();
const LLT DstTy = MRI.getType(DstReg);
+ assert(DstTy.isScalar() && "G_UADDE only supported for scalar types");
- if (DstTy != LLT::scalar(32))
- return false;
+ // TODO: Handle immediate argument variants?
+ unsigned OpADC, OpADD;
+ switch (DstTy.getSizeInBits()) {
+ case 8:
+ OpADC = X86::ADC8rr;
+ OpADD = X86::ADD8rr;
+ break;
+ case 16:
+ OpADC = X86::ADC16rr;
+ OpADD = X86::ADD16rr;
+ break;
+ case 32:
+ OpADC = X86::ADC32rr;
+ OpADD = X86::ADD32rr;
+ break;
+ case 64:
+ OpADC = X86::ADC64rr;
+ OpADD = X86::ADD64rr;
+ break;
+ default:
+ llvm_unreachable("Can't select G_UADDE, unsupported type.");
+ }
+
+ const RegisterBank &DstRB = *RBI.getRegBank(DstReg, MRI, TRI);
+ const TargetRegisterClass *DstRC = getRegClass(DstTy, DstRB);
// find CarryIn def instruction.
MachineInstr *Def = MRI.getVRegDef(CarryInReg);
while (Def->getOpcode() == TargetOpcode::COPY) {
  CarryInReg = Def->getOperand(1).getReg();
  Def = MRI.getVRegDef(CarryInReg);
}
- unsigned Opcode;
+ unsigned Opcode = 0;
if (Def->getOpcode() == TargetOpcode::G_UADDE) {
// carry set by prev ADD.
BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(X86::COPY), X86::EFLAGS)
.addReg(CarryInReg);
- if (!RBI.constrainGenericRegister(CarryInReg, X86::GR32RegClass, MRI))
+ if (!RBI.constrainGenericRegister(CarryInReg, *DstRC, MRI))
return false;
- Opcode = X86::ADC32rr;
+ Opcode = OpADC;
} else if (auto val = getIConstantVRegVal(CarryInReg, MRI)) {
// carry is constant, support only 0.
if (*val != 0)
return false;
- Opcode = X86::ADD32rr;
+ Opcode = OpADD;
} else
return false;
MachineInstr &AddInst =
    *BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(Opcode), DstReg)
         .addReg(Op0Reg)
         .addReg(Op1Reg);

BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(X86::COPY), CarryOutReg)
    .addReg(X86::EFLAGS);
if (!constrainSelectedInstRegOperands(AddInst, TII, TRI, RBI) ||
- !RBI.constrainGenericRegister(CarryOutReg, X86::GR32RegClass, MRI))
+ !RBI.constrainGenericRegister(CarryOutReg, *DstRC, MRI))
return false;
I.eraseFromParent();
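For reference, this is the operation the selector is lowering: G_UADDE adds two operands plus an incoming carry and produces both the truncated sum and an outgoing carry, which is exactly the contract of x86 ADC. A minimal standalone sketch of those semantics on 8-bit values (the helper uadde8 is hypothetical, not part of LLVM):

#include <cstdint>
#include <utility>

// Models G_UADDE on s8: returns {sum, carry-out}, matching what ADC8rr
// computes once the incoming carry has been copied into EFLAGS.
std::pair<uint8_t, bool> uadde8(uint8_t a, uint8_t b, bool carry_in) {
  unsigned wide = unsigned(a) + unsigned(b) + unsigned(carry_in);
  return {static_cast<uint8_t>(wide), wide > 0xFFu};
}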
.clampScalar(0, s8, sMaxScalar)
.scalarize(0);
- // TODO: Add all legal scalar types.
// TODO: Add G_UADDO/G_USUBO/G_USUBE handling
getActionDefinitionsBuilder(G_UADDE)
- .legalFor({{s32, s1}})
+ .legalIf([=](const LegalityQuery &Query) -> bool {
+ return typePairInSet(0, 1, {{s8, s1}, {s16, s1}, {s32, s1}})(Query) ||
+ (Is64Bit && typePairInSet(0, 1, {{s64, s1}})(Query));
+ })
.widenScalarToNextPow2(0, /*Min=*/32)
- .clampScalar(0, s32, s32)
+ .clampScalar(0, s8, sMaxScalar)
+ .clampScalar(1, s1, s1)
.scalarize(0);
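The legalIf predicate above boils down to a size check on the {sum, carry} type pair. A minimal sketch of the decision it encodes, assuming a hypothetical helper isLegalUadde in place of the real LegalityQuery plumbing:

// Legal iff the carry is s1 and the sum is a natively selectable scalar
// (s64 only when compiling for a 64-bit target).
bool isLegalUadde(unsigned DstBits, unsigned CarryBits, bool Is64Bit) {
  if (CarryBits != 1)
    return false;
  return DstBits == 8 || DstBits == 16 || DstBits == 32 ||
         (Is64Bit && DstBits == 64);
}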
// integer multiply
; X64-NEXT: [[UV:%[0-9]+]]:_(s64), [[UV1:%[0-9]+]]:_(s64) = G_UNMERGE_VALUES [[DEF]](s128)
; X64-NEXT: [[UV2:%[0-9]+]]:_(s64), [[UV3:%[0-9]+]]:_(s64) = G_UNMERGE_VALUES [[DEF1]](s128)
; X64-NEXT: [[UADDO:%[0-9]+]]:_(s64), [[UADDO1:%[0-9]+]]:_(s1) = G_UADDO [[UV]], [[UV2]]
- ; X64-NEXT: [[UV4:%[0-9]+]]:_(s32), [[UV5:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[UV1]](s64)
- ; X64-NEXT: [[UV6:%[0-9]+]]:_(s32), [[UV7:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[UV3]](s64)
- ; X64-NEXT: [[UADDE:%[0-9]+]]:_(s32), [[UADDE1:%[0-9]+]]:_(s1) = G_UADDE [[UV4]], [[UV6]], [[UADDO1]]
- ; X64-NEXT: [[UADDE2:%[0-9]+]]:_(s32), [[UADDE3:%[0-9]+]]:_(s1) = G_UADDE [[UV5]], [[UV7]], [[UADDE1]]
- ; X64-NEXT: [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[UADDE]](s32), [[UADDE2]](s32)
- ; X64-NEXT: [[MV1:%[0-9]+]]:_(s128) = G_MERGE_VALUES [[UADDO]](s64), [[MV]](s64)
- ; X64-NEXT: [[UV8:%[0-9]+]]:_(s64), [[UV9:%[0-9]+]]:_(s64) = G_UNMERGE_VALUES [[MV1]](s128)
- ; X64-NEXT: $rax = COPY [[UV8]](s64)
- ; X64-NEXT: $rdx = COPY [[UV9]](s64)
+ ; X64-NEXT: [[UADDE:%[0-9]+]]:_(s64), [[UADDE1:%[0-9]+]]:_(s1) = G_UADDE [[UV1]], [[UV3]], [[UADDO1]]
+ ; X64-NEXT: [[MV:%[0-9]+]]:_(s128) = G_MERGE_VALUES [[UADDO]](s64), [[UADDE]](s64)
+ ; X64-NEXT: [[UV4:%[0-9]+]]:_(s64), [[UV5:%[0-9]+]]:_(s64) = G_UNMERGE_VALUES [[MV]](s128)
+ ; X64-NEXT: $rax = COPY [[UV4]](s64)
+ ; X64-NEXT: $rdx = COPY [[UV5]](s64)
; X64-NEXT: RET 0
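The updated X64 checks now express the i128 add as a single G_UADDO/G_UADDE pair over 64-bit halves instead of further splitting the high half into 32-bit pieces. The same carry chain as a plain C++ sketch (U128 and add128 are hypothetical helpers):

#include <cstdint>

struct U128 { uint64_t lo, hi; };

U128 add128(U128 a, U128 b) {
  uint64_t lo = a.lo + b.lo;
  bool carry = lo < a.lo;            // G_UADDO's carry-out
  uint64_t hi = a.hi + b.hi + carry; // G_UADDE consumes it
  return {lo, hi};
}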
; X32-LABEL: name: test_add_i128
; X32: [[DEF:%[0-9]+]]:_(s128) = IMPLICIT_DEF