If we're not relying on the flag result, we can fold the two constants together into the RHS immediate operand and set the LHS operand to zero, which simplifies the node for further folds.
We could do something similar when the flag result is in use, as long as the constant fold doesn't change it, but I don't have any real test cases for this yet.
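
As a quick sanity check of the value arithmetic (a standalone sketch, not part of the patch), using the constants from the adc_merge_constants test below:

#include <cassert>
#include <cstdint>

// ADC computes LHS + RHS + Carry. With the constants from the
// adc_merge_constants test, ADC(55, -1, Carry) and the folded
// ADC(0, 54, Carry) produce the same value result for either carry-in;
// only the flag result can differ, which is why the fold requires the
// flags to be dead.
static uint32_t adcValue(uint32_t LHS, uint32_t RHS, bool Carry) {
  return LHS + RHS + (Carry ? 1u : 0u);
}

int main() {
  for (bool Carry : {false, true})
    assert(adcValue(55, uint32_t(-1), Carry) == adcValue(0, 54, Carry));
  return 0;
}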
As suggested by @davezarzycki on Issue #35256
Differential Revision: https://reviews.llvm.org/D122482
return DCI.CombineTo(N, Res1, CarryOut);
}
+ // Fold ADC(C1,C2,Carry) -> ADC(0,C1+C2,Carry)
+ // iff the flag result is dead.
+ // TODO: Allow flag result if C1+C2 doesn't signed/unsigned overflow.
+ if (LHSC && RHSC && !LHSC->isZero() && !N->hasAnyUseOfValue(1)) {
+   SDLoc DL(N);
+   APInt Sum = LHSC->getAPIntValue() + RHSC->getAPIntValue();
+   return DAG.getNode(X86ISD::ADC, DL, N->getVTList(),
+                      DAG.getConstant(0, DL, LHS.getValueType()),
+                      DAG.getConstant(Sum, DL, LHS.getValueType()), CarryIn);
+ }
+
if (SDValue Flags = combineCarryThroughADD(CarryIn, DAG)) {
MVT VT = N->getSimpleValueType(0);
SDVTList VTs = DAG.getVTList(VT, MVT::i32);
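
The TODO above would need an overflow check before the fold could keep a live flag result. A minimal sketch of what that predicate could look like (a hypothetical helper, not part of this patch), using LLVM's overflow-checked APInt adders:

// Hypothetical helper (not in this patch): the fold would also be safe
// with a live flag result when C1+C2 overflows neither unsigned nor
// signed, since then ADC(0,C1+C2,Carry) produces the same CF/OF as
// ADC(C1,C2,Carry).
static bool sumPreservesFlags(const APInt &C1, const APInt &C2) {
  bool UnsignedOv = false, SignedOv = false;
  (void)C1.uadd_ov(C2, UnsignedOv);
  (void)C1.sadd_ov(C2, SignedOv);
  return !UnsignedOv && !SignedOv;
}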
define void @rv_marker_2_select(i32 %c) {
; CHECK-LABEL: rv_marker_2_select:
-; CHECK: pushq %rax
-; CHECK-NEXT: .cfi_def_cfa_offset 16
-; CHECK-NEXT: cmpl $1, %edi
-; CHECK-NEXT: movl $1, %edi
-; CHECK-NEXT: adcl $0, %edi
-; CHECK-NEXT: callq _foo0
-; CHECK-NEXT: movq %rax, %rdi
-; CHECK-NEXT: callq _objc_retainAutoreleasedReturnValue
-; CHECK-NEXT: movq %rax, %rdi
-; CHECK-NEXT: popq %rax
-; CHECK-NEXT: jmp _foo2
+; CHECK: pushq %rax
+; CHECK-NEXT: .cfi_def_cfa_offset 16
+; CHECK-NEXT: xorl %eax, %eax
+; CHECK-NEXT: cmpl $1, %edi
+; CHECK-NEXT: adcl $1, %eax
+; CHECK-NEXT: movl %eax, %edi
+; CHECK-NEXT: callq _foo0
+; CHECK-NEXT: movq %rax, %rdi
+; CHECK-NEXT: callq _objc_retainAutoreleasedReturnValue
+; CHECK-NEXT: movq %rax, %rdi
+; CHECK-NEXT: popq %rax
+; CHECK-NEXT: jmp _foo2
;
entry:
%tobool.not = icmp eq i32 %c, 0
ret i32 %10
}
-; FIXME: Fail to add (non-overflowing) constants together
; FIXME: Fail to convert add+lshr+and to BT
define i32 @adc_merge_constants(i32 %a0) nounwind {
; X86-LABEL: adc_merge_constants:
; X86: # %bb.0:
-; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X86-NEXT: shrl $11, %eax
-; X86-NEXT: andb $1, %al
-; X86-NEXT: addb $-1, %al
-; X86-NEXT: movl $55, %eax
-; X86-NEXT: adcl $-1, %eax
+; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: shrl $11, %ecx
+; X86-NEXT: andb $1, %cl
+; X86-NEXT: xorl %eax, %eax
+; X86-NEXT: addb $-1, %cl
+; X86-NEXT: adcl $54, %eax
; X86-NEXT: retl
;
; X64-LABEL: adc_merge_constants:
; X64: # %bb.0:
; X64-NEXT: shrl $11, %edi
; X64-NEXT: andb $1, %dil
+; X64-NEXT: xorl %eax, %eax
; X64-NEXT: addb $-1, %dil
-; X64-NEXT: movl $55, %eax
-; X64-NEXT: adcl $-1, %eax
+; X64-NEXT: adcl $54, %eax
; X64-NEXT: retq
%bit = lshr i32 %a0, 11
%mask = and i32 %bit, 1
; CHECK-LABEL: PR51238:
; CHECK: # %bb.0:
; CHECK-NEXT: notb %cl
+; CHECK-NEXT: xorl %eax, %eax
; CHECK-NEXT: addb %dl, %cl
-; CHECK-NEXT: movb $1, %al
-; CHECK-NEXT: adcb $0, %al
+; CHECK-NEXT: adcb $1, %al
+; CHECK-NEXT: # kill: def $al killed $al killed $eax
; CHECK-NEXT: retq
%ny = xor i8 %y, -1
%nz = xor i8 %z, -1
define i64 @main(i1 %tobool1) nounwind {
; CHECK-LABEL: main:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: pushl %esi
; CHECK-NEXT: testb $1, {{[0-9]+}}(%esp)
-; CHECK-NEXT: movl $-12, %eax
-; CHECK-NEXT: movl $-1, %ecx
-; CHECK-NEXT: cmovel %ecx, %eax
+; CHECK-NEXT: movl $-12, %ecx
+; CHECK-NEXT: movl $-1, %eax
+; CHECK-NEXT: cmovnel %ecx, %eax
+; CHECK-NEXT: xorl %ecx, %ecx
+; CHECK-NEXT: movl %eax, %edx
+; CHECK-NEXT: addl $-1, %edx
+; CHECK-NEXT: movl $0, %edx
+; CHECK-NEXT: adcl $-2, %edx
+; CHECK-NEXT: cmovsl %ecx, %eax
; CHECK-NEXT: xorl %edx, %edx
-; CHECK-NEXT: movl %eax, %esi
-; CHECK-NEXT: addl $-1, %esi
-; CHECK-NEXT: adcl $-1, %ecx
-; CHECK-NEXT: cmovsl %edx, %eax
-; CHECK-NEXT: xorl %edx, %edx
-; CHECK-NEXT: popl %esi
; CHECK-NEXT: retl
entry:
%0 = zext i1 %tobool1 to i32
define i64 @test4(i64 %a, i64 %b) nounwind {
; ILP-LABEL: test4:
; ILP: # %bb.0:
+; ILP-NEXT: xorl %eax, %eax
; ILP-NEXT: xorl %ecx, %ecx
-; ILP-NEXT: xorl %edx, %edx
; ILP-NEXT: incq %rsi
-; ILP-NEXT: sete %dl
-; ILP-NEXT: movl $2, %eax
+; ILP-NEXT: sete %cl
; ILP-NEXT: cmpq %rdi, %rsi
-; ILP-NEXT: sbbq $0, %rdx
-; ILP-NEXT: movl $0, %edx
-; ILP-NEXT: sbbq %rdx, %rdx
+; ILP-NEXT: sbbq $0, %rcx
+; ILP-NEXT: movl $0, %ecx
; ILP-NEXT: sbbq %rcx, %rcx
-; ILP-NEXT: adcq $-1, %rax
+; ILP-NEXT: movl $0, %ecx
+; ILP-NEXT: sbbq %rcx, %rcx
+; ILP-NEXT: adcq $1, %rax
; ILP-NEXT: retq
;
; HYBRID-LABEL: test4:
; HYBRID: # %bb.0:
+; HYBRID-NEXT: xorl %eax, %eax
; HYBRID-NEXT: xorl %ecx, %ecx
-; HYBRID-NEXT: xorl %edx, %edx
; HYBRID-NEXT: incq %rsi
-; HYBRID-NEXT: sete %dl
-; HYBRID-NEXT: movl $2, %eax
+; HYBRID-NEXT: sete %cl
; HYBRID-NEXT: cmpq %rdi, %rsi
-; HYBRID-NEXT: sbbq $0, %rdx
-; HYBRID-NEXT: movl $0, %edx
-; HYBRID-NEXT: sbbq %rdx, %rdx
+; HYBRID-NEXT: sbbq $0, %rcx
+; HYBRID-NEXT: movl $0, %ecx
+; HYBRID-NEXT: sbbq %rcx, %rcx
+; HYBRID-NEXT: movl $0, %ecx
; HYBRID-NEXT: sbbq %rcx, %rcx
-; HYBRID-NEXT: adcq $-1, %rax
+; HYBRID-NEXT: adcq $1, %rax
; HYBRID-NEXT: retq
;
; BURR-LABEL: test4:
; BURR: # %bb.0:
+; BURR-NEXT: xorl %eax, %eax
; BURR-NEXT: xorl %ecx, %ecx
-; BURR-NEXT: xorl %edx, %edx
; BURR-NEXT: incq %rsi
-; BURR-NEXT: sete %dl
-; BURR-NEXT: movl $2, %eax
+; BURR-NEXT: sete %cl
; BURR-NEXT: cmpq %rdi, %rsi
-; BURR-NEXT: sbbq $0, %rdx
-; BURR-NEXT: movl $0, %edx
-; BURR-NEXT: sbbq %rdx, %rdx
+; BURR-NEXT: sbbq $0, %rcx
+; BURR-NEXT: movl $0, %ecx
+; BURR-NEXT: sbbq %rcx, %rcx
+; BURR-NEXT: movl $0, %ecx
; BURR-NEXT: sbbq %rcx, %rcx
-; BURR-NEXT: adcq $-1, %rax
+; BURR-NEXT: adcq $1, %rax
; BURR-NEXT: retq
;
; SRC-LABEL: test4:
; SRC: # %bb.0:
-; SRC-NEXT: xorl %eax, %eax
-; SRC-NEXT: incq %rsi
-; SRC-NEXT: sete %al
; SRC-NEXT: xorl %ecx, %ecx
+; SRC-NEXT: incq %rsi
+; SRC-NEXT: sete %cl
+; SRC-NEXT: xorl %eax, %eax
; SRC-NEXT: cmpq %rdi, %rsi
-; SRC-NEXT: sbbq $0, %rax
-; SRC-NEXT: movl $0, %eax
-; SRC-NEXT: sbbq %rax, %rax
+; SRC-NEXT: sbbq $0, %rcx
+; SRC-NEXT: movl $0, %ecx
+; SRC-NEXT: sbbq %rcx, %rcx
+; SRC-NEXT: movl $0, %ecx
; SRC-NEXT: sbbq %rcx, %rcx
-; SRC-NEXT: movl $2, %eax
-; SRC-NEXT: adcq $-1, %rax
+; SRC-NEXT: adcq $1, %rax
; SRC-NEXT: retq
;
; LIN-LABEL: test4:
; LIN: # %bb.0:
-; LIN-NEXT: movl $2, %eax
+; LIN-NEXT: xorl %eax, %eax
; LIN-NEXT: xorl %ecx, %ecx
-; LIN-NEXT: xorl %edx, %edx
; LIN-NEXT: incq %rsi
-; LIN-NEXT: sete %dl
+; LIN-NEXT: sete %cl
; LIN-NEXT: cmpq %rdi, %rsi
-; LIN-NEXT: sbbq $0, %rdx
-; LIN-NEXT: movl $0, %edx
-; LIN-NEXT: sbbq %rdx, %rdx
+; LIN-NEXT: sbbq $0, %rcx
+; LIN-NEXT: movl $0, %ecx
+; LIN-NEXT: sbbq %rcx, %rcx
+; LIN-NEXT: movl $0, %ecx
; LIN-NEXT: sbbq %rcx, %rcx
-; LIN-NEXT: adcq $-1, %rax
+; LIN-NEXT: adcq $1, %rax
; LIN-NEXT: retq
%r = zext i64 %b to i256
%u = add i256 %r, 1
define i32 @t4(i32 %a) {
; CHECK-LABEL: t4:
; CHECK: ## %bb.0:
-; CHECK-NEXT: movq _v4@GOTPCREL(%rip), %rax
-; CHECK-NEXT: cmpl $1, (%rax)
-; CHECK-NEXT: movw $1, %ax
-; CHECK-NEXT: adcw $0, %ax
+; CHECK-NEXT: movq _v4@GOTPCREL(%rip), %rcx
+; CHECK-NEXT: xorl %eax, %eax
+; CHECK-NEXT: cmpl $1, (%rcx)
+; CHECK-NEXT: adcw $1, %ax
; CHECK-NEXT: shll $16, %eax
; CHECK-NEXT: retq
%t0 = load i32, i32* @v4, align 4