return DAG.getSetCC(DL, VT, LHS.getOperand(0),
DAG.getConstant(0, DL, SrcVT), CC);
}
+
+ // With C a power of 2 (hence C != 0) and C != INT_MIN:
+ //   icmp eq (abs X), C -> (icmp eq X, C) | (icmp eq X, -C)
+ //   icmp ne (abs X), C -> (icmp ne X, C) & (icmp ne X, -C)
+ // Both of these patterns can be better optimized in
+ // DAGCombiner::foldAndOrOfSETCC. Note this only applies to scalar
+ // integers, which is checked above.
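+ // For example, with C == 4 (as in the abs_eq_pow2 test below), the
+ // combined form can fold to ((X + 4) & ~8) == 0, which on x86 becomes
+ // "addl $4; testl $-9" (-9 == ~8).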
+ if (LHS.getOpcode() == ISD::ABS && LHS.hasOneUse()) {
+ if (auto *C = dyn_cast<ConstantSDNode>(RHS)) {
+ const APInt &CInt = C->getAPIntValue();
+ if (CInt.isPowerOf2() && !CInt.isMinSignedValue()) {
+ SDValue BaseOp = LHS.getOperand(0);
+ SDValue SETCC0 = DAG.getSetCC(DL, VT, BaseOp, RHS, CC);
+ SDValue SETCC1 = DAG.getSetCC(
+ DL, VT, BaseOp, DAG.getConstant(-CInt, DL, OpVT), CC);
+ return DAG.getNode(CC == ISD::SETEQ ? ISD::OR : ISD::AND, DL, VT,
+ SETCC0, SETCC1);
+ }
+ }
+ }
}
}
; X86-LABEL: abs_eq_pow2:
; X86: # %bb.0:
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X86-NEXT: movl %eax, %ecx
-; X86-NEXT: sarl $31, %ecx
-; X86-NEXT: xorl %ecx, %eax
-; X86-NEXT: subl %ecx, %eax
-; X86-NEXT: cmpl $4, %eax
+; X86-NEXT: addl $4, %eax
+; X86-NEXT: testl $-9, %eax
; X86-NEXT: sete %al
; X86-NEXT: retl
;
; X64-LABEL: abs_eq_pow2:
; X64: # %bb.0:
-; X64-NEXT: movl %edi, %eax
-; X64-NEXT: negl %eax
-; X64-NEXT: cmovsl %edi, %eax
-; X64-NEXT: cmpl $4, %eax
+; X64-NEXT: addl $4, %edi
+; X64-NEXT: testl $-9, %edi
; X64-NEXT: sete %al
; X64-NEXT: retq
%2 = tail call i32 @llvm.abs.i32(i32 %0, i1 true)
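; NOTE: the new sequence computes ((x + 4) & -9) and tests for zero; since
; -9 == ~8, the result is zero exactly when x + 4 is 0 or 8, i.e. when x is
; -4 or 4, matching abs(x) == 4 without materializing abs(x).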
; X86-LABEL: abs_ne_pow2:
; X86: # %bb.0:
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X86-NEXT: movl %eax, %ecx
-; X86-NEXT: sarl $31, %ecx
-; X86-NEXT: xorl %ecx, %eax
-; X86-NEXT: movl {{[0-9]+}}(%esp), %edx
-; X86-NEXT: xorl %ecx, %edx
-; X86-NEXT: subl %ecx, %edx
-; X86-NEXT: sbbl %ecx, %eax
-; X86-NEXT: xorl $2, %edx
-; X86-NEXT: orl %eax, %edx
+; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: addl $2, %eax
+; X86-NEXT: adcl $0, %ecx
+; X86-NEXT: andl $-5, %eax
+; X86-NEXT: orl %ecx, %eax
; X86-NEXT: setne %al
; X86-NEXT: retl
;
; X64-LABEL: abs_ne_pow2:
; X64: # %bb.0:
-; X64-NEXT: movq %rdi, %rax
-; X64-NEXT: negq %rax
-; X64-NEXT: cmovsq %rdi, %rax
-; X64-NEXT: cmpq $2, %rax
+; X64-NEXT: addq $2, %rdi
+; X64-NEXT: testq $-5, %rdi
; X64-NEXT: setne %al
; X64-NEXT: retq
%2 = tail call i64 @llvm.abs.i64(i64 %0, i1 true)
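; NOTE: likewise for i64, ((x + 2) & -5) is zero (with -5 == ~4) exactly when
; x + 2 is 0 or 4, i.e. when abs(x) == 2; setne then yields abs(x) != 2.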