From cc9a9cf2378493ac3ba1f71a754561abbe0a766b Mon Sep 17 00:00:00 2001
From: Roman Lebedev
Date: Thu, 30 May 2019 19:27:19 +0000
Subject: [PATCH] [DAGCombine] ((A-c1)+c2) -> (A+(c2-c1)) constant-fold

Summary:
This was the root cause of the endless combine loop in D62257

https://rise4fun.com/Alive/d3W

Reviewers: RKSimon, spatel, craig.topper, t.p.northover

Reviewed By: t.p.northover

Subscribers: t.p.northover, javed.absar, llvm-commits

Tags: #llvm

Differential Revision: https://reviews.llvm.org/D62662

llvm-svn: 362131
---
 llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp      |  9 +++++++++
 .../CodeGen/AArch64/addsub-constant-folding.ll     | 22 ++++++----------------
 llvm/test/CodeGen/X86/addsub-constant-folding.ll   | 12 ++----------
 3 files changed, 17 insertions(+), 26 deletions(-)

diff --git a/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp b/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
index 49cd759..9a06077 100644
--- a/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
@@ -2161,6 +2161,15 @@ SDValue DAGCombiner::visitADDLike(SDNode *N) {
     return N0;
 
   if (isConstantOrConstantVector(N1, /* NoOpaque */ true)) {
+    // fold ((A-c1)+c2) -> (A+(c2-c1))
+    if (N0.getOpcode() == ISD::SUB &&
+        isConstantOrConstantVector(N0.getOperand(1), /* NoOpaque */ true)) {
+      SDValue Sub = DAG.FoldConstantArithmetic(ISD::SUB, DL, VT, N1.getNode(),
+                                               N0.getOperand(1).getNode());
+      assert(Sub && "Constant folding failed");
+      return DAG.getNode(ISD::ADD, DL, VT, N0.getOperand(0), Sub);
+    }
+
     // fold ((c1-A)+c2) -> (c1+c2)-A
     if (N0.getOpcode() == ISD::SUB &&
         isConstantOrConstantVector(N0.getOperand(0), /* NoOpaque */ true)) {
diff --git a/llvm/test/CodeGen/AArch64/addsub-constant-folding.ll b/llvm/test/CodeGen/AArch64/addsub-constant-folding.ll
index 38c3dfd..a1bf779 100644
--- a/llvm/test/CodeGen/AArch64/addsub-constant-folding.ll
+++ b/llvm/test/CodeGen/AArch64/addsub-constant-folding.ll
@@ -157,9 +157,7 @@ define <4 x i32> @add_const_const_sub_nonsplat(<4 x i32> %arg) {
 define <4 x i32> @sub_const_add_const(<4 x i32> %arg) {
 ; CHECK-LABEL: sub_const_add_const:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    movi v1.4s, #8
-; CHECK-NEXT:    sub v0.4s, v0.4s, v1.4s
-; CHECK-NEXT:    movi v1.4s, #2
+; CHECK-NEXT:    mvni v1.4s, #5
 ; CHECK-NEXT:    add v0.4s, v0.4s, v1.4s
 ; CHECK-NEXT:    ret
   %t0 = sub <4 x i32> %arg, <i32 8, i32 8, i32 8, i32 8>
@@ -175,12 +173,12 @@ define <4 x i32> @sub_const_add_const_extrause(<4 x i32> %arg) {
 ; CHECK-NEXT:    .cfi_def_cfa_offset 32
 ; CHECK-NEXT:    .cfi_offset w30, -16
 ; CHECK-NEXT:    movi v1.4s, #8
-; CHECK-NEXT:    sub v0.4s, v0.4s, v1.4s
 ; CHECK-NEXT:    str q0, [sp] // 16-byte Folded Spill
+; CHECK-NEXT:    sub v0.4s, v0.4s, v1.4s
 ; CHECK-NEXT:    bl use
 ; CHECK-NEXT:    ldr q1, [sp] // 16-byte Folded Reload
 ; CHECK-NEXT:    ldr x30, [sp, #16] // 8-byte Folded Reload
-; CHECK-NEXT:    movi v0.4s, #2
+; CHECK-NEXT:    mvni v0.4s, #5
 ; CHECK-NEXT:    add v0.4s, v1.4s, v0.4s
 ; CHECK-NEXT:    add sp, sp, #32 // =32
 ; CHECK-NEXT:    ret
@@ -195,10 +193,7 @@ define <4 x i32> @sub_const_add_const_nonsplat(<4 x i32> %arg) {
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    adrp x8, .LCPI11_0
 ; CHECK-NEXT:    ldr q1, [x8, :lo12:.LCPI11_0]
-; CHECK-NEXT:    adrp x8, .LCPI11_1
-; CHECK-NEXT:    ldr q2, [x8, :lo12:.LCPI11_1]
-; CHECK-NEXT:    sub v0.4s, v0.4s, v1.4s
-; CHECK-NEXT:    add v0.4s, v0.4s, v2.4s
+; CHECK-NEXT:    add v0.4s, v0.4s, v1.4s
 ; CHECK-NEXT:    ret
   %t0 = sub <4 x i32> %arg,
   %t1 = add <4 x i32> %t0,
@@ -412,9 +407,7 @@ define <4 x i32> @const_sub_sub_const_nonsplat(<4 x i32> %arg) {
 define <4 x i32> @const_sub_const_sub(<4 x i32> %arg) {
 ; CHECK-LABEL: const_sub_const_sub:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    movi v1.4s, #8
-; CHECK-NEXT:    sub v0.4s, v0.4s, v1.4s
-; CHECK-NEXT:    movi v1.4s, #2
+; CHECK-NEXT:    mvni v1.4s, #5
 ; CHECK-NEXT:    add v0.4s, v0.4s, v1.4s
 ; CHECK-NEXT:    ret
   %t0 = sub <4 x i32> <i32 8, i32 8, i32 8, i32 8>, %arg
@@ -450,10 +443,7 @@ define <4 x i32> @const_sub_const_sub_nonsplat(<4 x i32> %arg) {
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    adrp x8, .LCPI26_0
 ; CHECK-NEXT:    ldr q1, [x8, :lo12:.LCPI26_0]
-; CHECK-NEXT:    adrp x8, .LCPI26_1
-; CHECK-NEXT:    ldr q2, [x8, :lo12:.LCPI26_1]
-; CHECK-NEXT:    sub v0.4s, v0.4s, v1.4s
-; CHECK-NEXT:    add v0.4s, v0.4s, v2.4s
+; CHECK-NEXT:    add v0.4s, v0.4s, v1.4s
 ; CHECK-NEXT:    ret
   %t0 = sub <4 x i32> , %arg
   %t1 = sub <4 x i32> , %t0
diff --git a/llvm/test/CodeGen/X86/addsub-constant-folding.ll b/llvm/test/CodeGen/X86/addsub-constant-folding.ll
index dc9ee4c..c3cbe5e 100644
--- a/llvm/test/CodeGen/X86/addsub-constant-folding.ll
+++ b/llvm/test/CodeGen/X86/addsub-constant-folding.ll
@@ -222,13 +222,11 @@ define <4 x i32> @add_const_const_sub_nonsplat(<4 x i32> %arg) {
 define <4 x i32> @sub_const_add_const(<4 x i32> %arg) {
 ; X86-LABEL: sub_const_add_const:
 ; X86:       # %bb.0:
-; X86-NEXT:    psubd {{\.LCPI.*}}, %xmm0
 ; X86-NEXT:    paddd {{\.LCPI.*}}, %xmm0
 ; X86-NEXT:    retl
 ;
 ; X64-LABEL: sub_const_add_const:
 ; X64:       # %bb.0:
-; X64-NEXT:    psubd {{.*}}(%rip), %xmm0
 ; X64-NEXT:    paddd {{.*}}(%rip), %xmm0
 ; X64-NEXT:    retq
   %t0 = sub <4 x i32> %arg, <i32 8, i32 8, i32 8, i32 8>
@@ -241,8 +239,8 @@ define <4 x i32> @sub_const_add_const_extrause(<4 x i32> %arg) {
 ; X86:       # %bb.0:
 ; X86-NEXT:    subl $28, %esp
 ; X86-NEXT:    .cfi_def_cfa_offset 32
-; X86-NEXT:    psubd {{\.LCPI.*}}, %xmm0
 ; X86-NEXT:    movdqu %xmm0, (%esp) # 16-byte Spill
+; X86-NEXT:    psubd {{\.LCPI.*}}, %xmm0
 ; X86-NEXT:    calll use
 ; X86-NEXT:    movdqu (%esp), %xmm0 # 16-byte Reload
 ; X86-NEXT:    paddd {{\.LCPI.*}}, %xmm0
@@ -254,8 +252,8 @@ define <4 x i32> @sub_const_add_const_extrause(<4 x i32> %arg) {
 ; X64:       # %bb.0:
 ; X64-NEXT:    subq $24, %rsp
 ; X64-NEXT:    .cfi_def_cfa_offset 32
-; X64-NEXT:    psubd {{.*}}(%rip), %xmm0
 ; X64-NEXT:    movdqa %xmm0, (%rsp) # 16-byte Spill
+; X64-NEXT:    psubd {{.*}}(%rip), %xmm0
 ; X64-NEXT:    callq use
 ; X64-NEXT:    movdqa (%rsp), %xmm0 # 16-byte Reload
 ; X64-NEXT:    paddd {{.*}}(%rip), %xmm0
@@ -271,13 +269,11 @@ define <4 x i32> @sub_const_add_const_extrause(<4 x i32> %arg) {
 define <4 x i32> @sub_const_add_const_nonsplat(<4 x i32> %arg) {
 ; X86-LABEL: sub_const_add_const_nonsplat:
 ; X86:       # %bb.0:
-; X86-NEXT:    psubd {{\.LCPI.*}}, %xmm0
 ; X86-NEXT:    paddd {{\.LCPI.*}}, %xmm0
 ; X86-NEXT:    retl
 ;
 ; X64-LABEL: sub_const_add_const_nonsplat:
 ; X64:       # %bb.0:
-; X64-NEXT:    psubd {{.*}}(%rip), %xmm0
 ; X64-NEXT:    paddd {{.*}}(%rip), %xmm0
 ; X64-NEXT:    retq
   %t0 = sub <4 x i32> %arg,
@@ -588,13 +584,11 @@ define <4 x i32> @const_sub_sub_const_nonsplat(<4 x i32> %arg) {
 define <4 x i32> @const_sub_const_sub(<4 x i32> %arg) {
 ; X86-LABEL: const_sub_const_sub:
 ; X86:       # %bb.0:
-; X86-NEXT:    psubd {{\.LCPI.*}}, %xmm0
 ; X86-NEXT:    paddd {{\.LCPI.*}}, %xmm0
 ; X86-NEXT:    retl
 ;
 ; X64-LABEL: const_sub_const_sub:
 ; X64:       # %bb.0:
-; X64-NEXT:    psubd {{.*}}(%rip), %xmm0
 ; X64-NEXT:    paddd {{.*}}(%rip), %xmm0
 ; X64-NEXT:    retq
   %t0 = sub <4 x i32> <i32 8, i32 8, i32 8, i32 8>, %arg
@@ -642,13 +636,11 @@ define <4 x i32> @const_sub_const_sub_extrause(<4 x i32> %arg) {
 define <4 x i32> @const_sub_const_sub_nonsplat(<4 x i32> %arg) {
 ; X86-LABEL: const_sub_const_sub_nonsplat:
 ; X86:       # %bb.0:
-; X86-NEXT:    psubd {{\.LCPI.*}}, %xmm0
 ; X86-NEXT:    paddd {{\.LCPI.*}}, %xmm0
 ; X86-NEXT:    retl
 ;
 ; X64-LABEL: const_sub_const_sub_nonsplat:
 ; X64:       # %bb.0:
-; X64-NEXT:    psubd {{.*}}(%rip), %xmm0
 ; X64-NEXT:    paddd {{.*}}(%rip), %xmm0
 ; X64-NEXT:    retq
   %t0 = sub <4 x i32> , %arg
-- 
2.7.4
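
Note (illustrative sketch, not part of the patch): the new combine rewrites
(A - c1) + c2 into A + (c2 - c1), where c2 - c1 is evaluated at compile time
(via DAG.FoldConstantArithmetic above), so a sub+add pair collapses into a
single add. The standalone C++ program below checks the identity under the
same fixed-width wraparound semantics that the integer DAG nodes have; the
function and variable names here are ad hoc, chosen only for this example.

#include <cassert>
#include <cstdint>
#include <initializer_list>

// Shape before the combine: two runtime ops, (A - c1) + c2.
static uint32_t before(uint32_t A, uint32_t c1, uint32_t c2) {
  return (A - c1) + c2;
}

// Shape after the combine: c2 - c1 folds to one constant, leaving a
// single runtime add, A + (c2 - c1).
static uint32_t after(uint32_t A, uint32_t c1, uint32_t c2) {
  return A + (c2 - c1);
}

int main() {
  // c1 = 8, c2 = 2 matches the splat test vectors above: the folded
  // constant is 2 - 8 = -6, which AArch64 materializes as mvni v1.4s, #5
  // (bitwise NOT of 5 is 0xFFFFFFFA, i.e. -6).
  for (uint32_t A : {0u, 1u, 7u, 0x7FFFFFFFu, 0xFFFFFFFFu})
    assert(before(A, 8u, 2u) == after(A, 8u, 2u));
  return 0;
}

Because unsigned (two's-complement) arithmetic is associative and
commutative, the identity holds for every input, including the wrapping
cases exercised above; that is what the linked Alive proof
(https://rise4fun.com/Alive/d3W) establishes for the general pattern.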