From: Craig Topper Date: Thu, 26 Jul 2018 05:40:10 +0000 (+0000) Subject: [X86] Don't use CombineTo to skip adding new nodes to the DAGCombiner worklist in... X-Git-Tag: llvmorg-7.0.0-rc1~541 X-Git-Url: http://review.tizen.org/git/?a=commitdiff_plain;h=4e687d5bb2b9a4dcde599b2563a5edc1aff44e62;p=platform%2Fupstream%2Fllvm.git [X86] Don't use CombineTo to skip adding new nodes to the DAGCombiner worklist in combineMul. I'm not sure if this was trying to avoid optimizing the new nodes further or what. Or maybe to prevent a cycle if something tried to reform the multiply? But I don't think it's a reliable way to do that. If the user of the expanded multiply is visited by the DAGCombiner after this conversion happens, the DAGCombiner will check its operands, see that they haven't been visited by the DAGCombiner before and it will then add the first node to the worklist. This process will repeat until all the new nodes are visited. So this seems like an unreliable prevention at best. So this patch just returns the new nodes like any other combine. If this starts causing problems we can try to add target specific nodes or something to more directly prevent optimizations. Now that we handle the combine normally, we can combine any negates the mul expansion creates into their users since those will be visited now. llvm-svn: 338007 --- diff --git a/llvm/lib/Target/X86/X86ISelLowering.cpp b/llvm/lib/Target/X86/X86ISelLowering.cpp index 48093b4..c53c8d1 100644 --- a/llvm/lib/Target/X86/X86ISelLowering.cpp +++ b/llvm/lib/Target/X86/X86ISelLowering.cpp @@ -33970,11 +33970,7 @@ static SDValue combineMul(SDNode *N, SelectionDAG &DAG, } } - if (NewMul) - // Do not add new nodes to DAG combiner worklist. 
- return DCI.CombineTo(N, NewMul, false); - - return SDValue(); + return NewMul; } static SDValue combineShiftLeft(SDNode *N, SelectionDAG &DAG) { diff --git a/llvm/test/CodeGen/X86/mul-constant-i32.ll b/llvm/test/CodeGen/X86/mul-constant-i32.ll index 79b13a2..356d5a0 100644 --- a/llvm/test/CodeGen/X86/mul-constant-i32.ll +++ b/llvm/test/CodeGen/X86/mul-constant-i32.ll @@ -2081,25 +2081,25 @@ define i32 @mul_neg_fold(i32 %a, i32 %b) { ; X86-LABEL: mul_neg_fold: ; X86: # %bb.0: ; X86-NEXT: movl {{[0-9]+}}(%esp), %eax -; X86-NEXT: leal (%eax,%eax,8), %eax -; X86-NEXT: negl %eax -; X86-NEXT: addl {{[0-9]+}}(%esp), %eax +; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx +; X86-NEXT: leal (%ecx,%ecx,8), %ecx +; X86-NEXT: subl %ecx, %eax ; X86-NEXT: retl ; ; X64-HSW-LABEL: mul_neg_fold: ; X64-HSW: # %bb.0: ; X64-HSW-NEXT: # kill: def $edi killed $edi def $rdi ; X64-HSW-NEXT: leal (%rdi,%rdi,8), %eax # sched: [1:0.50] -; X64-HSW-NEXT: negl %eax # sched: [1:0.25] -; X64-HSW-NEXT: addl %esi, %eax # sched: [1:0.25] +; X64-HSW-NEXT: subl %eax, %esi # sched: [1:0.25] +; X64-HSW-NEXT: movl %esi, %eax # sched: [1:0.25] ; X64-HSW-NEXT: retq # sched: [7:1.00] ; ; X64-JAG-LABEL: mul_neg_fold: ; X64-JAG: # %bb.0: ; X64-JAG-NEXT: # kill: def $edi killed $edi def $rdi ; X64-JAG-NEXT: leal (%rdi,%rdi,8), %eax # sched: [2:1.00] -; X64-JAG-NEXT: negl %eax # sched: [1:0.50] -; X64-JAG-NEXT: addl %esi, %eax # sched: [1:0.50] +; X64-JAG-NEXT: subl %eax, %esi # sched: [1:0.50] +; X64-JAG-NEXT: movl %esi, %eax # sched: [1:0.50] ; X64-JAG-NEXT: retq # sched: [4:1.00] ; ; X86-NOOPT-LABEL: mul_neg_fold: @@ -2124,8 +2124,8 @@ define i32 @mul_neg_fold(i32 %a, i32 %b) { ; X64-SLM: # %bb.0: ; X64-SLM-NEXT: # kill: def $edi killed $edi def $rdi ; X64-SLM-NEXT: leal (%rdi,%rdi,8), %eax # sched: [1:1.00] -; X64-SLM-NEXT: negl %eax # sched: [1:0.50] -; X64-SLM-NEXT: addl %esi, %eax # sched: [1:0.50] +; X64-SLM-NEXT: subl %eax, %esi # sched: [1:0.50] +; X64-SLM-NEXT: movl %esi, %eax # sched: [1:0.50] ; 
X64-SLM-NEXT: retq # sched: [4:1.00] ; ; SLM-NOOPT-LABEL: mul_neg_fold: