From: Eli Friedman
Date: Fri, 4 Oct 2019 19:51:40 +0000 (+0000)
Subject: [ScheduleDAG] When a node is cloned, add an edge between the nodes.
X-Git-Url: http://review.tizen.org/git/?a=commitdiff_plain;h=23ae13d51f23018eb88a2b78223604d4ccc64cce;p=platform%2Fupstream%2Fllvm.git

[ScheduleDAG] When a node is cloned, add an edge between the nodes.

InstrEmitter's virtual register handling assumes that clones are emitted
after the cloned node. Make sure this assumption actually holds.

Fixes a "Node emitted out of order - early" assertion on the testcase.

This is probably a very rare case to actually hit in practice; even
without the explicit edge, the scheduler will usually end up scheduling
the nodes in the expected order due to other constraints.

Differential Revision: https://reviews.llvm.org/D68068

llvm-svn: 373782
---

diff --git a/llvm/lib/CodeGen/SelectionDAG/ScheduleDAGRRList.cpp b/llvm/lib/CodeGen/SelectionDAG/ScheduleDAGRRList.cpp
index 1598e4d..ff806bd 100644
--- a/llvm/lib/CodeGen/SelectionDAG/ScheduleDAGRRList.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/ScheduleDAGRRList.cpp
@@ -1188,6 +1188,10 @@ SUnit *ScheduleDAGRRList::CopyAndMoveSuccessors(SUnit *SU) {
       if (!Pred.isArtificial())
         AddPredQueued(NewSU, Pred);
 
+    // Make sure the clone comes after the original. (InstrEmitter assumes
+    // this ordering.)
+    AddPredQueued(NewSU, SDep(SU, SDep::Artificial));
+
     // Only copy scheduled successors. Cut them from old node's successor
     // list and move them over.
     SmallVector<std::pair<SUnit *, SDep>, 4> DelDeps;
diff --git a/llvm/test/CodeGen/Thumb/scheduler-clone-cpsr-def.ll b/llvm/test/CodeGen/Thumb/scheduler-clone-cpsr-def.ll
new file mode 100644
index 0000000..31e54c4
--- /dev/null
+++ b/llvm/test/CodeGen/Thumb/scheduler-clone-cpsr-def.ll
@@ -0,0 +1,41 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=thumbv6-linux-gnueabi < %s | FileCheck %s
+
+; After various DAGCombine optimizations, we end up with an sbcs with
+; multiple uses of the cpsr def, and we therefore clone the subs/sbcs.
+; Make sure this doesn't crash.
+;
+; The output here might change at some point in the future, and no
+; longer clone the operations; if that happens, there probably isn't any
+; straightforward way to fix the test.
+define i64 @f(i64 %x2, i32 %z) {
+; CHECK-LABEL: f:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    .save {r4, r5, r7, lr}
+; CHECK-NEXT:    push {r4, r5, r7, lr}
+; CHECK-NEXT:    movs r2, #0
+; CHECK-NEXT:    subs r3, r0, #1
+; CHECK-NEXT:    mov r3, r1
+; CHECK-NEXT:    sbcs r3, r2
+; CHECK-NEXT:    mov r3, r2
+; CHECK-NEXT:    adcs r3, r2
+; CHECK-NEXT:    movs r4, #30
+; CHECK-NEXT:    subs r5, r0, #1
+; CHECK-NEXT:    mov r5, r1
+; CHECK-NEXT:    sbcs r5, r2
+; CHECK-NEXT:    adcs r4, r2
+; CHECK-NEXT:    lsls r2, r1, #1
+; CHECK-NEXT:    lsls r2, r4
+; CHECK-NEXT:    movs r4, #1
+; CHECK-NEXT:    eors r4, r3
+; CHECK-NEXT:    lsrs r0, r4
+; CHECK-NEXT:    orrs r0, r2
+; CHECK-NEXT:    lsrs r1, r4
+; CHECK-NEXT:    pop {r4, r5, r7, pc}
+  %x3 = add nsw i64 %x2, -1
+  %x8 = icmp ne i64 %x2, 0
+  %x9 = xor i1 %x8, true
+  %x10 = zext i1 %x9 to i64
+  %x11 = lshr i64 %x2, %x10
+  ret i64 %x11
+}
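
For context: an artificial edge carries no data or register dependence; it only
constrains ordering, so once the original node (SU) is recorded as a predecessor
of its clone (NewSU), every legal emission order places the clone after the
original, which is what InstrEmitter's virtual-register handling expects. Below
is a minimal, self-contained sketch of that idea in plain C++, separate from the
patch above; the ToyNode/emitInOrder names and the Kahn-style ready queue are
illustrative assumptions, not LLVM's ScheduleDAG API.

    // Toy model (illustrative only, not LLVM's ScheduleDAG API): an
    // "artificial" predecessor edge carries no data dependence; it exists
    // purely to constrain the order in which nodes may be emitted.
    #include <cstdio>
    #include <queue>
    #include <vector>

    struct ToyNode {
      const char *Name;
      std::vector<int> Preds; // indices of nodes that must be emitted first
    };

    // Kahn-style ordering: a node becomes ready once all of its
    // predecessors have been emitted.
    static void emitInOrder(const std::vector<ToyNode> &Nodes) {
      int N = (int)Nodes.size();
      std::vector<int> Remaining(N, 0);
      std::vector<std::vector<int>> Succs(N);
      for (int I = 0; I < N; ++I) {
        Remaining[I] = (int)Nodes[I].Preds.size();
        for (int P : Nodes[I].Preds)
          Succs[P].push_back(I);
      }
      std::queue<int> Ready;
      for (int I = 0; I < N; ++I)
        if (Remaining[I] == 0)
          Ready.push(I);
      while (!Ready.empty()) {
        int Cur = Ready.front();
        Ready.pop();
        std::printf("emit %s\n", Nodes[Cur].Name);
        for (int S : Succs[Cur])
          if (--Remaining[S] == 0)
            Ready.push(S);
      }
    }

    int main() {
      // Node 0 plays the role of the original (SU), node 1 its clone (NewSU).
      std::vector<ToyNode> Nodes = {{"original", {}}, {"clone", {}}};
      // Without any edge between them, emitting the clone first would be a
      // legal order. The patch corresponds to making the original a
      // predecessor of the clone, so every legal order emits the clone second.
      Nodes[1].Preds.push_back(0);
      emitInOrder(Nodes); // prints "emit original" then "emit clone"
      return 0;
    }

In the patch itself the same ordering constraint is expressed with
AddPredQueued(NewSU, SDep(SU, SDep::Artificial)), reusing the scheduler's
existing artificial dependence kind rather than introducing a separate
mechanism.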