From: Joerg Sonnenberger Date: Sat, 27 Jul 2019 18:57:59 +0000 (+0000) Subject: Stricter check for the memory access. X-Git-Url: http://review.tizen.org/git/?a=commitdiff_plain;h=791951bd32ac9af46a8c01d841912059ce8cc8cb;p=platform%2Fupstream%2Fllvm.git Stricter check for the memory access. The current pattern would trigger for scheduling changes of the post-load computation, since those are commutable with the inline asm. Avoid this by explicitly checking the order of load vs asm block. llvm-svn: 367180 --- diff --git a/llvm/test/CodeGen/X86/inlineasm-sched-bug.ll b/llvm/test/CodeGen/X86/inlineasm-sched-bug.ll index 25bf5e0..b893496 100644 --- a/llvm/test/CodeGen/X86/inlineasm-sched-bug.ll +++ b/llvm/test/CodeGen/X86/inlineasm-sched-bug.ll @@ -1,7 +1,9 @@ ; PR13504 ; RUN: llc -mtriple=i686-- -mcpu=atom < %s | FileCheck %s +; Check that treemap is read before the asm statement. +; CHECK: movl 8(%{{esp|ebp}}) ; CHECK: bsfl -; CHECK-NOT: movl +; CHECK-NOT: movl 8(%{{esp|ebp}}) define i32 @foo(i32 %treemap) nounwind uwtable { entry: