X86: remove temporary atomicrmw used during lowering.
author     Tim Northover <tnorthover@apple.com>
           Mon, 14 Jul 2014 15:31:13 +0000 (15:31 +0000)
committer  Tim Northover <tnorthover@apple.com>
           Mon, 14 Jul 2014 15:31:13 +0000 (15:31 +0000)
We construct a temporary "atomicrmw xchg" instruction when lowering atomic
stores for widths that aren't supported natively. That instruction isn't on the
top-level worklist, though, so it won't be removed automatically; we have to
erase it ourselves once it has itself been lowered.
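
As a rough illustration, here is a minimal sketch of the pattern, not the
pass's actual code: the helper name and the shouldExpand/expandRMW callbacks
are hypothetical, and it assumes the 2014-era IRBuilder/Instruction APIs.

  #include "llvm/IR/IRBuilder.h"
  #include "llvm/IR/Instructions.h"
  using namespace llvm;

  // Hypothetical sketch: lower a wide atomic store by rewriting it as an
  // "atomicrmw xchg" whose result is unused, then expanding that xchg if the
  // target can't handle it natively and erasing the temporary instruction.
  static bool lowerStoreViaXchg(StoreInst *SI,
                                bool (*shouldExpand)(AtomicRMWInst *),
                                bool (*expandRMW)(AtomicRMWInst *)) {
    IRBuilder<> Builder(SI);
    // atomicrmw can't be unordered, so promote unordered stores to monotonic.
    AtomicOrdering Order =
        SI->getOrdering() == Unordered ? Monotonic : SI->getOrdering();
    AtomicRMWInst *AI = Builder.CreateAtomicRMW(
        AtomicRMWInst::Xchg, SI->getPointerOperand(), SI->getValueOperand(),
        Order);
    SI->eraseFromParent();

    if (shouldExpand(AI)) {
      expandRMW(AI);
      // The xchg was created here rather than picked up from the top-level
      // worklist, so nothing else will delete it; erase it explicitly.
      AI->eraseFromParent();
    }
    return true;
  }

If the xchg is natively supported it simply stays behind as the lowered form
of the store; the explicit erase only matters on the expansion path, which is
what the change below adds.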

Thanks Saleem for pointing this out!

llvm-svn: 212948

llvm/lib/Target/X86/X86AtomicExpandPass.cpp
llvm/test/CodeGen/X86/atomic128.ll

diff --git a/llvm/lib/Target/X86/X86AtomicExpandPass.cpp b/llvm/lib/Target/X86/X86AtomicExpandPass.cpp
index 1637b55..61eefbb 100644
@@ -277,8 +277,11 @@ bool X86AtomicExpandPass::expandAtomicStore(StoreInst *SI) {
                               SI->getValueOperand(), Order);
 
   // Now we have an appropriate swap instruction, lower it as usual.
-  if (shouldExpandAtomicRMW(AI))
-    return expandAtomicRMW(AI);
+  if (shouldExpandAtomicRMW(AI)) {
+    expandAtomicRMW(AI);
+    AI->eraseFromParent();
+    return true;
+  }
 
   return AI;
 }
diff --git a/llvm/test/CodeGen/X86/atomic128.ll b/llvm/test/CodeGen/X86/atomic128.ll
index ddc53a5..741d290 100644
@@ -277,6 +277,7 @@ define void @atomic_store_seq_cst(i128* %p, i128 %in) {
 ; CHECK:         lock
 ; CHECK:         cmpxchg16b (%rdi)
 ; CHECK:         jne [[LOOP]]
+; CHECK-NOT:     callq ___sync_lock_test_and_set_16
 
    store atomic i128 %in, i128* %p seq_cst, align 16
    ret void