[X86] Preserve !pcsections on atomic intrinsics
author     Marco Elver <elver@google.com>
           Wed, 29 Mar 2023 08:25:10 +0000 (10:25 +0200)
committer  Marco Elver <elver@google.com>
           Wed, 29 Mar 2023 08:27:24 +0000 (10:27 +0200)
Preserve !pcsections metadata on the X86-only atomic intrinsics emitted
when expanding higher-level atomic operations (bit-test RMW, cmp-arith
RMW, and idempotent RMW lowered into a fenced load). Previously the
metadata was dropped during these expansions.

Differential Revision: https://reviews.llvm.org/D147123
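
For illustration of the cmp-arith case (a hedged sketch: the intrinsic
name and operand list mirror what emitCmpArithAtomicRMWIntrinsic emits
and are assumptions, not part of this patch), an atomicrmw whose result
is consumed only by a flag check, e.g.

    define i1 @use_flags(ptr %a) {
      %old = atomicrmw sub ptr %a, i64 1 seq_cst, align 8, !pcsections !0
      %cmp = icmp eq i64 %old, 1
      ret i1 %cmp
    }

    !0 = !{!"somesection"}

is expanded to a flag-producing call along the lines of

    ; hypothetical expansion; exact name/signature are assumptions
    %cc = call i8 @llvm.x86.atomic.sub.cc.i64(ptr %a, i64 1, i32 <pred>), !pcsections !0

and with this change that call keeps !pcsections instead of dropping it.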

llvm/lib/Target/X86/X86ISelLowering.cpp
llvm/test/CodeGen/X86/pcsections-atomics.ll

index b2b816f..68af565 100644
--- a/llvm/lib/Target/X86/X86ISelLowering.cpp
+++ b/llvm/lib/Target/X86/X86ISelLowering.cpp
@@ -32127,6 +32127,7 @@ X86TargetLowering::shouldExpandLogicAtomicRMWInIR(AtomicRMWInst *AI) const {
 
 void X86TargetLowering::emitBitTestAtomicRMWIntrinsic(AtomicRMWInst *AI) const {
   IRBuilder<> Builder(AI);
+  Builder.CollectMetadataToCopy(AI, {LLVMContext::MD_pcsections});
   Intrinsic::ID IID_C = Intrinsic::not_intrinsic;
   Intrinsic::ID IID_I = Intrinsic::not_intrinsic;
   switch (AI->getOperation()) {
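
The hunk above hooks emitBitTestAtomicRMWIntrinsic. A sketch of that
expansion (the @llvm.x86.atomic.bts name and its i8 bit-index operand
are assumptions based on this code path, not shown in the patch): an
atomicrmw or of a single-bit mask whose result is only used to test
that same bit, e.g.

    %old = atomicrmw or ptr %p, i64 8 monotonic, align 8, !pcsections !0
    %m   = and i64 %old, 8

becomes roughly

    ; hypothetical expansion; bit index 3 corresponds to mask 8
    %bit = call i64 @llvm.x86.atomic.bts.i64(ptr %p, i8 3), !pcsections !0

so the metadata now survives onto the call and, ultimately, the lock bts
instruction selected for it.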
@@ -32267,6 +32268,7 @@ static bool shouldExpandCmpArithRMWInIR(AtomicRMWInst *AI) {
 void X86TargetLowering::emitCmpArithAtomicRMWIntrinsic(
     AtomicRMWInst *AI) const {
   IRBuilder<> Builder(AI);
+  Builder.CollectMetadataToCopy(AI, {LLVMContext::MD_pcsections});
   Instruction *TempI = nullptr;
   LLVMContext &Ctx = AI->getContext();
   ICmpInst *ICI = dyn_cast<ICmpInst>(AI->user_back());
@@ -32392,6 +32394,7 @@ X86TargetLowering::lowerIdempotentRMWIntoFencedLoad(AtomicRMWInst *AI) const {
       return nullptr;
 
   IRBuilder<> Builder(AI);
+  Builder.CollectMetadataToCopy(AI, {LLVMContext::MD_pcsections});
   Module *M = Builder.GetInsertBlock()->getParent()->getParent();
   auto SSID = AI->getSyncScopeID();
   // We must restrict the ordering to avoid generating loads with Release or
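
The last hunk covers idempotent RMW operations lowered into a fenced
load. A minimal sketch, assuming the seq_cst path that emits an mfence
intrinsic followed by a monotonic load (details vary with ordering and
subtarget):

    %unused = atomicrmw or ptr %p, i32 0 seq_cst, align 4, !pcsections !0

is replaced by approximately

    call void @llvm.x86.sse2.mfence(), !pcsections !0
    %val = load atomic i32, ptr %p monotonic, align 4, !pcsections !0

Both builder-created instructions now inherit the metadata via
CollectMetadataToCopy.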
index 6881cb3..8567865 100644
--- a/llvm/test/CodeGen/X86/pcsections-atomics.ll
+++ b/llvm/test/CodeGen/X86/pcsections-atomics.ll
@@ -9916,4 +9916,67 @@ entry:
   ret void
 }
 
+define i64 @atomic_use_cond(ptr %a) {
+; O0-LABEL: atomic_use_cond:
+; O0:       # %bb.0: # %entry
+; O0-NEXT:  .Lpcsection412:
+; O0-NEXT:    lock decq (%rdi)
+; O0-NEXT:  .Lpcsection413:
+; O0-NEXT:    sete %al
+; O0-NEXT:    testb $1, %al
+; O0-NEXT:    je .LBB197_2
+; O0-NEXT:  # %bb.1: # %then
+; O0-NEXT:    movl $1, %eax
+; O0-NEXT:    retq
+; O0-NEXT:  .LBB197_2: # %else
+; O0-NEXT:    movl $2, %eax
+; O0-NEXT:    retq
+;
+; O1-LABEL: atomic_use_cond:
+; O1:       # %bb.0: # %entry
+; O1-NEXT:  .Lpcsection327:
+; O1-NEXT:    lock decq (%rdi)
+; O1-NEXT:    jne .LBB197_2
+; O1-NEXT:  # %bb.1: # %then
+; O1-NEXT:    movl $1, %eax
+; O1-NEXT:    retq
+; O1-NEXT:  .LBB197_2: # %else
+; O1-NEXT:    movl $2, %eax
+; O1-NEXT:    retq
+;
+; O2-LABEL: atomic_use_cond:
+; O2:       # %bb.0: # %entry
+; O2-NEXT:  .Lpcsection327:
+; O2-NEXT:    lock decq (%rdi)
+; O2-NEXT:    jne .LBB197_2
+; O2-NEXT:  # %bb.1: # %then
+; O2-NEXT:    movl $1, %eax
+; O2-NEXT:    retq
+; O2-NEXT:  .LBB197_2: # %else
+; O2-NEXT:    movl $2, %eax
+; O2-NEXT:    retq
+;
+; O3-LABEL: atomic_use_cond:
+; O3:       # %bb.0: # %entry
+; O3-NEXT:  .Lpcsection327:
+; O3-NEXT:    lock decq (%rdi)
+; O3-NEXT:    jne .LBB197_2
+; O3-NEXT:  # %bb.1: # %then
+; O3-NEXT:    movl $1, %eax
+; O3-NEXT:    retq
+; O3-NEXT:  .LBB197_2: # %else
+; O3-NEXT:    movl $2, %eax
+; O3-NEXT:    retq
+entry:
+  %x = atomicrmw sub ptr %a, i64 1 seq_cst, align 8, !pcsections !0
+  %y = icmp eq i64 %x, 1
+  br i1 %y, label %then, label %else
+
+then:
+  ret i64 1
+
+else:
+  ret i64 2
+}
+
 !0 = !{!"somesection"}
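
In the assembly checks above, the .Lpcsection labels are where the
preserved metadata becomes visible: the backend records the PC of each
labeled instruction in the "somesection" section named by !0, so the
lock decq and, at -O0, the sete that reads its flags are both covered.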