#include "clang/Basic/Diagnostic.h"
#include "clang/CodeGen/CGFunctionInfo.h"
#include "llvm/ADT/STLExtras.h"
+#include "llvm/Analysis/ObjCARCUtil.h"
#include "llvm/BinaryFormat/MachO.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/InlineAsm.h"
EmitNounwindRuntimeCall(fn, values);
}
+/// Emit a call to "clang.arc.noop.use", which consumes the result of a call
+/// that has operand bundle "clang.arc.attachedcall".
+void CodeGenFunction::EmitARCNoopIntrinsicUse(ArrayRef<llvm::Value *> values) {
+  // Lazily declare the intrinsic and cache the declaration in the module's
+  // ObjC entrypoints so subsequent emissions reuse it.
+  llvm::Function *&fn = CGM.getObjCEntrypoints().clang_arc_noop_use;
+  if (!fn)
+    fn = CGM.getIntrinsic(llvm::Intrinsic::objc_clang_arc_noop_use);
+  EmitNounwindRuntimeCall(fn, values);
+}
+
static void setARCRuntimeFunctionLinkage(CodeGenModule &CGM, llvm::Value *RTF) {
if (auto *F = dyn_cast<llvm::Function>(RTF)) {
// If the target runtime doesn't naturally support ARC, emit weak
// with this marker yet, so leave a breadcrumb for the ARC
// optimizer to pick up.
} else {
- const char *markerKey = "clang.arc.retainAutoreleasedReturnValueMarker";
- if (!CGF.CGM.getModule().getModuleFlag(markerKey)) {
+ const char *retainRVMarkerKey = llvm::objcarc::getRVMarkerModuleFlagStr();
+ if (!CGF.CGM.getModule().getModuleFlag(retainRVMarkerKey)) {
auto *str = llvm::MDString::get(CGF.getLLVMContext(), assembly);
- CGF.CGM.getModule().addModuleFlag(llvm::Module::Error, markerKey, str);
+ CGF.CGM.getModule().addModuleFlag(llvm::Module::Error,
+ retainRVMarkerKey, str);
}
}
}
CGF.Builder.CreateCall(marker, None, CGF.getBundlesForFunclet(marker));
}
+/// Emit the retainRV/claimRV handling for \p value, the result of a function
+/// call. When optimizing for AArch64, this attaches operand bundle
+/// "clang.arc.attachedcall" to the producing call so the backend emits the
+/// marker and runtime call itself; otherwise it emits an explicit
+/// retainRV/claimRV runtime call after the inline-asm marker.
+static llvm::Value *emitOptimizedARCReturnCall(llvm::Value *value,
+                                               bool IsRetainRV,
+                                               CodeGenFunction &CGF) {
+  emitAutoreleasedReturnValueMarker(CGF);
+
+  // Add operand bundle "clang.arc.attachedcall" to the call instead of emitting
+  // retainRV or claimRV calls in the IR. We currently do this only when the
+  // optimization level isn't -O0 since global-isel, which is currently run at
+  // -O0, doesn't know about the operand bundle.
+
+  // FIXME: Also use the operand bundle when the target isn't aarch64.
+  if (CGF.CGM.getCodeGenOpts().OptimizationLevel > 0 &&
+      CGF.CGM.getTarget().getTriple().isAArch64()) {
+    // Encode retain-vs-claim as the bundle's i64 argument.
+    llvm::Value *bundleArgs[] = {llvm::ConstantInt::get(
+        CGF.Int64Ty,
+        llvm::objcarc::getAttachedCallOperandBundleEnum(IsRetainRV))};
+    llvm::OperandBundleDef OB("clang.arc.attachedcall", bundleArgs);
+    // Bundles are immutable on an existing call, so clone the call with the
+    // bundle attached and replace the original.
+    auto *oldCall = cast<llvm::CallBase>(value);
+    llvm::CallBase *newCall = llvm::CallBase::addOperandBundle(
+        oldCall, llvm::LLVMContext::OB_clang_arc_attachedcall, OB, oldCall);
+    newCall->copyMetadata(*oldCall);
+    oldCall->replaceAllUsesWith(newCall);
+    oldCall->eraseFromParent();
+    // Emit "clang.arc.noop.use" so the annotated call's result is considered
+    // used (the bundle implies an implicit use of the return value).
+    CGF.EmitARCNoopIntrinsicUse(newCall);
+    return newCall;
+  }
+
+  // Fallback: emit an explicit runtime call, marked notail on targets that
+  // require the call to stay adjacent to the marker.
+  bool isNoTail =
+      CGF.CGM.getTargetCodeGenInfo().markARCOptimizedReturnCallsAsNoTail();
+  llvm::CallInst::TailCallKind tailKind =
+      isNoTail ? llvm::CallInst::TCK_NoTail : llvm::CallInst::TCK_None;
+  ObjCEntrypoints &EPs = CGF.CGM.getObjCEntrypoints();
+  llvm::Function *&EP = IsRetainRV
+                          ? EPs.objc_retainAutoreleasedReturnValue
+                          : EPs.objc_unsafeClaimAutoreleasedReturnValue;
+  llvm::Intrinsic::ID IID =
+      IsRetainRV ? llvm::Intrinsic::objc_retainAutoreleasedReturnValue
+                 : llvm::Intrinsic::objc_unsafeClaimAutoreleasedReturnValue;
+  return emitARCValueOperation(CGF, value, nullptr, EP, IID, tailKind);
+}
+
/// Retain the given object which is the result of a function call.
/// call i8* \@objc_retainAutoreleasedReturnValue(i8* %value)
///
/// call with completely different semantics.
llvm::Value *
CodeGenFunction::EmitARCRetainAutoreleasedReturnValue(llvm::Value *value) {
- emitAutoreleasedReturnValueMarker(*this);
- llvm::CallInst::TailCallKind tailKind =
- CGM.getTargetCodeGenInfo().markARCOptimizedReturnCallsAsNoTail()
- ? llvm::CallInst::TCK_NoTail
- : llvm::CallInst::TCK_None;
- return emitARCValueOperation(
- *this, value, nullptr,
- CGM.getObjCEntrypoints().objc_retainAutoreleasedReturnValue,
- llvm::Intrinsic::objc_retainAutoreleasedReturnValue, tailKind);
+ return emitOptimizedARCReturnCall(value, true, *this);
}
/// Claim a possibly-autoreleased return value at +0. This is only
/// call i8* \@objc_unsafeClaimAutoreleasedReturnValue(i8* %value)
llvm::Value *
CodeGenFunction::EmitARCUnsafeClaimAutoreleasedReturnValue(llvm::Value *value) {
- emitAutoreleasedReturnValueMarker(*this);
- llvm::CallInst::TailCallKind tailKind =
- CGM.getTargetCodeGenInfo().markARCOptimizedReturnCallsAsNoTail()
- ? llvm::CallInst::TCK_NoTail
- : llvm::CallInst::TCK_None;
- return emitARCValueOperation(
- *this, value, nullptr,
- CGM.getObjCEntrypoints().objc_unsafeClaimAutoreleasedReturnValue,
- llvm::Intrinsic::objc_unsafeClaimAutoreleasedReturnValue, tailKind);
+ return emitOptimizedARCReturnCall(value, false, *this);
}
/// Release the given object.
void EmitARCIntrinsicUse(ArrayRef<llvm::Value*> values);
+ void EmitARCNoopIntrinsicUse(ArrayRef<llvm::Value *> values);
+
static Destroyer destroyARCStrongImprecise;
static Destroyer destroyARCStrongPrecise;
static Destroyer destroyARCWeak;
llvm::Type *newRetTy = newFn->getReturnType();
SmallVector<llvm::Value*, 4> newArgs;
- SmallVector<llvm::OperandBundleDef, 1> newBundles;
for (llvm::Value::use_iterator ui = old->use_begin(), ue = old->use_end();
ui != ue; ) {
newArgs.append(callSite->arg_begin(), callSite->arg_begin() + argNo);
// Copy over any operand bundles.
+ SmallVector<llvm::OperandBundleDef, 1> newBundles;
callSite->getOperandBundlesAsDefs(newBundles);
llvm::CallBase *newCall;
/// void clang.arc.use(...);
llvm::Function *clang_arc_use;
+
+ /// void clang.arc.noop.use(...);
+ llvm::Function *clang_arc_noop_use;
};
/// This class records statistics on instrumentation based profiling.
--- /dev/null
+// RUN: %clang_cc1 -triple arm64-apple-ios9 -fobjc-runtime=ios-9.0 -fobjc-arc -O -disable-llvm-passes -emit-llvm -o - %s | FileCheck %s -check-prefix=CHECK
+
+@class A;
+
+A *makeA(void);
+
+void test_assign() {
+ __unsafe_unretained id x;
+ x = makeA();
+}
+// CHECK-LABEL: define{{.*}} void @test_assign()
+// CHECK: [[X:%.*]] = alloca i8*
+// CHECK: [[T0:%.*]] = call [[A:.*]]* @makeA() [ "clang.arc.attachedcall"(i64 1) ]
+// CHECK-NEXT: call void (...) @llvm.objc.clang.arc.noop.use({{.*}} [[T0]])
+// CHECK-NEXT: [[T1:%.*]] = bitcast [[A]]* [[T0]] to i8*
+// CHECK-NEXT: store i8* [[T1]], i8** [[X]]
+// CHECK-NEXT: bitcast
+// CHECK-NEXT: lifetime.end
+// CHECK-NEXT: ret void
+
+void test_assign_assign() {
+ __unsafe_unretained id x, y;
+ x = y = makeA();
+}
+// CHECK-LABEL: define{{.*}} void @test_assign_assign()
+// CHECK: [[X:%.*]] = alloca i8*
+// CHECK: [[Y:%.*]] = alloca i8*
+// CHECK: [[T0:%.*]] = call [[A]]* @makeA() [ "clang.arc.attachedcall"(i64 1) ]
+// CHECK-NEXT: call void (...) @llvm.objc.clang.arc.noop.use({{.*}} [[T0]])
+// CHECK-NEXT: [[T1:%.*]] = bitcast [[A]]* [[T0]] to i8*
+// CHECK-NEXT: store i8* [[T1]], i8** [[Y]]
+// CHECK-NEXT: store i8* [[T1]], i8** [[X]]
+// CHECK-NEXT: bitcast
+// CHECK-NEXT: lifetime.end
+// CHECK-NEXT: bitcast
+// CHECK-NEXT: lifetime.end
+// CHECK-NEXT: ret void
+
+void test_strong_assign_assign() {
+ __strong id x;
+ __unsafe_unretained id y;
+ x = y = makeA();
+}
+// CHECK-LABEL: define{{.*}} void @test_strong_assign_assign()
+// CHECK: [[X:%.*]] = alloca i8*
+// CHECK: [[Y:%.*]] = alloca i8*
+// CHECK: [[T0:%.*]] = call [[A]]* @makeA() [ "clang.arc.attachedcall"(i64 0) ]
+// CHECK-NEXT: call void (...) @llvm.objc.clang.arc.noop.use({{.*}} [[T0]])
+// CHECK-NEXT: [[T1:%.*]] = bitcast [[A]]* [[T0]] to i8*
+// CHECK-NEXT: store i8* [[T1]], i8** [[Y]]
+// CHECK-NEXT: [[OLD:%.*]] = load i8*, i8** [[X]]
+// CHECK-NEXT: store i8* [[T1]], i8** [[X]]
+// CHECK-NEXT: call void @llvm.objc.release(i8* [[OLD]]
+// CHECK-NEXT: bitcast
+// CHECK-NEXT: lifetime.end
+// CHECK-NEXT: [[T0:%.*]] = load i8*, i8** [[X]]
+// CHECK-NEXT: call void @llvm.objc.release(i8* [[T0]])
+// CHECK-NEXT: bitcast
+// CHECK-NEXT: lifetime.end
+// CHECK-NEXT: ret void
+
+void test_assign_strong_assign() {
+ __unsafe_unretained id x;
+ __strong id y;
+ x = y = makeA();
+}
+// CHECK-LABEL: define{{.*}} void @test_assign_strong_assign()
+// CHECK: [[X:%.*]] = alloca i8*
+// CHECK: [[Y:%.*]] = alloca i8*
+// CHECK: [[T0:%.*]] = call [[A]]* @makeA() [ "clang.arc.attachedcall"(i64 0) ]
+// CHECK-NEXT: call void (...) @llvm.objc.clang.arc.noop.use({{.*}} [[T0]])
+// CHECK-NEXT: [[T1:%.*]] = bitcast [[A]]* [[T0]] to i8*
+// CHECK-NEXT: [[OLD:%.*]] = load i8*, i8** [[Y]]
+// CHECK-NEXT: store i8* [[T1]], i8** [[Y]]
+// CHECK-NEXT: call void @llvm.objc.release(i8* [[OLD]]
+// CHECK-NEXT: store i8* [[T1]], i8** [[X]]
+// CHECK-NEXT: [[T0:%.*]] = load i8*, i8** [[Y]]
+// CHECK-NEXT: call void @llvm.objc.release(i8* [[T0]])
+// CHECK-NEXT: bitcast
+// CHECK-NEXT: lifetime.end
+// CHECK-NEXT: bitcast
+// CHECK-NEXT: lifetime.end
+// CHECK-NEXT: ret void
+
+void test_init() {
+ __unsafe_unretained id x = makeA();
+}
+// CHECK-LABEL: define{{.*}} void @test_init()
+// CHECK: [[X:%.*]] = alloca i8*
+// CHECK: [[T0:%.*]] = call [[A]]* @makeA() [ "clang.arc.attachedcall"(i64 1) ]
+// CHECK-NEXT: call void (...) @llvm.objc.clang.arc.noop.use({{.*}} [[T0]])
+// CHECK-NEXT: [[T1:%.*]] = bitcast [[A]]* [[T0]] to i8*
+// CHECK-NEXT: store i8* [[T1]], i8** [[X]]
+// CHECK-NEXT: bitcast
+// CHECK-NEXT: lifetime.end
+// CHECK-NEXT: ret void
+
+void test_init_assignment() {
+ __unsafe_unretained id x;
+ __unsafe_unretained id y = x = makeA();
+}
+// CHECK-LABEL: define{{.*}} void @test_init_assignment()
+// CHECK: [[X:%.*]] = alloca i8*
+// CHECK: [[Y:%.*]] = alloca i8*
+// CHECK: [[T0:%.*]] = call [[A]]* @makeA() [ "clang.arc.attachedcall"(i64 1) ]
+// CHECK-NEXT: call void (...) @llvm.objc.clang.arc.noop.use({{.*}} [[T0]])
+// CHECK-NEXT: [[T1:%.*]] = bitcast [[A]]* [[T0]] to i8*
+// CHECK-NEXT: store i8* [[T1]], i8** [[X]]
+// CHECK-NEXT: store i8* [[T1]], i8** [[Y]]
+// CHECK-NEXT: bitcast
+// CHECK-NEXT: lifetime.end
+// CHECK-NEXT: bitcast
+// CHECK-NEXT: lifetime.end
+// CHECK-NEXT: ret void
+
+void test_strong_init_assignment() {
+ __unsafe_unretained id x;
+ __strong id y = x = makeA();
+}
+// CHECK-LABEL: define{{.*}} void @test_strong_init_assignment()
+// CHECK: [[X:%.*]] = alloca i8*
+// CHECK: [[Y:%.*]] = alloca i8*
+// CHECK: [[T0:%.*]] = call [[A]]* @makeA() [ "clang.arc.attachedcall"(i64 0) ]
+// CHECK-NEXT: call void (...) @llvm.objc.clang.arc.noop.use({{.*}} [[T0]])
+// CHECK-NEXT: [[T1:%.*]] = bitcast [[A]]* [[T0]] to i8*
+// CHECK-NEXT: store i8* [[T1]], i8** [[X]]
+// CHECK-NEXT: store i8* [[T1]], i8** [[Y]]
+// CHECK-NEXT: [[T0:%.*]] = load i8*, i8** [[Y]]
+// CHECK-NEXT: call void @llvm.objc.release(i8* [[T0]])
+// CHECK-NEXT: bitcast
+// CHECK-NEXT: lifetime.end
+// CHECK-NEXT: bitcast
+// CHECK-NEXT: lifetime.end
+// CHECK-NEXT: ret void
+
+void test_init_strong_assignment() {
+ __strong id x;
+ __unsafe_unretained id y = x = makeA();
+}
+// CHECK-LABEL: define{{.*}} void @test_init_strong_assignment()
+// CHECK: [[X:%.*]] = alloca i8*
+// CHECK: [[Y:%.*]] = alloca i8*
+// CHECK: [[T0:%.*]] = call [[A]]* @makeA() [ "clang.arc.attachedcall"(i64 0) ]
+// CHECK-NEXT: call void (...) @llvm.objc.clang.arc.noop.use({{.*}} [[T0]])
+// CHECK-NEXT: [[T1:%.*]] = bitcast [[A]]* [[T0]] to i8*
+// CHECK-NEXT: [[OLD:%.*]] = load i8*, i8** [[X]]
+// CHECK-NEXT: store i8* [[T1]], i8** [[X]]
+// CHECK-NEXT: call void @llvm.objc.release(i8* [[OLD]])
+// CHECK-NEXT: store i8* [[T1]], i8** [[Y]]
+// CHECK-NEXT: bitcast
+// CHECK-NEXT: lifetime.end
+// CHECK-NEXT: [[T0:%.*]] = load i8*, i8** [[X]]
+// CHECK-NEXT: call void @llvm.objc.release(i8* [[T0]])
+// CHECK-NEXT: bitcast
+// CHECK-NEXT: lifetime.end
+// CHECK-NEXT: ret void
+
+void test_ignored() {
+ makeA();
+}
+// CHECK-LABEL: define{{.*}} void @test_ignored()
+// CHECK: [[T0:%.*]] = call [[A]]* @makeA() [ "clang.arc.attachedcall"(i64 1) ]
+// CHECK-NEXT: call void (...) @llvm.objc.clang.arc.noop.use({{.*}} [[T0]])
+// CHECK-NEXT: ret void
+
+void test_cast_to_void() {
+ (void) makeA();
+}
+// CHECK-LABEL: define{{.*}} void @test_cast_to_void()
+// CHECK: [[T0:%.*]] = call [[A]]* @makeA() [ "clang.arc.attachedcall"(i64 1) ]
+// CHECK-NEXT: call void (...) @llvm.objc.clang.arc.noop.use({{.*}} [[T0]])
+// CHECK-NEXT: ret void
+
+// This is always at the end of the module.
+
+// CHECK-OPTIMIZED: !llvm.module.flags = !{!0,
+// CHECK-OPTIMIZED: !0 = !{i32 1, !"clang.arc.retainAutoreleasedReturnValueMarker", !"mov{{.*}}marker for objc_retainAutoreleaseReturnValue"}
// Make sure it works on x86-32.
// RUN: %clang_cc1 -triple i386-apple-darwin11 -fobjc-runtime=macosx-fragile-10.11 -fobjc-arc -emit-llvm -o - %s | FileCheck %s -check-prefix=CHECK -check-prefix=CHECK-UNOPTIMIZED -check-prefix=CHECK-MARKED -check-prefix=CALL
-// Make sure it works on ARM.
+// Make sure it works on ARM64.
// RUN: %clang_cc1 -triple arm64-apple-ios9 -fobjc-runtime=ios-9.0 -fobjc-arc -emit-llvm -o - %s | FileCheck %s -check-prefix=CHECK -check-prefix=CHECK-UNOPTIMIZED -check-prefix=CHECK-MARKED -check-prefix=CALL
-// RUN: %clang_cc1 -triple arm64-apple-ios9 -fobjc-runtime=ios-9.0 -fobjc-arc -O -disable-llvm-passes -emit-llvm -o - %s | FileCheck %s -check-prefix=CHECK -check-prefix=CHECK-OPTIMIZED -check-prefix=CALL
-// Make sure it works on ARM64.
+// Make sure it works on ARM.
// RUN: %clang_cc1 -triple armv7-apple-ios9 -fobjc-runtime=ios-9.0 -fobjc-arc -emit-llvm -o - %s | FileCheck %s -check-prefix=CHECK -check-prefix=CHECK-UNOPTIMIZED -check-prefix=CHECK-MARKED -check-prefix=CALL
// RUN: %clang_cc1 -triple armv7-apple-ios9 -fobjc-runtime=ios-9.0 -fobjc-arc -O -disable-llvm-passes -emit-llvm -o - %s | FileCheck %s -check-prefix=CHECK -check-prefix=CHECK-OPTIMIZED -check-prefix=CALL
:ref:`stackmap entry <statepoint-stackmap-format>`. See the intrinsic description
for further details.
+ObjC ARC Attached Call Operand Bundles
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+A ``"clang.arc.attachedcall"`` operand bundle on a call indicates the call is
+implicitly followed by a marker instruction and a call to an ObjC runtime
+function that uses the result of the call. If the argument passed to the operand
+bundle is 0, ``@objc_retainAutoreleasedReturnValue`` is called. If 1 is passed,
+``@objc_unsafeClaimAutoreleasedReturnValue`` is called. A call with this bundle
+implicitly uses its return value.
+
+The operand bundle is needed to ensure the call is immediately followed by the
+marker instruction or the ObjC runtime call in the final output.
+
.. _moduleasm:
Module-Level Inline Assembly
--- /dev/null
+//===- ObjCARCUtil.h - ObjC ARC Utility Functions ---------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+/// \file
+/// This file defines ARC utility functions which are used by various parts of
+/// the compiler.
+///
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_LIB_ANALYSIS_OBJCARCUTIL_H
+#define LLVM_LIB_ANALYSIS_OBJCARCUTIL_H
+
+#include "llvm/IR/InstrTypes.h"
+#include "llvm/IR/LLVMContext.h"
+
+namespace llvm {
+namespace objcarc {
+
+/// Returns the key of the module flag that records the inline-asm marker
+/// string emitted for the autoreleased-return-value optimization.
+static inline const char *getRVMarkerModuleFlagStr() {
+  return "clang.arc.retainAutoreleasedReturnValueMarker";
+}
+
+enum AttachedCallOperandBundle : unsigned { RVOB_Retain, RVOB_Claim };
+
+/// Map \p IsRetain to the "clang.arc.attachedcall" bundle argument:
+/// RVOB_Retain (0) for retainRV, RVOB_Claim (1) for claimRV.
+/// Marked inline (like the other helpers in this header) so TUs that include
+/// the header without calling it don't get -Wunused-function warnings.
+static inline AttachedCallOperandBundle
+getAttachedCallOperandBundleEnum(bool IsRetain) {
+  return IsRetain ? RVOB_Retain : RVOB_Claim;
+}
+
+/// Returns true if \p CB carries a "clang.arc.attachedcall" operand bundle
+/// whose argument matches \p IsRetain (retainRV vs. claimRV).
+static inline bool hasAttachedCallOpBundle(const CallBase *CB, bool IsRetain) {
+  auto B = CB->getOperandBundle(LLVMContext::OB_clang_arc_attachedcall);
+  if (!B.hasValue())
+    return false;
+  // The bundle's single input is a ConstantInt holding the enum value.
+  return cast<ConstantInt>(B->Inputs[0])->getZExtValue() ==
+         getAttachedCallOperandBundleEnum(IsRetain);
+}
+
+/// Returns true if \p CB carries a "clang.arc.attachedcall" operand bundle,
+/// regardless of whether it denotes retainRV or claimRV.
+static inline bool hasAttachedCallOpBundle(const CallBase *CB) {
+  return CB->getOperandBundle(LLVMContext::OB_clang_arc_attachedcall)
+      .hasValue();
+}
+
+} // end namespace objcarc
+} // end namespace llvm
+
+#endif
static CallBase *Create(CallBase *CB, ArrayRef<OperandBundleDef> Bundles,
Instruction *InsertPt = nullptr);
+ /// Create a clone of \p CB with operand bundle \p OB added.
+ static CallBase *addOperandBundle(CallBase *CB, uint32_t ID,
+ OperandBundleDef OB,
+ Instruction *InsertPt = nullptr);
+
+ /// Create a clone of \p CB with operand bundle \p ID removed.
+ static CallBase *removeOperandBundle(CallBase *CB, uint32_t ID,
+ Instruction *InsertPt = nullptr);
+
static bool classof(const Instruction *I) {
return I->getOpcode() == Instruction::Call ||
I->getOpcode() == Instruction::Invoke ||
llvm_ptr_ty]>;
def int_objc_clang_arc_use : Intrinsic<[],
[llvm_vararg_ty]>;
+def int_objc_clang_arc_noop_use : DefaultAttrsIntrinsic<[],
+ [llvm_vararg_ty],
+ [IntrInaccessibleMemOnly]>;
def int_objc_unsafeClaimAutoreleasedReturnValue : Intrinsic<[llvm_ptr_ty],
[llvm_ptr_ty]>;
def int_objc_retainedObject : Intrinsic<[llvm_ptr_ty],
/// operand bundle tags without comparing strings. Keep this in sync with
/// LLVMContext::LLVMContext().
enum : unsigned {
- OB_deopt = 0, // "deopt"
- OB_funclet = 1, // "funclet"
- OB_gc_transition = 2, // "gc-transition"
- OB_cfguardtarget = 3, // "cfguardtarget"
- OB_preallocated = 4, // "preallocated"
- OB_gc_live = 5, // "gc-live"
+ OB_deopt = 0, // "deopt"
+ OB_funclet = 1, // "funclet"
+ OB_gc_transition = 2, // "gc-transition"
+ OB_cfguardtarget = 3, // "cfguardtarget"
+ OB_preallocated = 4, // "preallocated"
+ OB_gc_live = 5, // "gc-live"
+ OB_clang_arc_attachedcall = 6, // "clang.arc.attachedcall"
};
/// getMDKindID - Return a unique non-zero ID for the specified metadata kind.
return ARCInstKind::User;
case Intrinsic::objc_sync_exit:
return ARCInstKind::User;
+ case Intrinsic::objc_clang_arc_noop_use:
case Intrinsic::objc_arc_annotation_topdown_bbstart:
case Intrinsic::objc_arc_annotation_topdown_bbend:
case Intrinsic::objc_arc_annotation_bottomup_bbstart:
// Deopt bundles are lowered in LowerCallSiteWithDeoptBundle, and we don't
// have to do anything here to lower funclet bundles.
- assert(!I.hasOperandBundlesOtherThan({LLVMContext::OB_deopt,
- LLVMContext::OB_gc_transition,
- LLVMContext::OB_gc_live,
- LLVMContext::OB_funclet,
- LLVMContext::OB_cfguardtarget}) &&
+ assert(!I.hasOperandBundlesOtherThan(
+ {LLVMContext::OB_deopt, LLVMContext::OB_gc_transition,
+ LLVMContext::OB_gc_live, LLVMContext::OB_funclet,
+ LLVMContext::OB_cfguardtarget,
+ LLVMContext::OB_clang_arc_attachedcall}) &&
"Cannot lower invokes with arbitrary operand bundles yet!");
const Value *Callee(I.getCalledOperand());
// CFGuardTarget bundles are lowered in LowerCallTo.
assert(!I.hasOperandBundlesOtherThan(
{LLVMContext::OB_deopt, LLVMContext::OB_funclet,
- LLVMContext::OB_cfguardtarget, LLVMContext::OB_preallocated}) &&
+ LLVMContext::OB_cfguardtarget, LLVMContext::OB_preallocated,
+ LLVMContext::OB_clang_arc_attachedcall}) &&
"Cannot lower calls with arbitrary operand bundles!");
SDValue Callee = getValue(I.getCalledOperand());
#include "llvm/IR/AutoUpgrade.h"
#include "llvm/ADT/StringSwitch.h"
+#include "llvm/Analysis/ObjCARCUtil.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DIBuilder.h"
#include "llvm/IR/DebugInfo.h"
#include "llvm/IR/DiagnosticInfo.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/IRBuilder.h"
-#include "llvm/IR/Instruction.h"
#include "llvm/IR/InstVisitor.h"
+#include "llvm/IR/Instruction.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/IntrinsicsAArch64.h"
/// returns true if module is modified.
static bool UpgradeRetainReleaseMarker(Module &M) {
bool Changed = false;
- const char *MarkerKey = "clang.arc.retainAutoreleasedReturnValueMarker";
+ const char *MarkerKey = objcarc::getRVMarkerModuleFlagStr();
NamedMDNode *ModRetainReleaseMarker = M.getNamedMetadata(MarkerKey);
if (ModRetainReleaseMarker) {
MDNode *Op = ModRetainReleaseMarker->getOperand(0);
return *Current;
}
+CallBase *CallBase::addOperandBundle(CallBase *CB, uint32_t ID,
+                                     OperandBundleDef OB,
+                                     Instruction *InsertPt) {
+  // If a bundle with this ID is already present, don't add a duplicate;
+  // return the call unchanged.
+  if (CB->getOperandBundle(ID))
+    return CB;
+
+  SmallVector<OperandBundleDef, 1> Bundles;
+  CB->getOperandBundlesAsDefs(Bundles);
+  Bundles.push_back(OB);
+  // Create clones CB with the extended bundle list; the original call is not
+  // erased or replaced here — that is the caller's responsibility.
+  return Create(CB, Bundles, InsertPt);
+}
+
+CallBase *CallBase::removeOperandBundle(CallBase *CB, uint32_t ID,
+                                        Instruction *InsertPt) {
+  SmallVector<OperandBundleDef, 1> Bundles;
+  bool CreateNew = false;
+
+  // Collect every bundle except the one being removed; only clone the call
+  // if a bundle with the given ID was actually present.
+  for (unsigned I = 0, E = CB->getNumOperandBundles(); I != E; ++I) {
+    auto Bundle = CB->getOperandBundleAt(I);
+    if (Bundle.getTagID() == ID) {
+      CreateNew = true;
+      continue;
+    }
+    Bundles.emplace_back(Bundle);
+  }
+
+  // Returns CB itself, unchanged, when no bundle with \p ID was found.
+  return CreateNew ? Create(CB, Bundles, InsertPt) : CB;
+}
+
//===----------------------------------------------------------------------===//
// CallInst Implementation
//===----------------------------------------------------------------------===//
"gc-transition operand bundle id drifted!");
(void)GCLiveEntry;
+ auto *ClangAttachedCall =
+ pImpl->getOrInsertBundleTag("clang.arc.attachedcall");
+ assert(ClangAttachedCall->second == LLVMContext::OB_clang_arc_attachedcall &&
+ "clang.arc.attachedcall operand bundle id drifted!");
+ (void)ClangAttachedCall;
+
SyncScope::ID SingleThreadSSID =
pImpl->getOrInsertSyncScopeID("singlethread");
assert(SingleThreadSSID == SyncScope::SingleThread &&
// and at most one "preallocated" operand bundle.
bool FoundDeoptBundle = false, FoundFuncletBundle = false,
FoundGCTransitionBundle = false, FoundCFGuardTargetBundle = false,
- FoundPreallocatedBundle = false, FoundGCLiveBundle = false;;
+ FoundPreallocatedBundle = false, FoundGCLiveBundle = false,
+ FoundAttachedCallBundle = false;
for (unsigned i = 0, e = Call.getNumOperandBundles(); i < e; ++i) {
OperandBundleUse BU = Call.getOperandBundleAt(i);
uint32_t Tag = BU.getTagID();
Assert(!FoundGCLiveBundle, "Multiple gc-live operand bundles",
Call);
FoundGCLiveBundle = true;
+ } else if (Tag == LLVMContext::OB_clang_arc_attachedcall) {
+ Assert(!FoundAttachedCallBundle,
+ "Multiple \"clang.arc.attachedcall\" operand bundles", Call);
+ FoundAttachedCallBundle = true;
}
}
+ if (FoundAttachedCallBundle)
+ Assert(FTy->getReturnType()->isPointerTy(),
+ "a call with operand bundle \"clang.arc.attachedcall\" must call a "
+ "function returning a pointer",
+ Call);
+
// Verify that each inlinable callsite of a debug-info-bearing function in a
// debug-info-bearing function has a debug location attached to it. Failure to
// do so causes assertion failures when the inliner sets up inline scope info.
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/Triple.h"
#include "llvm/ADT/Twine.h"
+#include "llvm/Analysis/ObjCARCUtil.h"
#include "llvm/Analysis/VectorUtils.h"
#include "llvm/CodeGen/CallingConvLower.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
}
unsigned CallOpc = AArch64ISD::CALL;
- // Calls marked with "rv_marker" are special. They should be expanded to the
- // call, directly followed by a special marker sequence. Use the CALL_RVMARKER
- // to do that.
- if (CLI.CB && CLI.CB->hasRetAttr("rv_marker")) {
- assert(!IsTailCall && "tail calls cannot be marked with rv_marker");
+ // Calls with operand bundle "clang.arc.attachedcall" are special. They should
+ // be expanded to the call, directly followed by a special marker sequence.
+ // Use the CALL_RVMARKER to do that.
+ if (CLI.CB && objcarc::hasAttachedCallOpBundle(CLI.CB)) {
+ assert(!IsTailCall &&
+ "tail calls cannot be marked with clang.arc.attachedcall");
CallOpc = AArch64ISD::CALL_RVMARKER;
}
Autorelease,
StoreStrong,
RetainRV,
+ ClaimRV,
RetainAutorelease,
RetainAutoreleaseRV,
};
Autorelease = nullptr;
StoreStrong = nullptr;
RetainRV = nullptr;
+ ClaimRV = nullptr;
RetainAutorelease = nullptr;
RetainAutoreleaseRV = nullptr;
}
case ARCRuntimeEntryPointKind::RetainRV:
return getIntrinsicEntryPoint(RetainRV,
Intrinsic::objc_retainAutoreleasedReturnValue);
+ case ARCRuntimeEntryPointKind::ClaimRV:
+ return getIntrinsicEntryPoint(
+ ClaimRV, Intrinsic::objc_unsafeClaimAutoreleasedReturnValue);
case ARCRuntimeEntryPointKind::RetainAutorelease:
return getIntrinsicEntryPoint(RetainAutorelease,
Intrinsic::objc_retainAutorelease);
/// Declaration for objc_retainAutoreleasedReturnValue().
Function *RetainRV = nullptr;
+ /// Declaration for objc_unsafeClaimAutoreleasedReturnValue().
+ Function *ClaimRV = nullptr;
+
/// Declaration for objc_retainAutorelease().
Function *RetainAutorelease = nullptr;
#include "ObjCARC.h"
#include "llvm-c/Initialization.h"
+#include "llvm/Analysis/ObjCARCUtil.h"
+#include "llvm/IR/IRBuilder.h"
+#include "llvm/IR/InlineAsm.h"
+#include "llvm/IR/Instructions.h"
#include "llvm/InitializePasses.h"
+#include "llvm/Transforms/Utils/BasicBlockUtils.h"
namespace llvm {
class PassRegistry;
void LLVMInitializeObjCARCOpts(LLVMPassRegistryRef R) {
initializeObjCARCOpts(*unwrap(R));
}
+
+CallInst *objcarc::createCallInstWithColors(
+ FunctionCallee Func, ArrayRef<Value *> Args, const Twine &NameStr,
+ Instruction *InsertBefore,
+ const DenseMap<BasicBlock *, ColorVector> &BlockColors) {
+ FunctionType *FTy = Func.getFunctionType();
+ Value *Callee = Func.getCallee();
+ SmallVector<OperandBundleDef, 1> OpBundles;
+
+ if (!BlockColors.empty()) {
+ const ColorVector &CV = BlockColors.find(InsertBefore->getParent())->second;
+ assert(CV.size() == 1 && "non-unique color for block!");
+ Instruction *EHPad = CV.front()->getFirstNonPHI();
+ if (EHPad->isEHPad())
+ OpBundles.emplace_back("funclet", EHPad);
+ }
+
+ return CallInst::Create(FTy, Callee, Args, OpBundles, NameStr, InsertBefore);
+}
+
+std::pair<bool, bool>
+BundledRetainClaimRVs::insertAfterInvokes(Function &F, DominatorTree *DT) {
+ bool Changed = false, CFGChanged = false;
+
+ for (BasicBlock &BB : F) {
+ auto *I = dyn_cast<InvokeInst>(BB.getTerminator());
+
+ if (!I)
+ continue;
+
+ if (!objcarc::hasAttachedCallOpBundle(I))
+ continue;
+
+ BasicBlock *DestBB = I->getNormalDest();
+
+ if (!DestBB->getSinglePredecessor()) {
+ assert(I->getSuccessor(0) == DestBB &&
+ "the normal dest is expected to be the first successor");
+ DestBB = SplitCriticalEdge(I, 0, CriticalEdgeSplittingOptions(DT));
+ CFGChanged = true;
+ }
+
+ // We don't have to call insertRVCallWithColors since DestBB is the normal
+ // destination of the invoke.
+ insertRVCall(&*DestBB->getFirstInsertionPt(), I);
+ Changed = true;
+ }
+
+ return std::make_pair(Changed, CFGChanged);
+}
+
+CallInst *BundledRetainClaimRVs::insertRVCall(Instruction *InsertPt,
+ CallBase *AnnotatedCall) {
+ DenseMap<BasicBlock *, ColorVector> BlockColors;
+ return insertRVCallWithColors(InsertPt, AnnotatedCall, BlockColors);
+}
+
+CallInst *BundledRetainClaimRVs::insertRVCallWithColors(
+ Instruction *InsertPt, CallBase *AnnotatedCall,
+ const DenseMap<BasicBlock *, ColorVector> &BlockColors) {
+ IRBuilder<> Builder(InsertPt);
+ bool IsRetainRV = objcarc::hasAttachedCallOpBundle(AnnotatedCall, true);
+ Function *Func = EP.get(IsRetainRV ? ARCRuntimeEntryPointKind::RetainRV
+ : ARCRuntimeEntryPointKind::ClaimRV);
+ Type *ParamTy = Func->getArg(0)->getType();
+ Value *CallArg = Builder.CreateBitCast(AnnotatedCall, ParamTy);
+ auto *Call =
+ createCallInstWithColors(Func, CallArg, "", InsertPt, BlockColors);
+ RVCalls[Call] = AnnotatedCall;
+ return Call;
+}
+
+BundledRetainClaimRVs::~BundledRetainClaimRVs() {
+ if (ContractPass) {
+ // At this point, we know that the annotated calls can't be tail calls as
+ // they are followed by marker instructions and retainRV/claimRV calls. Mark
+ // them as notail, so that the backend knows these calls can't be tail
+ // calls.
+ for (auto P : RVCalls)
+ if (auto *CI = dyn_cast<CallInst>(P.second))
+ CI->setTailCallKind(CallInst::TCK_NoTail);
+ } else {
+ for (auto P : RVCalls)
+ EraseInstruction(P.first);
+ }
+
+ RVCalls.clear();
+}
#ifndef LLVM_LIB_TRANSFORMS_OBJCARC_OBJCARC_H
#define LLVM_LIB_TRANSFORMS_OBJCARC_OBJCARC_H
+#include "ARCRuntimeEntryPoints.h"
+#include "llvm/Analysis/EHPersonalities.h"
#include "llvm/Analysis/ObjCARCAnalysisUtils.h"
+#include "llvm/Analysis/ObjCARCUtil.h"
#include "llvm/Transforms/Utils/Local.h"
namespace llvm {
}
}
+static inline MDString *getRVInstMarker(Module &M) {
+ const char *MarkerKey = getRVMarkerModuleFlagStr();
+ return dyn_cast_or_null<MDString>(M.getModuleFlag(MarkerKey));
+}
+
+/// Create a call instruction with the correct funclet token. This should be
+/// called instead of calling CallInst::Create directly unless the call is
+/// going to be removed from the IR before WinEHPrepare.
+CallInst *createCallInstWithColors(
+ FunctionCallee Func, ArrayRef<Value *> Args, const Twine &NameStr,
+ Instruction *InsertBefore,
+ const DenseMap<BasicBlock *, ColorVector> &BlockColors);
+
+class BundledRetainClaimRVs {
+public:
+  BundledRetainClaimRVs(ARCRuntimeEntryPoints &P, bool ContractPass)
+      : EP(P), ContractPass(ContractPass) {}
+  ~BundledRetainClaimRVs();
+
+  /// Insert a retainRV/claimRV call to the normal destination blocks of invokes
+  /// with operand bundle "clang.arc.attachedcall". If the edge to the normal
+  /// destination block is a critical edge, split it.
+  std::pair<bool, bool> insertAfterInvokes(Function &F, DominatorTree *DT);
+
+  /// Insert a retainRV/claimRV call.
+  CallInst *insertRVCall(Instruction *InsertPt, CallBase *AnnotatedCall);
+
+  /// Insert a retainRV/claimRV call with colors.
+  CallInst *insertRVCallWithColors(
+      Instruction *InsertPt, CallBase *AnnotatedCall,
+      const DenseMap<BasicBlock *, ColorVector> &BlockColors);
+
+  /// See if an instruction is a bundled retainRV/claimRV call.
+  bool contains(const Instruction *I) const {
+    if (auto *CI = dyn_cast<CallInst>(I))
+      return RVCalls.count(CI);
+    return false;
+  }
+
+  /// Remove a retainRV/claimRV call entirely, along with the operand bundle
+  /// and the @llvm.objc.clang.arc.noop.use call on its annotated call.
+  void eraseInst(CallInst *CI) {
+    auto It = RVCalls.find(CI);
+    if (It != RVCalls.end()) {
+      // Remove the call to @llvm.objc.clang.arc.noop.use that uses the
+      // annotated call's result. (NoopUse is deliberately not named CI to
+      // avoid shadowing the parameter.)
+      for (auto U = It->second->user_begin(), E = It->second->user_end();
+           U != E; ++U)
+        if (auto *NoopUse = dyn_cast<CallInst>(*U))
+          if (NoopUse->getIntrinsicID() ==
+              Intrinsic::objc_clang_arc_noop_use) {
+            NoopUse->eraseFromParent();
+            break;
+          }
+
+      // Strip "clang.arc.attachedcall" off the annotated call by cloning it
+      // without the bundle, then replace and erase the original.
+      auto *NewCall = CallBase::removeOperandBundle(
+          It->second, LLVMContext::OB_clang_arc_attachedcall, It->second);
+      NewCall->copyMetadata(*It->second);
+      It->second->replaceAllUsesWith(NewCall);
+      It->second->eraseFromParent();
+      RVCalls.erase(It);
+    }
+    EraseInstruction(CI);
+  }
+
+private:
+  /// A map of inserted retainRV/claimRV calls to annotated calls/invokes.
+  DenseMap<CallInst *, CallBase *> RVCalls;
+
+  ARCRuntimeEntryPoints &EP;
+  bool ContractPass;
+};
+
} // end namespace objcarc
} // end namespace llvm
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/EHPersonalities.h"
+#include "llvm/Analysis/ObjCARCUtil.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/InlineAsm.h"
#include "llvm/IR/InstIterator.h"
class ObjCARCContract {
bool Changed;
+ bool CFGChanged;
AAResults *AA;
DominatorTree *DT;
ProvenanceAnalysis PA;
ARCRuntimeEntryPoints EP;
-
- /// A flag indicating whether this optimization pass should run.
- bool Run;
+ BundledRetainClaimRVs *BundledInsts = nullptr;
/// The inline asm string to insert between calls and RetainRV calls to make
/// the optimization work on targets which need it.
public:
bool init(Module &M);
bool run(Function &F, AAResults *AA, DominatorTree *DT);
+ bool hasCFGChanged() const { return CFGChanged; }
};
class ObjCARCContractLegacyPass : public FunctionPass {
return Retain;
}
-/// Create a call instruction with the correct funclet token. Should be used
-/// instead of calling CallInst::Create directly.
-static CallInst *
-createCallInst(FunctionType *FTy, Value *Func, ArrayRef<Value *> Args,
- const Twine &NameStr, Instruction *InsertBefore,
- const DenseMap<BasicBlock *, ColorVector> &BlockColors) {
- SmallVector<OperandBundleDef, 1> OpBundles;
- if (!BlockColors.empty()) {
- const ColorVector &CV = BlockColors.find(InsertBefore->getParent())->second;
- assert(CV.size() == 1 && "non-unique color for block!");
- Instruction *EHPad = CV.front()->getFirstNonPHI();
- if (EHPad->isEHPad())
- OpBundles.emplace_back("funclet", EHPad);
- }
-
- return CallInst::Create(FTy, Func, Args, OpBundles, NameStr, InsertBefore);
-}
-
-static CallInst *
-createCallInst(FunctionCallee Func, ArrayRef<Value *> Args, const Twine &NameStr,
- Instruction *InsertBefore,
- const DenseMap<BasicBlock *, ColorVector> &BlockColors) {
- return createCallInst(Func.getFunctionType(), Func.getCallee(), Args, NameStr,
- InsertBefore, BlockColors);
-}
-
/// Attempt to merge an objc_release with a store, load, and objc_retain to form
/// an objc_storeStrong. An objc_storeStrong:
///
if (Args[1]->getType() != I8X)
Args[1] = new BitCastInst(Args[1], I8X, "", Store);
Function *Decl = EP.get(ARCRuntimeEntryPointKind::StoreStrong);
- CallInst *StoreStrong = createCallInst(Decl, Args, "", Store, BlockColors);
+ CallInst *StoreStrong =
+ objcarc::createCallInstWithColors(Decl, Args, "", Store, BlockColors);
StoreStrong->setDoesNotThrow();
StoreStrong->setDebugLoc(Store->getDebugLoc());
case ARCInstKind::RetainRV:
case ARCInstKind::ClaimRV: {
// If we're compiling for a target which needs a special inline-asm
- // marker to do the return value optimization, insert it now.
+ // marker to do the return value optimization and the retainRV/claimRV call
+ // wasn't bundled with a call, insert the marker now.
if (!RVInstMarker)
return false;
+
+ if (BundledInsts->contains(Inst))
+ return false;
+
BasicBlock::iterator BBI = Inst->getIterator();
BasicBlock *InstParent = Inst->getParent();
RVInstMarker->getString(),
/*Constraints=*/"", /*hasSideEffects=*/true);
- createCallInst(IA, None, "", Inst, BlockColors);
+ objcarc::createCallInstWithColors(IA, None, "", Inst, BlockColors);
}
decline_rv_optimization:
return false;
Inst->eraseFromParent();
return true;
default:
+ if (auto *CI = dyn_cast<CallInst>(Inst))
+ if (CI->getIntrinsicID() == Intrinsic::objc_clang_arc_noop_use) {
+ // Remove calls to @llvm.objc.clang.arc.noop.use(...).
+ Changed = true;
+ CI->eraseFromParent();
+ }
return true;
}
}
//===----------------------------------------------------------------------===//
bool ObjCARCContract::init(Module &M) {
- // If nothing in the Module uses ARC, don't do anything.
- Run = ModuleHasARC(M);
- if (!Run)
- return false;
-
EP.init(&M);
// Initialize RVInstMarker.
- const char *MarkerKey = "clang.arc.retainAutoreleasedReturnValueMarker";
- RVInstMarker = dyn_cast_or_null<MDString>(M.getModuleFlag(MarkerKey));
+ RVInstMarker = getRVInstMarker(M);
return false;
}
if (!EnableARCOpts)
return false;
- // If nothing in the Module uses ARC, don't do anything.
- if (!Run)
- return false;
-
- Changed = false;
+ Changed = CFGChanged = false;
AA = A;
DT = D;
PA.setAA(A);
+ BundledRetainClaimRVs BRV(EP, true);
+ BundledInsts = &BRV;
+
+ std::pair<bool, bool> R = BundledInsts->insertAfterInvokes(F, DT);
+ Changed |= R.first;
+ CFGChanged |= R.second;
DenseMap<BasicBlock *, ColorVector> BlockColors;
if (F.hasPersonalityFn() &&
LLVM_DEBUG(dbgs() << "Visiting: " << *Inst << "\n");
+ if (auto *CI = dyn_cast<CallInst>(Inst))
+ if (objcarc::hasAttachedCallOpBundle(CI)) {
+ BundledInsts->insertRVCallWithColors(&*I, CI, BlockColors);
+ --I;
+ Changed = true;
+ }
+
// First try to peephole Inst. If there is nothing further we can do in
// terms of undoing objc-arc-expand, process the next inst.
if (tryToPeepholeInstruction(F, Inst, I, TailOkForStoreStrongs,
void ObjCARCContractLegacyPass::getAnalysisUsage(AnalysisUsage &AU) const {
AU.addRequired<AAResultsWrapperPass>();
AU.addRequired<DominatorTreeWrapperPass>();
- AU.setPreservesCFG();
}
Pass *llvm::createObjCARCContractPass() {
bool Changed = OCAC.run(F, &AM.getResult<AAManager>(F),
&AM.getResult<DominatorTreeAnalysis>(F));
+ bool CFGChanged = OCAC.hasCFGChanged();
if (Changed) {
PreservedAnalyses PA;
- PA.preserveSet<CFGAnalyses>();
+ if (!CFGChanged)
+ PA.preserveSet<CFGAnalyses>();
return PA;
}
return PreservedAnalyses::all();
#include "llvm/Analysis/ObjCARCAliasAnalysis.h"
#include "llvm/Analysis/ObjCARCAnalysisUtils.h"
#include "llvm/Analysis/ObjCARCInstKind.h"
+#include "llvm/Analysis/ObjCARCUtil.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/CFG.h"
#include "llvm/IR/Constant.h"
/// The main ARC optimization pass.
class ObjCARCOpt {
bool Changed;
+ bool CFGChanged;
ProvenanceAnalysis PA;
/// A cache of references to runtime entry point constants.
/// MDKind identifiers.
ARCMDKindCache MDKindCache;
- /// A flag indicating whether this optimization pass should run.
- bool Run;
+ BundledRetainClaimRVs *BundledInsts = nullptr;
/// A flag indicating whether the optimization that removes or moves
/// retain/release pairs should be performed.
void init(Module &M);
bool run(Function &F, AAResults &AA);
void releaseMemory();
+ bool hasCFGChanged() const { return CFGChanged; }
};
/// The main ARC optimization pass.
void ObjCARCOptLegacyPass::getAnalysisUsage(AnalysisUsage &AU) const {
AU.addRequired<ObjCARCAAWrapperPass>();
AU.addRequired<AAResultsWrapperPass>();
- // ARC optimization doesn't currently split critical edges.
- AU.setPreservesCFG();
}
/// Turn objc_retainAutoreleasedReturnValue into objc_retain if the operand is
}
}
+ assert(!BundledInsts->contains(RetainRV) &&
+ "a bundled retainRV's argument should be a call");
+
// Turn it to a plain objc_retain.
Changed = true;
++NumPeeps;
Function &F, DenseMap<BasicBlock *, ColorVector> &BlockColors,
Instruction *Inst, const Value *&Arg, ARCInstKind Class,
Instruction *AutoreleaseRV, const Value *&AutoreleaseRVArg) {
+ if (BundledInsts->contains(Inst))
+ return false;
+
// Must be in the same basic block.
assert(Inst->getParent() == AutoreleaseRV->getParent());
for (inst_iterator I = inst_begin(&F), E = inst_end(&F); I != E; ) {
Instruction *Inst = &*I++;
+ if (auto *CI = dyn_cast<CallInst>(Inst))
+ if (objcarc::hasAttachedCallOpBundle(CI)) {
+ BundledInsts->insertRVCall(&*I, CI);
+ Changed = true;
+ }
+
ARCInstKind Class = GetBasicARCInstKind(Inst);
// Skip this loop if this instruction isn't itself an ARC intrinsic.
// We can delete this call if it takes an inert value.
SmallPtrSet<Value *, 1> VisitedPhis;
+ if (BundledInsts->contains(Inst)) {
+ UsedInThisFunction |= 1 << unsigned(Class);
+ return;
+ }
+
if (IsNoopOnGlobal(Class))
if (isInertARCValue(Inst->getOperand(0), VisitedPhis)) {
if (!Inst->getType()->isVoidTy())
if (Ptr == Arg)
continue; // Handled above.
TopDownPtrState &S = MI->second;
- if (S.HandlePotentialAlterRefCount(Inst, Ptr, PA, Class))
+ if (S.HandlePotentialAlterRefCount(Inst, Ptr, PA, Class, *BundledInsts))
continue;
S.HandlePotentialUse(Inst, Ptr, PA, Class);
++NumRets;
LLVM_DEBUG(dbgs() << "Erasing: " << *Retain << "\nErasing: " << *Autorelease
<< "\n");
- EraseInstruction(Retain);
+ BundledInsts->eraseInst(Retain);
EraseInstruction(Autorelease);
}
}
if (!EnableARCOpts)
return;
- // If nothing in the Module uses ARC, don't do anything.
- Run = ModuleHasARC(M);
- if (!Run)
- return;
-
// Intuitively, objc_retain and others are nocapture, however in practice
// they are not, because they return their argument value. And objc_release
// calls finalizers which can have arbitrary side effects.
if (!EnableARCOpts)
return false;
- // If nothing in the Module uses ARC, don't do anything.
- if (!Run)
- return false;
-
- Changed = false;
+ Changed = CFGChanged = false;
+ BundledRetainClaimRVs BRV(EP, false);
+ BundledInsts = &BRV;
LLVM_DEBUG(dbgs() << "<<< ObjCARCOpt: Visiting Function: " << F.getName()
<< " >>>"
"\n");
+ std::pair<bool, bool> R = BundledInsts->insertAfterInvokes(F, nullptr);
+ Changed |= R.first;
+ CFGChanged |= R.second;
+
PA.setAA(&AA);
#ifndef NDEBUG
OCAO.init(*F.getParent());
bool Changed = OCAO.run(F, AM.getResult<AAManager>(F));
+ bool CFGChanged = OCAO.hasCFGChanged();
if (Changed) {
PreservedAnalyses PA;
- PA.preserveSet<CFGAnalyses>();
+ if (!CFGChanged)
+ PA.preserveSet<CFGAnalyses>();
return PA;
}
return PreservedAnalyses::all();
#include "ObjCARC.h"
#include "llvm/Analysis/ObjCARCAnalysisUtils.h"
#include "llvm/Analysis/ObjCARCInstKind.h"
+#include "llvm/Analysis/ObjCARCUtil.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
InsertAfter = skipDebugIntrinsics(InsertAfter);
InsertReverseInsertPt(&*InsertAfter);
+
+ // Don't insert anything between a call/invoke with operand bundle
+ // "clang.arc.attachedcall" and the retainRV/claimRV call that uses the call
+ // result.
+ if (auto *CB = dyn_cast<CallBase>(Inst))
+ if (objcarc::hasAttachedCallOpBundle(CB))
+ SetCFGHazardAfflicted(true);
};
// Check for possible direct uses.
llvm_unreachable("Sequence unknown enum value");
}
-bool TopDownPtrState::HandlePotentialAlterRefCount(Instruction *Inst,
- const Value *Ptr,
- ProvenanceAnalysis &PA,
- ARCInstKind Class) {
+bool TopDownPtrState::HandlePotentialAlterRefCount(
+ Instruction *Inst, const Value *Ptr, ProvenanceAnalysis &PA,
+ ARCInstKind Class, const BundledRetainClaimRVs &BundledRVs) {
// Check for possible releases. Treat clang.arc.use as a releasing instruction
// to prevent sinking a retain past it.
if (!CanDecrementRefCount(Inst, Ptr, PA, Class) &&
assert(!HasReverseInsertPts());
InsertReverseInsertPt(Inst);
+ // Don't insert anything between a call/invoke with operand bundle
+ // "clang.arc.attachedcall" and the retainRV/claimRV call that uses the call
+ // result.
+ if (BundledRVs.contains(Inst))
+ SetCFGHazardAfflicted(true);
+
// One call can't cause a transition from S_Retain to S_CanRelease
// and S_CanRelease to S_Use. If we've made the first transition,
// we're done.
namespace objcarc {
class ARCMDKindCache;
+class BundledRetainClaimRVs;
class ProvenanceAnalysis;
/// \enum Sequence
ProvenanceAnalysis &PA, ARCInstKind Class);
bool HandlePotentialAlterRefCount(Instruction *Inst, const Value *Ptr,
- ProvenanceAnalysis &PA, ARCInstKind Class);
+ ProvenanceAnalysis &PA, ARCInstKind Class,
+ const BundledRetainClaimRVs &BundledRVs);
};
} // end namespace objcarc
/// represented here for efficient lookup.
SmallPtrSet<Function *, 16> MRVFunctionsTracked;
- /// MustTailFunctions - Each function here is a callee of non-removable
- /// musttail call site.
- SmallPtrSet<Function *, 16> MustTailCallees;
+ /// A list of functions whose return cannot be modified.
+ SmallPtrSet<Function *, 16> MustPreserveReturnsInFunctions;
/// TrackingIncomingArguments - This is the set of functions for whose
/// arguments we make optimistic assumptions about and try to prove as
TrackedRetVals.insert(std::make_pair(F, ValueLatticeElement()));
}
- /// AddMustTailCallee - If the SCCP solver finds that this function is called
- /// from non-removable musttail call site.
- void AddMustTailCallee(Function *F) {
- MustTailCallees.insert(F);
+ /// Add function to the list of functions whose return cannot be modified.
+ void addToMustPreserveReturnsInFunctions(Function *F) {
+ MustPreserveReturnsInFunctions.insert(F);
}
- /// Returns true if the given function is called from non-removable musttail
- /// call site.
- bool isMustTailCallee(Function *F) {
- return MustTailCallees.count(F);
+ /// Returns true if the return of the given function cannot be modified.
+ bool mustPreserveReturn(Function *F) {
+ return MustPreserveReturnsInFunctions.count(F);
}
void AddArgumentTrackedFunction(Function *F) {
return MRVFunctionsTracked;
}
- /// getMustTailCallees - Get the set of functions which are called
- /// from non-removable musttail call sites.
- const SmallPtrSet<Function *, 16> getMustTailCallees() {
- return MustTailCallees;
- }
-
/// markOverdefined - Mark the specified value overdefined. This
/// works with both scalars and structs.
void markOverdefined(Value *V) {
assert(Const && "Constant is nullptr here!");
// Replacing `musttail` instructions with constant breaks `musttail` invariant
- // unless the call itself can be removed
- CallInst *CI = dyn_cast<CallInst>(V);
- if (CI && CI->isMustTailCall() && !CI->isSafeToRemove()) {
- Function *F = CI->getCalledFunction();
+ // unless the call itself can be removed.
+ // Calls with "clang.arc.attachedcall" implicitly use the return value and
+ // those uses cannot be updated with a constant.
+ CallBase *CB = dyn_cast<CallBase>(V);
+ if (CB && ((CB->isMustTailCall() && !CB->isSafeToRemove()) ||
+ CB->getOperandBundle(LLVMContext::OB_clang_arc_attachedcall))) {
+ Function *F = CB->getCalledFunction();
// Don't zap returns of the callee
if (F)
- Solver.AddMustTailCallee(F);
+ Solver.addToMustPreserveReturnsInFunctions(F);
- LLVM_DEBUG(dbgs() << " Can\'t treat the result of musttail call : " << *CI
+ LLVM_DEBUG(dbgs() << " Can\'t treat the result of call " << *CB
<< " as a constant\n");
return false;
}
if (!Solver.isArgumentTrackedFunction(&F))
return;
- // There is a non-removable musttail call site of this function. Zapping
- // returns is not allowed.
- if (Solver.isMustTailCallee(&F)) {
- LLVM_DEBUG(dbgs() << "Can't zap returns of the function : " << F.getName()
- << " due to present musttail call of it\n");
+ if (Solver.mustPreserveReturn(&F)) {
+ LLVM_DEBUG(
+ dbgs()
+ << "Can't zap returns of the function : " << F.getName()
+ << " due to present musttail or \"clang.arc.attachedcall\" call of "
+ "it\n");
return;
}
isa<PseudoProbeInst>(&I))
continue;
- bool IsNoTail = CI->isNoTailCall() || CI->hasOperandBundles();
+ // Special-case operand bundle "clang.arc.attachedcall".
+ bool IsNoTail =
+ CI->isNoTailCall() || CI->hasOperandBundlesOtherThan(
+ LLVMContext::OB_clang_arc_attachedcall);
if (!IsNoTail && CI->doesNotAccessMemory()) {
// A call to a readnone function whose arguments are all things computed
#include "llvm/Analysis/CaptureTracking.h"
#include "llvm/Analysis/EHPersonalities.h"
#include "llvm/Analysis/InstructionSimplify.h"
+#include "llvm/Analysis/ObjCARCAnalysisUtils.h"
+#include "llvm/Analysis/ObjCARCUtil.h"
#include "llvm/Analysis/ProfileSummaryInfo.h"
-#include "llvm/Transforms/Utils/Local.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/Analysis/VectorUtils.h"
#include "llvm/IR/Argument.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Transforms/Utils/AssumeBundleBuilder.h"
#include "llvm/Transforms/Utils/Cloning.h"
+#include "llvm/Transforms/Utils/Local.h"
#include "llvm/Transforms/Utils/ValueMapper.h"
#include <algorithm>
#include <cassert>
}
}
+/// An operand bundle "clang.arc.attachedcall" on a call indicates the call
+/// result is implicitly consumed by a call to retainRV or claimRV immediately
+/// after the call. This function inlines the retainRV/claimRV calls.
+///
+/// There are three cases to consider:
+///
+/// 1. If there is a call to autoreleaseRV that takes a pointer to the returned
+///    object in the callee return block, the autoreleaseRV call and the
+///    retainRV/claimRV call in the caller cancel out. If the call in the caller
+///    is a claimRV call, a call to objc_release is emitted.
+///
+/// 2. If there is a call in the callee return block that doesn't have operand
+///    bundle "clang.arc.attachedcall", the operand bundle on the original call
+///    is transferred to the call in the callee.
+///
+/// 3. Otherwise, a call to objc_retain is inserted if the call in the caller is
+///    a retainRV call.
+///
+/// \p CB is the annotated call/invoke in the caller; \p Returns holds the
+/// return instructions of the inlined callee body.
+static void
+inlineRetainOrClaimRVCalls(CallBase &CB,
+                           const SmallVectorImpl<ReturnInst *> &Returns) {
+  Module *Mod = CB.getModule();
+  // The bundle's i64 argument selects retainRV (0) vs claimRV (1); exactly
+  // one of the two flags is set.
+  bool IsRetainRV = objcarc::hasAttachedCallOpBundle(&CB, true),
+       IsClaimRV = !IsRetainRV;
+
+  for (auto *RI : Returns) {
+    Value *RetOpnd = objcarc::GetRCIdentityRoot(RI->getOperand(0));
+    BasicBlock::reverse_iterator I = ++(RI->getIterator().getReverse());
+    BasicBlock::reverse_iterator EI = RI->getParent()->rend();
+    bool InsertRetainCall = IsRetainRV;
+    IRBuilder<> Builder(RI->getContext());
+
+    // Walk backwards through the basic block looking for either a matching
+    // autoreleaseRV call or an unannotated call.
+    for (; I != EI;) {
+      auto CurI = I++;
+
+      // Ignore casts.
+      if (isa<CastInst>(*CurI))
+        continue;
+
+      if (auto *II = dyn_cast<IntrinsicInst>(&*CurI)) {
+        if (II->getIntrinsicID() == Intrinsic::objc_autoreleaseReturnValue &&
+            II->hasNUses(0) &&
+            objcarc::GetRCIdentityRoot(II->getOperand(0)) == RetOpnd) {
+          // If we've found a matching autoreleaseRV call:
+          // - If claimRV is attached to the call, insert a call to objc_release
+          //   and erase the autoreleaseRV call.
+          // - If retainRV is attached to the call, just erase the autoreleaseRV
+          //   call.
+          if (IsClaimRV) {
+            Builder.SetInsertPoint(II);
+            Function *IFn =
+                Intrinsic::getDeclaration(Mod, Intrinsic::objc_release);
+            Value *BC =
+                Builder.CreateBitCast(RetOpnd, IFn->getArg(0)->getType());
+            Builder.CreateCall(IFn, BC, "");
+          }
+          II->eraseFromParent();
+          InsertRetainCall = false;
+        }
+      } else if (auto *CI = dyn_cast<CallInst>(&*CurI)) {
+        if (objcarc::GetRCIdentityRoot(CI) == RetOpnd &&
+            !objcarc::hasAttachedCallOpBundle(CI)) {
+          // If we've found an unannotated call that defines RetOpnd, add a
+          // "clang.arc.attachedcall" operand bundle.
+          Value *BundleArgs[] = {ConstantInt::get(
+              Builder.getInt64Ty(),
+              objcarc::getAttachedCallOperandBundleEnum(IsRetainRV))};
+          OperandBundleDef OB("clang.arc.attachedcall", BundleArgs);
+          auto *NewCall = CallBase::addOperandBundle(
+              CI, LLVMContext::OB_clang_arc_attachedcall, OB, CI);
+          NewCall->copyMetadata(*CI);
+          CI->replaceAllUsesWith(NewCall);
+          CI->eraseFromParent();
+          InsertRetainCall = false;
+        }
+      }
+
+      // Unconditional: only the instruction immediately preceding the return
+      // (ignoring casts) may pair with the attached retainRV/claimRV, so the
+      // scan stops at the first non-cast instruction either way.
+      break;
+    }
+
+    if (InsertRetainCall) {
+      // The retainRV is attached to the call and we've failed to find a
+      // matching autoreleaseRV or an annotated call in the callee. Emit a call
+      // to objc_retain.
+      Builder.SetInsertPoint(RI);
+      Function *IFn = Intrinsic::getDeclaration(Mod, Intrinsic::objc_retain);
+      Value *BC = Builder.CreateBitCast(RetOpnd, IFn->getArg(0)->getType());
+      Builder.CreateCall(IFn, BC, "");
+    }
+  }
+}
+
/// This function inlines the called function into the basic block of the
/// caller. This returns false if it is not possible to inline this call.
/// The program is still in a well defined state if this occurs though.
// ... and "funclet" operand bundles.
if (Tag == LLVMContext::OB_funclet)
continue;
+ if (Tag == LLVMContext::OB_clang_arc_attachedcall)
+ continue;
return InlineResult::failure("unsupported operand bundle");
}
// Remember the first block that is newly cloned over.
FirstNewBlock = LastBlock; ++FirstNewBlock;
+  // Insert retainRV/claimRV runtime calls.
+ if (objcarc::hasAttachedCallOpBundle(&CB))
+ inlineRetainOrClaimRVCalls(CB, Returns);
+
if (IFI.CallerBFI != nullptr && IFI.CalleeBFI != nullptr)
// Update the BFI of blocks cloned into the caller.
updateCallerBFI(OrigBB, VMap, IFI.CallerBFI, IFI.CalleeBFI,
; CHECK-NEXT: <OPERAND_BUNDLE_TAG
; CHECK-NEXT: <OPERAND_BUNDLE_TAG
; CHECK-NEXT: <OPERAND_BUNDLE_TAG
+; CHECK-NEXT: <OPERAND_BUNDLE_TAG
; CHECK-NEXT: </OPERAND_BUNDLE_TAGS_BLOCK
; CHECK: <FUNCTION_BLOCK
; GISEL-NOT: mov x29, x29
;
entry:
- %call = call "rv_marker" i8* @foo1()
+ %call = call i8* @foo1() [ "clang.arc.attachedcall"(i64 0) ]
ret i8* %call
}
entry:
%tobool.not = icmp eq i32 %c, 0
%.sink = select i1 %tobool.not, i32 2, i32 1
- %call1 = call "rv_marker" i8* @foo0(i32 %.sink)
+ %call1 = call i8* @foo0(i32 %.sink) [ "clang.arc.attachedcall"(i64 0) ]
tail call void @foo2(i8* %call1)
ret void
}
; SELDAG-NEXT: mov x29, x29
;
entry:
- %call = call "rv_marker" i8* @foo1()
+ %call = call i8* @foo1() [ "clang.arc.attachedcall"(i64 0) ]
invoke void @objc_object(i8* %call) #5
to label %invoke.cont unwind label %lpad
%s = alloca %struct.S, align 1
%0 = getelementptr inbounds %struct.S, %struct.S* %s, i64 0, i32 0
call void @llvm.lifetime.start.p0i8(i64 1, i8* nonnull %0) #2
- %call = invoke "rv_marker" i8* @foo1()
+ %call = invoke i8* @foo1() [ "clang.arc.attachedcall"(i64 0) ]
to label %invoke.cont unwind label %lpad
invoke.cont: ; preds = %entry
;
entry:
%0 = load i8* ()*, i8* ()** @fptr, align 8
- %call = call "rv_marker" i8* %0()
+ %call = call i8* %0() [ "clang.arc.attachedcall"(i64 0) ]
tail call void @foo2(i8* %call)
ret i8* %call
}
-declare void @foo(i64, i64, i64)
+declare i8* @foo(i64, i64, i64)
define dso_local void @rv_marker_multiarg(i64 %a, i64 %b, i64 %c) {
; CHECK-LABEL: rv_marker_multiarg
; CHECK-NEXT: bl foo
; SELDAG-NEXT: mov x29, x29
; GISEL-NOT: mov x29, x29
- call "rv_marker" void @foo(i64 %c, i64 %b, i64 %a)
+ call i8* @foo(i64 %c, i64 %b, i64 %a) [ "clang.arc.attachedcall"(i64 0) ]
ret void
}
-; RUN: opt < %s -deadargelim -S | not grep DEAD
+; RUN: opt < %s -deadargelim -S | FileCheck %s
+
+@g0 = global i8 0, align 8
+
+; CHECK-NOT: DEAD
; Dead arg only used by dead retval
define internal i32 @test(i32 %DEADARG) {
ret i32 %Y
}
+; The callee function's return type shouldn't be changed if the call result is
+; used.
+
+; CHECK-LABEL: define internal i8* @callee4()
+
+define internal i8* @callee4(i8* %a0) {
+ ret i8* @g0;
+}
+
+declare void @llvm.objc.clang.arc.noop.use(...)
+
+; CHECK-LABEL: define i8* @test4(
+; CHECK: tail call i8* @callee4() [ "clang.arc.attachedcall"(i64 0) ]
+
+define i8* @test4() {
+ %call = tail call i8* @callee4(i8* @g0) [ "clang.arc.attachedcall"(i64 0) ]
+ call void (...) @llvm.objc.clang.arc.noop.use(i8* %call)
+ ret i8* @g0
+}
--- /dev/null
+; RUN: opt < %s -inline -S | FileCheck %s
+
+@g0 = global i8* null, align 8
+declare i8* @foo0()
+
+define i8* @callee0_autoreleaseRV() {
+ %call = call i8* @foo0() [ "clang.arc.attachedcall"(i64 0) ]
+ %1 = tail call i8* @llvm.objc.autoreleaseReturnValue(i8* %call)
+ ret i8* %call
+}
+
+; CHECK-LABEL: define void @test0_autoreleaseRV(
+; CHECK: call i8* @foo0() [ "clang.arc.attachedcall"(i64 0) ]
+
+define void @test0_autoreleaseRV() {
+ %call = call i8* @callee0_autoreleaseRV() [ "clang.arc.attachedcall"(i64 0) ]
+ ret void
+}
+
+; CHECK-LABEL: define void @test0_claimRV_autoreleaseRV(
+; CHECK: %[[CALL:.*]] = call i8* @foo0() [ "clang.arc.attachedcall"(i64 0) ]
+; CHECK: call void @llvm.objc.release(i8* %[[CALL]])
+; CHECK-NEXT: ret void
+
+define void @test0_claimRV_autoreleaseRV() {
+ %call = call i8* @callee0_autoreleaseRV() [ "clang.arc.attachedcall"(i64 1) ]
+ ret void
+}
+
+; CHECK-LABEL: define void @test1_autoreleaseRV(
+; CHECK: invoke i8* @foo0() [ "clang.arc.attachedcall"(i64 0) ]
+
+define void @test1_autoreleaseRV() personality i8* bitcast (i32 (...)* @__gxx_personality_v0 to i8*) {
+entry:
+ %call = invoke i8* @callee0_autoreleaseRV() [ "clang.arc.attachedcall"(i64 0) ]
+ to label %invoke.cont unwind label %lpad
+
+invoke.cont:
+ ret void
+
+lpad:
+ %0 = landingpad { i8*, i32 }
+ cleanup
+ resume { i8*, i32 } undef
+}
+
+; CHECK-LABEL: define void @test1_claimRV_autoreleaseRV(
+; CHECK: %[[INVOKE:.*]] = invoke i8* @foo0() [ "clang.arc.attachedcall"(i64 0) ]
+; CHECK: call void @llvm.objc.release(i8* %[[INVOKE]])
+; CHECK-NEXT: br
+
+define void @test1_claimRV_autoreleaseRV() personality i8* bitcast (i32 (...)* @__gxx_personality_v0 to i8*) {
+entry:
+ %call = invoke i8* @callee0_autoreleaseRV() [ "clang.arc.attachedcall"(i64 1) ]
+ to label %invoke.cont unwind label %lpad
+
+invoke.cont:
+ ret void
+
+lpad:
+ %0 = landingpad { i8*, i32 }
+ cleanup
+ resume { i8*, i32 } undef
+}
+
+define i8* @callee1_no_autoreleaseRV() {
+ %call = call i8* @foo0()
+ ret i8* %call
+}
+
+; CHECK-LABEL: define void @test2_no_autoreleaseRV(
+; CHECK: call i8* @foo0() [ "clang.arc.attachedcall"(i64 0) ]
+; CHECK-NEXT: ret void
+
+define void @test2_no_autoreleaseRV() {
+ %call = call i8* @callee1_no_autoreleaseRV() [ "clang.arc.attachedcall"(i64 0) ]
+ ret void
+}
+
+; CHECK-LABEL: define void @test2_claimRV_no_autoreleaseRV(
+; CHECK: call i8* @foo0() [ "clang.arc.attachedcall"(i64 1) ]
+; CHECK-NEXT: ret void
+
+define void @test2_claimRV_no_autoreleaseRV() {
+ %call = call i8* @callee1_no_autoreleaseRV() [ "clang.arc.attachedcall"(i64 1) ]
+ ret void
+}
+
+; CHECK-LABEL: define void @test3_no_autoreleaseRV(
+; CHECK: invoke i8* @foo0() [ "clang.arc.attachedcall"(i64 0) ]
+
+define void @test3_no_autoreleaseRV() personality i8* bitcast (i32 (...)* @__gxx_personality_v0 to i8*) {
+entry:
+ %call = invoke i8* @callee1_no_autoreleaseRV() [ "clang.arc.attachedcall"(i64 0) ]
+ to label %invoke.cont unwind label %lpad
+
+invoke.cont:
+ ret void
+
+lpad:
+ %0 = landingpad { i8*, i32 }
+ cleanup
+ resume { i8*, i32 } undef
+}
+
+define i8* @callee2_nocall() {
+ %1 = load i8*, i8** @g0, align 8
+ ret i8* %1
+}
+
+; Check that a call to @llvm.objc.retain is inserted if there is no matching
+; autoreleaseRV call and no unannotated call producing the returned value.
+
+; CHECK-LABEL: define void @test4_nocall(
+; CHECK: %[[V0:.*]] = load i8*, i8** @g0,
+; CHECK-NEXT: call i8* @llvm.objc.retain(i8* %[[V0]])
+; CHECK-NEXT: ret void
+
+define void @test4_nocall() {
+ %call = call i8* @callee2_nocall() [ "clang.arc.attachedcall"(i64 0) ]
+ ret void
+}
+
+; CHECK-LABEL: define void @test4_claimRV_nocall(
+; CHECK: %[[V0:.*]] = load i8*, i8** @g0,
+; CHECK-NEXT: ret void
+
+define void @test4_claimRV_nocall() {
+ %call = call i8* @callee2_nocall() [ "clang.arc.attachedcall"(i64 1) ]
+ ret void
+}
+
+; Check that a call to @llvm.objc.retain is inserted if the call to @foo0
+; already has a "clang.arc.attachedcall" operand bundle. This is not expected
+; to happen in practice.
+
+define i8* @callee3_marker() {
+ %1 = call i8* @foo0() [ "clang.arc.attachedcall"(i64 0) ]
+ ret i8* %1
+}
+
+; CHECK-LABEL: define void @test5(
+; CHECK: %[[V0:.*]] = call i8* @foo0() [ "clang.arc.attachedcall"(i64 0) ]
+; CHECK-NEXT: call i8* @llvm.objc.retain(i8* %[[V0]])
+; CHECK-NEXT: ret void
+
+define void @test5() {
+ %call = call i8* @callee3_marker() [ "clang.arc.attachedcall"(i64 0) ]
+ ret void
+}
+
+; Don't pair up an autoreleaseRV in the callee and an retainRV in the caller
+; if there is an instruction between the ret instruction and the call to
+; autoreleaseRV that isn't a cast instruction.
+
+define i8* @callee0_autoreleaseRV2() {
+ %call = call i8* @foo0() [ "clang.arc.attachedcall"(i64 0) ]
+ %1 = tail call i8* @llvm.objc.autoreleaseReturnValue(i8* %call)
+ store i8* null, i8** @g0
+ ret i8* %call
+}
+
+; CHECK-LABEL: define void @test6(
+; CHECK: %[[V0:.*]] = call i8* @foo0() [ "clang.arc.attachedcall"(i64 0) ]
+; CHECK: call i8* @llvm.objc.autoreleaseReturnValue(i8* %[[V0]])
+; CHECK: store i8* null, i8** @g0, align 8
+; CHECK: call i8* @llvm.objc.retain(i8* %[[V0]])
+; CHECK-NEXT: ret void
+
+define void @test6() {
+ %call = call i8* @callee0_autoreleaseRV2() [ "clang.arc.attachedcall"(i64 0) ]
+ ret void
+}
+
+declare i8* @llvm.objc.autoreleaseReturnValue(i8*)
+declare i32 @__gxx_personality_v0(...)
; }
; }
+; CHECK-LABEL: define void @"\01?g@@YAXXZ"()
+; CHECK-LABEL: catch
+; CHECK: call void asm sideeffect "movl{{.*}}%ebp, %ebp{{.*}}", ""() [ "funclet"(token %1) ]
+
+; CHECK-LABEL: catch.1
+; CHECK: call void asm sideeffect "movl{{.*}}%ebp, %ebp{{.*}}", ""() [ "funclet"(token %1) ]
+
+; CHECK-LABEL: invoke.cont
+; CHECK: call void asm sideeffect "movl{{.*}}%ebp, %ebp{{.*}}", ""(){{$}}
+
define void @"\01?g@@YAXXZ"() personality i8* bitcast (i32 (...)* @__CxxFrameHandler3 to i8*) {
entry:
%call = invoke i8* @"\01?f@@YAPAUobjc_object@@XZ"()
ret void
}
+; CHECK-LABEL: define dso_local void @"?test_attr_claimRV@@YAXXZ"()
+; CHECK: %[[CALL4:.*]] = notail call i8* @"?noexcept_func@@YAPAUobjc_object@@XZ"() [ "clang.arc.attachedcall"(i64 1) ]
+; CHECK: call i8* @llvm.objc.unsafeClaimAutoreleasedReturnValue(i8* %[[CALL4]])
+
+; CHECK: %[[V1:.*]] = cleanuppad
+; CHECK: %[[CALL:.*]] = notail call i8* @"?noexcept_func@@YAPAUobjc_object@@XZ"() [ "funclet"(token %[[V1]]), "clang.arc.attachedcall"(i64 1) ]
+; CHECK: call i8* @llvm.objc.unsafeClaimAutoreleasedReturnValue(i8* %[[CALL]]) [ "funclet"(token %[[V1]]) ]
+
+define dso_local void @"?test_attr_claimRV@@YAXXZ"() local_unnamed_addr #0 personality i8* bitcast (i32 (...)* @__CxxFrameHandler3 to i8*) {
+entry:
+ invoke void @"?foo@@YAXXZ"()
+ to label %invoke.cont unwind label %ehcleanup
+
+invoke.cont: ; preds = %entry
+ %call.i4 = tail call i8* @"?noexcept_func@@YAPAUobjc_object@@XZ"() #2 [ "clang.arc.attachedcall"(i64 1) ]
+ ret void
+
+ehcleanup: ; preds = %entry
+ %0 = cleanuppad within none []
+ %call.i = call i8* @"?noexcept_func@@YAPAUobjc_object@@XZ"() #2 [ "funclet"(token %0), "clang.arc.attachedcall"(i64 1) ]
+ cleanupret from %0 unwind to caller
+}
+
declare i8* @"\01?f@@YAPAUobjc_object@@XZ"()
declare i32 @__CxxFrameHandler3(...)
+declare void @"?foo@@YAXXZ"()
+declare i8* @"?noexcept_func@@YAPAUobjc_object@@XZ"()
+
declare dllimport i8* @llvm.objc.retainAutoreleasedReturnValue(i8*)
+declare i8* @llvm.objc.unsafeClaimAutoreleasedReturnValue(i8*)
declare dllimport void @llvm.objc.release(i8*)
!llvm.module.flags = !{!0}
!0 = !{i32 1, !"clang.arc.retainAutoreleasedReturnValueMarker", !"movl\09%ebp, %ebp\09\09// marker for objc_retainAutoreleaseReturnValue"}
-
-; CHECK-LABEL: catch
-; CHECK: call void asm sideeffect "movl{{.*}}%ebp, %ebp{{.*}}", ""() [ "funclet"(token %1) ]
-
-; CHECK-LABEL: catch.1
-; CHECK: call void asm sideeffect "movl{{.*}}%ebp, %ebp{{.*}}", ""() [ "funclet"(token %1) ]
-
-; CHECK-LABEL: invoke.cont
-; CHECK: call void asm sideeffect "movl{{.*}}%ebp, %ebp{{.*}}", ""(){{$}}
--- /dev/null
+; RUN: opt -objc-arc-contract -S < %s | FileCheck %s
+; RUN: opt -passes=objc-arc-contract -S < %s | FileCheck %s
+
+; CHECK-LABEL: define void @test0() {
+; CHECK: %[[CALL:.*]] = notail call i8* @foo() [ "clang.arc.attachedcall"(i64 0) ]
+; CHECK: call i8* @llvm.objc.retainAutoreleasedReturnValue(i8* %[[CALL]])
+
+define void @test0() {
+ %call1 = call i8* @foo() [ "clang.arc.attachedcall"(i64 0) ]
+ ret void
+}
+
+; CHECK-LABEL: define void @test1() {
+; CHECK: %[[CALL:.*]] = notail call i8* @foo() [ "clang.arc.attachedcall"(i64 1) ]
+; CHECK: call i8* @llvm.objc.unsafeClaimAutoreleasedReturnValue(i8* %[[CALL]])
+
+define void @test1() {
+ %call1 = call i8* @foo() [ "clang.arc.attachedcall"(i64 1) ]
+ ret void
+}
+
+; CHECK-LABEL:define i8* @test2(
+; CHECK: %[[CALL1:.*]] = invoke i8* @foo() [ "clang.arc.attachedcall"(i64 0) ]
+
+; CHECK: %[[V0:.*]] = call i8* @llvm.objc.retainAutoreleasedReturnValue(i8* %[[CALL1]])
+; CHECK-NEXT: br
+
+; CHECK: %[[CALL3:.*]] = invoke i8* @foo() [ "clang.arc.attachedcall"(i64 0) ]
+
+; CHECK: %[[V2:.*]] = call i8* @llvm.objc.retainAutoreleasedReturnValue(i8* %[[CALL3]])
+; CHECK-NEXT: br
+
+; CHECK: %[[RETVAL:.*]] = phi i8* [ %[[V0]], {{.*}} ], [ %[[V2]], {{.*}} ]
+; CHECK: ret i8* %[[RETVAL]]
+
+define i8* @test2(i1 zeroext %b) personality i8* bitcast (i32 (...)* @__gxx_personality_v0 to i8*) {
+entry:
+ br i1 %b, label %if.then, label %if.end
+
+if.then:
+ %call1 = invoke i8* @foo() [ "clang.arc.attachedcall"(i64 0) ]
+ to label %cleanup unwind label %lpad
+
+lpad:
+ %0 = landingpad { i8*, i32 }
+ cleanup
+ resume { i8*, i32 } undef
+
+if.end:
+ %call3 = invoke i8* @foo() [ "clang.arc.attachedcall"(i64 0) ]
+ to label %cleanup unwind label %lpad
+
+cleanup:
+ %retval.0 = phi i8* [ %call1, %if.then ], [ %call3, %if.end ]
+ ret i8* %retval.0
+}
+
+declare i8* @foo()
+declare i32 @__gxx_personality_v0(...)
+
+!llvm.module.flags = !{!0}
+
+!0 = !{i32 1, !"clang.arc.retainAutoreleasedReturnValueMarker", !"mov\09fp, fp\09\09// marker for objc_retainAutoreleaseReturnValue"}
ret void
}
+; CHECK-LABEL: define void @test14(
+; CHECK-NOT: clang.arc.noop.use
+; CHECK: ret void
+define void @test14(i8* %a, i8* %b) {
+ call void (...) @llvm.objc.clang.arc.noop.use(i8* %a, i8* %b) nounwind
+ ret void
+}
declare void @llvm.objc.clang.arc.use(...) nounwind
+declare void @llvm.objc.clang.arc.noop.use(...) nounwind
; CHECK: attributes [[NUW]] = { nounwind }
declare i8* @llvm.objc.autorelease(i8*)
declare void @llvm.objc.clang.arc.use(...)
+declare void @llvm.objc.clang.arc.noop.use(...)
declare void @test0_helper(i8*, i8**)
+declare void @can_release(i8*)
; Ensure that we honor clang.arc.use as a use and don't miscompile
; the reduced test case from <rdar://13195034>.
ret void
}
+; ARC optimizer should be able to safely remove the retain/release pair as the
+; call to @llvm.objc.clang.arc.noop.use is a no-op.
+; The CHECK-NEXT chain asserts the retain and release are gone while the
+; noop.use call itself is kept as a use of %x after @can_release runs.
+
+; CHECK-LABEL: define void @test_arc_noop_use(
+; CHECK-NEXT: call void @can_release(i8* %x)
+; CHECK-NEXT: call void (...) @llvm.objc.clang.arc.noop.use(
+; CHECK-NEXT: ret void
+
+define void @test_arc_noop_use(i8** %out, i8* %x) {
+  call i8* @llvm.objc.retain(i8* %x)
+  call void @can_release(i8* %x)
+  call void (...) @llvm.objc.clang.arc.noop.use(i8* %x)
+  call void @llvm.objc.release(i8* %x), !clang.imprecise_release !0
+  ret void
+}
!0 = !{}
declare void @llvm.objc.autoreleasePoolPop(i8*)
declare void @llvm.objc.autoreleasePoolPush()
declare i8* @llvm.objc.retainBlock(i8*)
+declare void @llvm.objc.clang.arc.noop.use(...)
declare i8* @objc_retainedObject(i8*)
declare i8* @objc_unretainedObject(i8*)
ret i8* %v3
}
+; Remove operand bundle "clang.arc.attachedcall" and the autoreleaseRV call if the call
+; is a tail call.
+; The paired noop.use call is deleted as well: the CHECK-NEXT lines require the
+; call result to be returned directly.
+
+; CHECK-LABEL: define i8* @test31(
+; CHECK-NEXT: %[[CALL:.*]] = tail call i8* @returner()
+; CHECK-NEXT: ret i8* %[[CALL]]
+
+define i8* @test31() {
+  %call = tail call i8* @returner() [ "clang.arc.attachedcall"(i64 0) ]
+  call void (...) @llvm.objc.clang.arc.noop.use(i8* %call)
+  %1 = call i8* @llvm.objc.autoreleaseReturnValue(i8* %call)
+  ret i8* %1
+}
+
+; Negative counterpart of @test31: the call is NOT a tail call, so the bundle,
+; the noop.use call, and the autoreleaseRV call must all be preserved.
+; CHECK-LABEL: define i8* @test32(
+; CHECK: %[[CALL:.*]] = call i8* @returner() [ "clang.arc.attachedcall"(i64 0) ]
+; CHECK: call void (...) @llvm.objc.clang.arc.noop.use(i8* %[[CALL]])
+; CHECK: call i8* @llvm.objc.autoreleaseReturnValue(i8* %[[CALL]])
+
+define i8* @test32() {
+  %call = call i8* @returner() [ "clang.arc.attachedcall"(i64 0) ]
+  call void (...) @llvm.objc.clang.arc.noop.use(i8* %call)
+  %1 = call i8* @llvm.objc.autoreleaseReturnValue(i8* %call)
+  ret i8* %1
+}
+
!0 = !{}
; CHECK: attributes [[NUW]] = { nounwind }
--- /dev/null
+; RUN: opt < %s -ipsccp -S | FileCheck %s
+; Return value can't be zapped if there is a call that has operand bundle
+; "clang.arc.attachedcall".
+
+@g0 = global i8 zeroinitializer, align 1
+
+; CHECK-LABEL: @foo(
+; CHECK: ret i8* @g0
+
+; @foo is internal, but IPSCCP must not zap its return value to undef: a
+; caller's "clang.arc.attachedcall" bundle consumes the returned pointer.
+define internal i8* @foo() {
+  ret i8* @g0
+}
+
+; CHECK-LABEL: @test(
+; CHECK: %[[R:.*]] = call i8* @foo()
+; CHECK: call void (...) @llvm.objc.clang.arc.noop.use(i8* %[[R]])
+
+; The attachedcall bundle keeps @foo's return value live, so IPSCCP must not
+; replace %r with a constant; the noop.use call must still consume %r.
+define void @test() {
+  %r = call i8* @foo() [ "clang.arc.attachedcall"(i64 1) ]
+  call void (...) @llvm.objc.clang.arc.noop.use(i8* %r)
+  ret void
+}
+
+declare void @llvm.objc.clang.arc.noop.use(...)
exit:
ret void
}
+
+; CHECK-LABEL: @test_clang_arc_attachedcall(
+; CHECK: tail call i8* @getObj(
+
+declare i8* @getObj()
+
+; NOTE(review): the CHECK expects the call to be marked "tail" even though it
+; carries the attachedcall bundle — confirm against the pass this file runs,
+; which is not visible in this hunk.
+define i8* @test_clang_arc_attachedcall() {
+  %r = call i8* @getObj() [ "clang.arc.attachedcall"(i64 0) ]
+  ret i8* %r
+}
; RUN: not opt -verify < %s 2>&1 | FileCheck %s
+%0 = type opaque
+declare void @g()
+declare %0* @foo0()
+declare i8 @foo1()
+
; Operand bundles uses are like regular uses, and need to be dominated
; by their defs.
-declare void @g()
-
define void @f0(i32* %ptr) {
; CHECK: Instruction does not dominate all uses!
; CHECK-NEXT: %x = add i32 42, 1
%x = add i32 42, 1
ret void
}
+
+; The IR verifier must reject (a) more than one "clang.arc.attachedcall"
+; operand bundle on a call and (b) the bundle on a call that does not return a
+; pointer. The first call below (single bundle, pointer return) is the
+; well-formed baseline and produces no diagnostic.
+define void @f_clang_arc_attachedcall() {
+; CHECK: Multiple "clang.arc.attachedcall" operand bundles
+; CHECK-NEXT: call %0* @foo0() [ "clang.arc.attachedcall"(i64 0), "clang.arc.attachedcall"(i64 0) ]
+; CHECK-NEXT: must call a function returning a pointer
+; CHECK-NEXT: call i8 @foo1() [ "clang.arc.attachedcall"(i64 0) ]
+
+  call %0* @foo0() [ "clang.arc.attachedcall"(i64 0) ]
+  call %0* @foo0() [ "clang.arc.attachedcall"(i64 0), "clang.arc.attachedcall"(i64 0) ]
+  call i8 @foo1() [ "clang.arc.attachedcall"(i64 0) ]
+  ret void
+}