if (CGM.getCodeGenOpts().StrictVTablePointers &&
CGM.getCodeGenOpts().OptimizationLevel > 0 &&
isInitializerOfDynamicClass(*B))
- CXXThisValue = Builder.CreateInvariantGroupBarrier(LoadCXXThis());
+ CXXThisValue = Builder.CreateLaunderInvariantGroup(LoadCXXThis());
EmitBaseInitializer(*this, ClassDecl, *B, CtorType);
}
if (CGM.getCodeGenOpts().StrictVTablePointers &&
CGM.getCodeGenOpts().OptimizationLevel > 0 &&
isInitializerOfDynamicClass(*B))
- CXXThisValue = Builder.CreateInvariantGroupBarrier(LoadCXXThis());
+ CXXThisValue = Builder.CreateLaunderInvariantGroup(LoadCXXThis());
EmitBaseInitializer(*this, ClassDecl, *B, CtorType);
}
// Initialize the vtable pointers before entering the body.
if (!CanSkipVTablePointerInitialization(*this, Dtor)) {
- // Insert the llvm.invariant.group.barrier intrinsic before initializing
+ // Insert the llvm.launder.invariant.group intrinsic before initializing
// the vptrs to cancel any previous assumptions we might have made.
if (CGM.getCodeGenOpts().StrictVTablePointers &&
CGM.getCodeGenOpts().OptimizationLevel > 0)
- CXXThisValue = Builder.CreateInvariantGroupBarrier(LoadCXXThis());
+ CXXThisValue = Builder.CreateLaunderInvariantGroup(LoadCXXThis());
InitializeVTablePointers(Dtor->getParent());
}
hasAnyVptr(FieldType, getContext()))
// Because unions can easily skip invariant.group barriers, we need to add
// a barrier every time a CXXRecord field with a vptr is referenced.
- addr = Address(Builder.CreateInvariantGroupBarrier(addr.getPointer()),
+ addr = Address(Builder.CreateLaunderInvariantGroup(addr.getPointer()),
addr.getAlignment());
} else {
// For structs, we GEP to the field that the record layout suggests.
llvm::Type *elementTy = ConvertTypeForMem(allocType);
Address result = Builder.CreateElementBitCast(allocation, elementTy);
- // Passing pointer through invariant.group.barrier to avoid propagation of
+ // Pass the pointer through launder.invariant.group to avoid propagation of
// vptr information that may be included in the previous type.
// To avoid breaking LTO between different optimization levels, we do this
// regardless of the optimization level.
if (CGM.getCodeGenOpts().StrictVTablePointers &&
allocator->isReservedGlobalPlacementOperator())
- result = Address(Builder.CreateInvariantGroupBarrier(result.getPointer()),
+ result = Address(Builder.CreateLaunderInvariantGroup(result.getPointer()),
result.getAlignment());
EmitNewInitializer(*this, E, allocType, elementTy, result, numElements,
// Checking D::D()
// CHECK-LABEL: define linkonce_odr void @_ZN1DC2Ev(
-// CHECK: = call i8* @llvm.invariant.group.barrier.p0i8(i8*
+// CHECK: = call i8* @llvm.launder.invariant.group.p0i8(i8*
// CHECK: call void @_ZN1AC2Ev(%struct.A*
// CHECK: store {{.*}} !invariant.group ![[MD]]
};
// CHECK-NEW-LABEL: define void @_Z12LocalObjectsv()
-// CHECK-NEW-NOT: @llvm.invariant.group.barrier.p0i8(
+// CHECK-NEW-NOT: @llvm.launder.invariant.group.p0i8(
// CHECK-NEW-LABEL: {{^}}}
void LocalObjects() {
DynamicBase1 DB;
struct DynamicFromVirtualStatic1;
// CHECK-CTORS-LABEL: define linkonce_odr void @_ZN25DynamicFromVirtualStatic1C1Ev
-// CHECK-CTORS-NOT: @llvm.invariant.group.barrier.p0i8(
+// CHECK-CTORS-NOT: @llvm.launder.invariant.group.p0i8(
// CHECK-CTORS-LABEL: {{^}}}
struct DynamicFrom2Virtuals;
// CHECK-CTORS-LABEL: define linkonce_odr void @_ZN20DynamicFrom2VirtualsC1Ev
-// CHECK-CTORS: call i8* @llvm.invariant.group.barrier.p0i8(
+// CHECK-CTORS: call i8* @llvm.launder.invariant.group.p0i8(
// CHECK-CTORS-LABEL: {{^}}}
// CHECK-NEW-LABEL: define void @_Z9Pointers1v()
-// CHECK-NEW-NOT: @llvm.invariant.group.barrier.p0i8(
+// CHECK-NEW-NOT: @llvm.launder.invariant.group.p0i8(
// CHECK-NEW-LABEL: call void @_ZN12DynamicBase1C1Ev(
-// CHECK-NEW: %[[THIS3:.*]] = call i8* @llvm.invariant.group.barrier.p0i8(i8* %[[THIS2:.*]])
+// CHECK-NEW: %[[THIS3:.*]] = call i8* @llvm.launder.invariant.group.p0i8(i8* %[[THIS2:.*]])
// CHECK-NEW: %[[THIS4:.*]] = bitcast i8* %[[THIS3]] to %[[DynamicDerived:.*]]*
// CHECK-NEW: call void @_ZN14DynamicDerivedC1Ev(%[[DynamicDerived:.*]]* %[[THIS4]])
// CHECK-NEW-LABEL: {{^}}}
// CHECK-NEW-LABEL: define void @_Z14HackingObjectsv()
// CHECK-NEW: call void @_ZN12DynamicBase1C1Ev
-// CHECK-NEW: call i8* @llvm.invariant.group.barrier.p0i8(
+// CHECK-NEW: call i8* @llvm.launder.invariant.group.p0i8(
// CHECK-NEW: call void @_ZN14DynamicDerivedC1Ev(
-// CHECK-NEW: call i8* @llvm.invariant.group.barrier.p0i8(
+// CHECK-NEW: call i8* @llvm.launder.invariant.group.p0i8(
// CHECK-NEW: call void @_ZN12DynamicBase1C1Ev(
// CHECK-NEW-LABEL: {{^}}}
void HackingObjects() {
/*** Testing Constructors ***/
struct DynamicBase1;
// CHECK-CTORS-LABEL: define linkonce_odr void @_ZN12DynamicBase1C2Ev(
-// CHECK-CTORS-NOT: call i8* @llvm.invariant.group.barrier.p0i8(
+// CHECK-CTORS-NOT: call i8* @llvm.launder.invariant.group.p0i8(
// CHECK-CTORS-LABEL: {{^}}}
// CHECK-CTORS-LABEL: define linkonce_odr void @_ZN14DynamicDerivedC2Ev(
// CHECK-CTORS: %[[THIS0:.*]] = load %[[DynamicDerived:.*]]*, %[[DynamicDerived]]** {{.*}}
// CHECK-CTORS: %[[THIS1:.*]] = bitcast %[[DynamicDerived:.*]]* %[[THIS0]] to i8*
-// CHECK-CTORS: %[[THIS2:.*]] = call i8* @llvm.invariant.group.barrier.p0i8(i8* %[[THIS1:.*]])
+// CHECK-CTORS: %[[THIS2:.*]] = call i8* @llvm.launder.invariant.group.p0i8(i8* %[[THIS1:.*]])
// CHECK-CTORS: %[[THIS3:.*]] = bitcast i8* %[[THIS2]] to %[[DynamicDerived]]*
// CHECK-CTORS: %[[THIS4:.*]] = bitcast %[[DynamicDerived]]* %[[THIS3]] to %[[DynamicBase:.*]]*
// CHECK-CTORS: call void @_ZN12DynamicBase1C2Ev(%[[DynamicBase]]* %[[THIS4]])
// CHECK-CTORS: %[[THIS0:.*]] = load %[[CLASS:.*]]*, %[[CLASS]]** {{.*}}
// CHECK-CTORS: %[[THIS1:.*]] = bitcast %[[CLASS:.*]]* %[[THIS0]] to i8*
-// CHECK-CTORS: %[[THIS2:.*]] = call i8* @llvm.invariant.group.barrier.p0i8(i8* %[[THIS1]])
+// CHECK-CTORS: %[[THIS2:.*]] = call i8* @llvm.launder.invariant.group.p0i8(i8* %[[THIS1]])
// CHECK-CTORS: %[[THIS3:.*]] = bitcast i8* %[[THIS2]] to %[[CLASS]]*
// CHECK-CTORS: %[[THIS4:.*]] = bitcast %[[CLASS]]* %[[THIS3]] to %[[BASE_CLASS:.*]]*
// CHECK-CTORS: call void @_ZN12DynamicBase1C2Ev(%[[BASE_CLASS]]* %[[THIS4]])
-// CHECK-CTORS: call i8* @llvm.invariant.group.barrier.p0i8(
+// CHECK-CTORS: call i8* @llvm.launder.invariant.group.p0i8(
// CHECK-CTORS: call void @_ZN12DynamicBase2C2Ev(
-// CHECK-CTORS-NOT: @llvm.invariant.group.barrier.p0i8
+// CHECK-CTORS-NOT: @llvm.launder.invariant.group.p0i8
// CHECK-CTORS: %[[THIS10:.*]] = bitcast %struct.DynamicDerivedMultiple* %[[THIS0]] to i32 (...)***
struct DynamicFromStatic;
// CHECK-CTORS-LABEL: define linkonce_odr void @_ZN17DynamicFromStaticC2Ev(
-// CHECK-CTORS-NOT: @llvm.invariant.group.barrier.p0i8(
+// CHECK-CTORS-NOT: @llvm.launder.invariant.group.p0i8(
// CHECK-CTORS-LABEL: {{^}}}
struct A {
void UnionsBarriers(U *u) {
// CHECK-NEW: call void @_Z9changeToBP1U(
changeToB(u);
- // CHECK-NEW: call i8* @llvm.invariant.group.barrier.p0i8(i8*
+ // CHECK-NEW: call i8* @llvm.launder.invariant.group.p0i8(i8*
// CHECK-NEW: call void @_Z2g2P1A(%struct.A*
g2(&u->b);
// CHECK-NEW: call void @_Z9changeToAP1U(%union.U*
changeToA(u);
- // CHECK-NEW: call i8* @llvm.invariant.group.barrier.p0i8(i8*
+ // CHECK-NEW: call i8* @llvm.launder.invariant.group.p0i8(i8*
// call void @_Z2g2P1A(%struct.A* %a)
g2(&u->a);
- // CHECK-NEW-NOT: call i8* @llvm.invariant.group.barrier.p0i8(i8*
+ // CHECK-NEW-NOT: call i8* @llvm.launder.invariant.group.p0i8(i8*
}
struct HoldingVirtuals {
// CHECK-NEW-LABEL: noBarriers
void noBarriers(NoVptrs &noVptrs) {
- // CHECK-NEW-NOT: call i8* @llvm.invariant.group.barrier.p0i8(i8*
+ // CHECK-NEW-NOT: call i8* @llvm.launder.invariant.group.p0i8(i8*
// CHECK-NEW: 42
noVptrs.a += 42;
- // CHECK-NEW-NOT: call i8* @llvm.invariant.group.barrier.p0i8(i8*
+ // CHECK-NEW-NOT: call i8* @llvm.launder.invariant.group.p0i8(i8*
// CHECK-NEW: call void @_Z4takeR12AnotherEmpty(
take(noVptrs.empty);
}
// CHECK-NEW-LABEL: define void @_Z15UnionsBarriers2R2U2
void UnionsBarriers2(U2 &u) {
- // CHECK-NEW-NOT: call i8* @llvm.invariant.group.barrier.p0i8(i8*
+ // CHECK-NEW-NOT: call i8* @llvm.launder.invariant.group.p0i8(i8*
// CHECK-NEW: 42
u.z += 42;
- // CHECK-NEW: call i8* @llvm.invariant.group.barrier.p0i8(i8*
+ // CHECK-NEW: call i8* @llvm.launder.invariant.group.p0i8(i8*
// CHECK-NEW: call void @_Z4takeR15HoldingVirtuals(
take(u.h);
}
void take(VirtualInheritance &);
void UnionsBarrier3(U3 &u) {
- // CHECK-NEW-NOT: call i8* @llvm.invariant.group.barrier.p0i8(i8*
+ // CHECK-NEW-NOT: call i8* @llvm.launder.invariant.group.p0i8(i8*
// CHECK-NEW: 42
u.z += 42;
- // CHECK-NEW: call i8* @llvm.invariant.group.barrier.p0i8(i8*
+ // CHECK-NEW: call i8* @llvm.launder.invariant.group.p0i8(i8*
// CHECK-NEW: call void @_Z4takeR13VirtualInBase(
take(u.v1);
- // CHECK-NEW: call i8* @llvm.invariant.group.barrier.p0i8(i8*
+ // CHECK-NEW: call i8* @llvm.launder.invariant.group.p0i8(i8*
// CHECK-NEW: call void @_Z4takeR13VirtualInBase(
take(u.v2);
- // CHECK-NEW: call i8* @llvm.invariant.group.barrier.p0i8(i8*
+ // CHECK-NEW: call i8* @llvm.launder.invariant.group.p0i8(i8*
// CHECK-NEW: call void @_Z4takeR18VirtualInheritance(
take(u.v3);
}
/** DTORS **/
// CHECK-DTORS-LABEL: define linkonce_odr void @_ZN10StaticBaseD2Ev(
-// CHECK-DTORS-NOT: call i8* @llvm.invariant.group.barrier.p0i8(
+// CHECK-DTORS-NOT: call i8* @llvm.launder.invariant.group.p0i8(
// CHECK-DTORS-LABEL: {{^}}}
// CHECK-DTORS-LABEL: {{^}}}
// CHECK-DTORS-LABEL: define linkonce_odr void @_ZN17DynamicFromStaticD2Ev
-// CHECK-DTORS-NOT: call i8* @llvm.invariant.group.barrier.p0i8(
+// CHECK-DTORS-NOT: call i8* @llvm.launder.invariant.group.p0i8(
// CHECK-DTORS-LABEL: {{^}}}
// CHECK-DTORS-LABEL: define linkonce_odr void @_ZN22DynamicDerivedMultipleD2Ev(
// CHECK-DTORS-LABEL: define linkonce_odr void @_ZN12DynamicBase2D2Ev(
-// CHECK-DTORS: call i8* @llvm.invariant.group.barrier.p0i8(
+// CHECK-DTORS: call i8* @llvm.launder.invariant.group.p0i8(
// CHECK-DTORS-LABEL: {{^}}}
// CHECK-DTORS-LABEL: define linkonce_odr void @_ZN12DynamicBase1D2Ev
-// CHECK-DTORS: call i8* @llvm.invariant.group.barrier.p0i8(
+// CHECK-DTORS: call i8* @llvm.launder.invariant.group.p0i8(
// CHECK-DTORS-LABEL: {{^}}}
// CHECK-DTORS-LABEL: define linkonce_odr void @_ZN14DynamicDerivedD2Ev
-// CHECK-DTORS-NOT: call i8* @llvm.invariant.group.barrier.p0i8(
+// CHECK-DTORS-NOT: call i8* @llvm.launder.invariant.group.p0i8(
// CHECK-DTORS-LABEL: {{^}}}
The existence of the ``invariant.group`` metadata on the instruction tells
the optimizer that every ``load`` and ``store`` to the same pointer operand
within the same invariant group can be assumed to load or store the same
-value (but see the ``llvm.invariant.group.barrier`` intrinsic which affects
+value (but see the ``llvm.launder.invariant.group`` intrinsic, which affects
when two pointers are considered the same). Pointers returned by bitcast or
getelementptr with only zero indices are considered the same.
store i8 %unknownValue, i8* %ptr, !invariant.group !0 ; Can assume that %unknownValue == 42
call void @foo(i8* %ptr)
- %newPtr2 = call i8* @llvm.invariant.group.barrier(i8* %ptr)
- %d = load i8, i8* %newPtr2, !invariant.group !0 ; Can't step through invariant.group.barrier to get value of %ptr
+ %newPtr2 = call i8* @llvm.launder.invariant.group(i8* %ptr)
+ %d = load i8, i8* %newPtr2, !invariant.group !0 ; Can't step through launder.invariant.group to get value of %ptr
...
declare void @foo(i8*)
declare i8* @getPointer(i8*)
- declare i8* @llvm.invariant.group.barrier(i8*)
+ declare i8* @llvm.launder.invariant.group(i8*)
!0 = !{!"magic ptr"}
!1 = !{!"other ptr"}
This intrinsic indicates that the memory is mutable again.
-'``llvm.invariant.group.barrier``' Intrinsic
+'``llvm.launder.invariant.group``' Intrinsic
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Syntax:
::
- declare i8* @llvm.invariant.group.barrier.p0i8(i8* <ptr>)
+ declare i8* @llvm.launder.invariant.group.p0i8(i8* <ptr>)
Overview:
"""""""""
-The '``llvm.invariant.group.barrier``' intrinsic can be used when an invariant
+The '``llvm.launder.invariant.group``' intrinsic can be used when an invariant
established by invariant.group metadata no longer holds, to obtain a new pointer
value that does not carry the invariant information. It is an experimental
intrinsic, which means that its semantics might change in the future.
Arguments:
""""""""""
-The ``llvm.invariant.group.barrier`` takes only one argument, which is
+The ``llvm.launder.invariant.group`` intrinsic takes only one argument, which is
the pointer to the memory for which the ``invariant.group`` no longer holds.
Semantics:
Returns another pointer that aliases its argument but which is considered different
for the purposes of ``load``/``store`` ``invariant.group`` metadata.
+It does not read any accessible memory and its execution can be speculated.
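For intuition, a hypothetical C++ sketch of where a frontend needs this (the
class and function names are illustrative, not taken from this patch): under
Clang's ``-fstrict-vtable-pointers``, reusing storage for an object of a
different dynamic type invalidates previously loaded vptrs, so the pointer is
laundered before the new object is used::

    #include <new>
    struct A { virtual void f() {} };
    struct B : A { void f() override {} };

    void reuse(A *a) {
      a->f();           // vptr load may carry !invariant.group
      a->~A();
      A *b = new (a) B; // the frontend launders the pointer here
      b->f();           // vptr must be reloaded, not forwarded
    }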
.. _constrainedfp:
have changed. Alignment is no longer an argument and is instead conveyed as
parameter attributes.
+* The ``invariant.group.barrier`` intrinsic has been renamed to
+  ``launder.invariant.group``.
+
Changes to the ARM Backend
--------------------------
Name);
}
- /// Create an invariant.group.barrier intrinsic call, that stops
- /// optimizer to propagate equality using invariant.group metadata.
- /// If Ptr type is different from pointer to i8, it's casted to pointer to i8
- /// in the same address space before call and casted back to Ptr type after
- /// call.
- Value *CreateInvariantGroupBarrier(Value *Ptr) {
+ /// Create a launder.invariant.group intrinsic call. If Ptr's type is not
+ /// pointer to i8, it is cast to pointer to i8 in the same address space
+ /// before the call, and the result is cast back to Ptr's type afterwards.
+ Value *CreateLaunderInvariantGroup(Value *Ptr) {
assert(isa<PointerType>(Ptr->getType()) &&
- "invariant.group.barrier only applies to pointers.");
+ "launder.invariant.group only applies to pointers.");
auto *PtrType = Ptr->getType();
auto *Int8PtrTy = getInt8PtrTy(PtrType->getPointerAddressSpace());
if (PtrType != Int8PtrTy)
Ptr = CreateBitCast(Ptr, Int8PtrTy);
Module *M = BB->getParent()->getParent();
- Function *FnInvariantGroupBarrier = Intrinsic::getDeclaration(
- M, Intrinsic::invariant_group_barrier, {Int8PtrTy});
+ Function *FnLaunderInvariantGroup = Intrinsic::getDeclaration(
+ M, Intrinsic::launder_invariant_group, {Int8PtrTy});
- assert(FnInvariantGroupBarrier->getReturnType() == Int8PtrTy &&
- FnInvariantGroupBarrier->getFunctionType()->getParamType(0) ==
+ assert(FnLaunderInvariantGroup->getReturnType() == Int8PtrTy &&
+ FnLaunderInvariantGroup->getFunctionType()->getParamType(0) ==
Int8PtrTy &&
- "InvariantGroupBarrier should take and return the same type");
+ "LaunderInvariantGroup should take and return the same type");
- CallInst *Fn = CreateCall(FnInvariantGroupBarrier, {Ptr});
+ CallInst *Fn = CreateCall(FnLaunderInvariantGroup, {Ptr});
if (PtrType != Int8PtrTy)
return CreateBitCast(Fn, PtrType);
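// A minimal usage sketch (this wrapper is hypothetical, for illustration
// only; assumes llvm/IR/IRBuilder.h is included): callers may pass a pointer
// of any pointer type and receive a laundered value of the same type, with
// the i8* bitcasts handled internally.
static llvm::Value *launderForExample(llvm::IRBuilder<> &Builder,
                                      llvm::Value *Ptr) {
  return Builder.CreateLaunderInvariantGroup(Ptr);
}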
llvm_anyptr_ty],
[IntrArgMemOnly, NoCapture<2>]>;
-// invariant.group.barrier can't be marked with 'readnone' (IntrNoMem),
+// launder.invariant.group can't be marked with 'readnone' (IntrNoMem),
// because it would cause CSE of two barriers with the same argument.
// Inaccessiblememonly says that the barrier doesn't read the argument,
// but it changes state not accessible to this module. The argument also
// can't be marked with the 'returned' attribute, because that would allow
// the optimizer to remove the barrier.
// Note that it is still experimental, which means that its semantics
// might change in the future.
-def int_invariant_group_barrier : Intrinsic<[llvm_anyptr_ty],
+def int_launder_invariant_group : Intrinsic<[llvm_anyptr_ty],
[LLVMMatchType<0>],
- [IntrInaccessibleMemOnly]>;
+ [IntrInaccessibleMemOnly, IntrSpeculatable]>;
//===------------------------ Stackmap Intrinsics -------------------------===//
//
static_cast<const Value *>(this)->stripPointerCasts());
}
- /// Strip off pointer casts, all-zero GEPs, aliases and barriers.
+ /// Strip off pointer casts, all-zero GEPs, aliases and invariant group
+ /// info.
///
/// Returns the original uncasted value. If this is called on a non-pointer
/// value, it returns 'this'. This function should be used only in
/// alias analysis.
- const Value *stripPointerCastsAndBarriers() const;
- Value *stripPointerCastsAndBarriers() {
+ const Value *stripPointerCastsAndInvariantGroups() const;
+ Value *stripPointerCastsAndInvariantGroups() {
return const_cast<Value *>(
- static_cast<const Value *>(this)->stripPointerCastsAndBarriers());
+ static_cast<const Value *>(this)->stripPointerCastsAndInvariantGroups());
}
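// A hedged sketch of the intended use (the helper below is hypothetical;
// compare the BasicAliasAnalysis call sites further down): two pointers that
// differ only by bitcasts, all-zero GEPs, aliases, or
// launder.invariant.group calls strip to the same value.
static bool sameStrippedPointer(const llvm::Value *A, const llvm::Value *B) {
  return A->stripPointerCastsAndInvariantGroups() ==
         B->stripPointerCastsAndInvariantGroups();
}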
/// Strip off pointer casts and all-zero GEPs.
const GEPOperator *GEP2,
uint64_t V2Size,
const DataLayout &DL) {
- assert(GEP1->getPointerOperand()->stripPointerCastsAndBarriers() ==
- GEP2->getPointerOperand()->stripPointerCastsAndBarriers() &&
+ assert(GEP1->getPointerOperand()->stripPointerCastsAndInvariantGroups() ==
+ GEP2->getPointerOperand()->stripPointerCastsAndInvariantGroups() &&
GEP1->getPointerOperandType() == GEP2->getPointerOperandType() &&
"Expected GEPs with the same pointer operand");
// If we know the two GEPs are based off of the exact same pointer (and not
// just the same underlying object), see if that tells us anything about
// the resulting pointers.
- if (GEP1->getPointerOperand()->stripPointerCastsAndBarriers() ==
- GEP2->getPointerOperand()->stripPointerCastsAndBarriers() &&
+ if (GEP1->getPointerOperand()->stripPointerCastsAndInvariantGroups() ==
+ GEP2->getPointerOperand()->stripPointerCastsAndInvariantGroups() &&
GEP1->getPointerOperandType() == GEP2->getPointerOperandType()) {
AliasResult R = aliasSameBasePointerGEPs(GEP1, V1Size, GEP2, V2Size, DL);
// If we couldn't find anything interesting, don't abandon just yet.
return NoAlias;
// Strip off any casts if they exist.
- V1 = V1->stripPointerCastsAndBarriers();
- V2 = V2->stripPointerCastsAndBarriers();
+ V1 = V1->stripPointerCastsAndInvariantGroups();
+ V2 = V2->stripPointerCastsAndInvariantGroups();
// If V1 or V2 is undef, the result is NoAlias because we can always pick a
// value for undef that aliases nothing in the program.
const Instruction *I) {
// If the memory can't be changed, then loads of the memory can't be
// clobbered.
- //
- // FIXME: We should handle invariant groups, as well. It's a bit harder,
- // because we need to pay close attention to invariant group barriers.
return isa<LoadInst>(I) && (I->getMetadata(LLVMContext::MD_invariant_load) ||
AA.pointsToConstantMemory(cast<LoadInst>(I)->
getPointerOperand()));
InsertedInsts.insert(ExtVal);
return true;
}
- case Intrinsic::invariant_group_barrier:
+ case Intrinsic::launder_invariant_group:
II->replaceAllUsesWith(II->getArgOperand(0));
II->eraseFromParent();
return true;
updateValueMap(II, ResultReg);
return true;
}
- case Intrinsic::invariant_group_barrier:
+ case Intrinsic::launder_invariant_group:
case Intrinsic::expect: {
unsigned ResultReg = getRegForValue(II->getArgOperand(0));
if (!ResultReg)
}
case Intrinsic::annotation:
case Intrinsic::ptr_annotation:
- case Intrinsic::invariant_group_barrier:
+ case Intrinsic::launder_invariant_group:
// Drop the intrinsic, but forward the value
setValue(&I, getValue(I.getOperand(0)));
return nullptr;
return true;
}
}
+  if (Name.startswith("invariant.group.barrier")) {
+    // Rename invariant.group.barrier to launder.invariant.group.
+    auto Args = F->getFunctionType()->params();
+    Type *ObjectPtr[1] = {Args[0]};
+    rename(F);
+    NewFn = Intrinsic::getDeclaration(F->getParent(),
+        Intrinsic::launder_invariant_group, ObjectPtr);
+    return true;
+  }
+
break;
}
case 'm': {
enum PointerStripKind {
PSK_ZeroIndices,
PSK_ZeroIndicesAndAliases,
- PSK_ZeroIndicesAndAliasesAndBarriers,
+ PSK_ZeroIndicesAndAliasesAndInvariantGroups,
PSK_InBoundsConstantIndices,
PSK_InBounds
};
if (auto *GEP = dyn_cast<GEPOperator>(V)) {
switch (StripKind) {
case PSK_ZeroIndicesAndAliases:
- case PSK_ZeroIndicesAndAliasesAndBarriers:
+ case PSK_ZeroIndicesAndAliasesAndInvariantGroups:
case PSK_ZeroIndices:
if (!GEP->hasAllZeroIndices())
return V;
V = RV;
continue;
}
- // The result of invariant.group.barrier must alias it's argument,
+ // The result of launder.invariant.group must alias its argument,
// but it can't be marked with the returned attribute; that's why it
// needs a special case.
- if (StripKind == PSK_ZeroIndicesAndAliasesAndBarriers &&
- CS.getIntrinsicID() == Intrinsic::invariant_group_barrier) {
+ if (StripKind == PSK_ZeroIndicesAndAliasesAndInvariantGroups &&
+ CS.getIntrinsicID() == Intrinsic::launder_invariant_group) {
V = CS.getArgOperand(0);
continue;
}
return stripPointerCastsAndOffsets<PSK_InBoundsConstantIndices>(this);
}
-const Value *Value::stripPointerCastsAndBarriers() const {
- return stripPointerCastsAndOffsets<PSK_ZeroIndicesAndAliasesAndBarriers>(
+const Value *Value::stripPointerCastsAndInvariantGroups() const {
+ return stripPointerCastsAndOffsets<PSK_ZeroIndicesAndAliasesAndInvariantGroups>(
this);
}
case Intrinsic::lifetime_end:
case Intrinsic::invariant_start:
case Intrinsic::invariant_end:
- case Intrinsic::invariant_group_barrier:
+ case Intrinsic::launder_invariant_group:
case Intrinsic::objectsize:
return true;
default:
}
case Intrinsic::invariant_start:
case Intrinsic::invariant_end:
- case Intrinsic::invariant_group_barrier:
+ case Intrinsic::launder_invariant_group:
Intr->eraseFromParent();
// FIXME: I think the invariant marker should still theoretically apply,
// but the intrinsics need to be changed to accept pointers with any
; RUN: opt -basicaa -print-memoryssa -verify-memoryssa -analyze < %s 2>&1 | FileCheck %s
;
; Currently, MemorySSA doesn't support invariant groups, so we should ignore
-; invariant.group.barrier intrinsics entirely. We'll need to pay attention to
+; launder.invariant.group intrinsics entirely. We'll need to pay attention to
; them when/if we decide to support invariant groups.
@g = external global i32
%1 = bitcast i32* %a to i8*
; CHECK: 3 = MemoryDef(2)
-; CHECK-NEXT: %a8 = call i8* @llvm.invariant.group.barrier.p0i8(i8* %1)
- %a8 = call i8* @llvm.invariant.group.barrier.p0i8(i8* %1)
+; CHECK-NEXT: %a8 = call i8* @llvm.launder.invariant.group.p0i8(i8* %1)
+ %a8 = call i8* @llvm.launder.invariant.group.p0i8(i8* %1)
%a32 = bitcast i8* %a8 to i32*
; This has to be MemoryUse(2), because we can't skip the barrier based on
%1 = bitcast i32* %a to i8*
; CHECK: 2 = MemoryDef(1)
-; CHECK-NEXT: %a8 = call i8* @llvm.invariant.group.barrier.p0i8(i8* %1)
- %a8 = call i8* @llvm.invariant.group.barrier.p0i8(i8* %1)
+; CHECK-NEXT: %a8 = call i8* @llvm.launder.invariant.group.p0i8(i8* %1)
+ %a8 = call i8* @llvm.launder.invariant.group.p0i8(i8* %1)
%a32 = bitcast i8* %a8 to i32*
; We can skip the barrier only if the "skip" is not based on !invariant.group.
%1 = bitcast i32* %a to i8*
; CHECK: 1 = MemoryDef(liveOnEntry)
-; CHECK-NEXT: %a8 = call i8* @llvm.invariant.group.barrier.p0i8(i8* %1)
- %a8 = call i8* @llvm.invariant.group.barrier.p0i8(i8* %1)
+; CHECK-NEXT: %a8 = call i8* @llvm.launder.invariant.group.p0i8(i8* %1)
+ %a8 = call i8* @llvm.launder.invariant.group.p0i8(i8* %1)
%a32 = bitcast i8* %a8 to i32*
; We can skip the barrier only if the "skip" is not based on !invariant.group.
store i32 1, i32* @g, align 4
%1 = bitcast i32* %a to i8*
; CHECK: 3 = MemoryDef(2)
-; CHECK-NEXT: %a8 = call i8* @llvm.invariant.group.barrier.p0i8(i8* %1)
- %a8 = call i8* @llvm.invariant.group.barrier.p0i8(i8* %1)
+; CHECK-NEXT: %a8 = call i8* @llvm.launder.invariant.group.p0i8(i8* %1)
+ %a8 = call i8* @llvm.launder.invariant.group.p0i8(i8* %1)
%a32 = bitcast i8* %a8 to i32*
; CHECK: MemoryUse(2)
call void @clobber8(i8* %p)
; CHECK: 3 = MemoryDef(2)
-; CHECK-NEXT: %after = call i8* @llvm.invariant.group.barrier.p0i8(i8* %p)
- %after = call i8* @llvm.invariant.group.barrier.p0i8(i8* %p)
+; CHECK-NEXT: %after = call i8* @llvm.launder.invariant.group.p0i8(i8* %p)
+ %after = call i8* @llvm.launder.invariant.group.p0i8(i8* %p)
br i1 undef, label %Loop.Body, label %Loop.End
Loop.Body:
call void @clobber8(i8* %p)
; CHECK: 3 = MemoryDef(2)
-; CHECK-NEXT: %after = call i8* @llvm.invariant.group.barrier.p0i8(i8* %p)
- %after = call i8* @llvm.invariant.group.barrier.p0i8(i8* %p)
+; CHECK-NEXT: %after = call i8* @llvm.launder.invariant.group.p0i8(i8* %p)
+ %after = call i8* @llvm.launder.invariant.group.p0i8(i8* %p)
br i1 undef, label %Loop.Body, label %Loop.End
Loop.Body:
; CHECK-NEXT: call void @clobber
call void @clobber8(i8* %p)
; CHECK: 3 = MemoryDef(2)
-; CHECK-NEXT: %after = call i8* @llvm.invariant.group.barrier.p0i8(i8* %p)
- %after = call i8* @llvm.invariant.group.barrier.p0i8(i8* %p)
+; CHECK-NEXT: %after = call i8* @llvm.launder.invariant.group.p0i8(i8* %p)
+ %after = call i8* @llvm.launder.invariant.group.p0i8(i8* %p)
br i1 undef, label %Loop.Pre, label %Loop.End
Loop.Pre:
; CHECK-NEXT: store i8 42, i8* %ptr, !invariant.group !0
store i8 42, i8* %ptr, !invariant.group !0
; CHECK: 2 = MemoryDef(1)
-; CHECK-NEXT: call i8* @llvm.invariant.group.barrier
- %ptr2 = call i8* @llvm.invariant.group.barrier.p0i8(i8* %ptr)
+; CHECK-NEXT: call i8* @llvm.launder.invariant.group
+ %ptr2 = call i8* @llvm.launder.invariant.group.p0i8(i8* %ptr)
; FIXME: This one could be CSEd.
; CHECK: 3 = MemoryDef(2)
-; CHECK: call i8* @llvm.invariant.group.barrier
- %ptr3 = call i8* @llvm.invariant.group.barrier.p0i8(i8* %ptr)
+; CHECK: call i8* @llvm.launder.invariant.group
+ %ptr3 = call i8* @llvm.launder.invariant.group.p0i8(i8* %ptr)
; CHECK: 4 = MemoryDef(3)
; CHECK-NEXT: call void @clobber8(i8* %ptr)
call void @clobber8(i8* %ptr)
; CHECK-NEXT: store i8 42, i8* %ptr, !invariant.group !0
store i8 42, i8* %ptr, !invariant.group !0
; CHECK: 2 = MemoryDef(1)
-; CHECK-NEXT: call i8* @llvm.invariant.group.barrier
- %ptr2 = call i8* @llvm.invariant.group.barrier.p0i8(i8* %ptr)
+; CHECK-NEXT: call i8* @llvm.launder.invariant.group
+ %ptr2 = call i8* @llvm.launder.invariant.group.p0i8(i8* %ptr)
; CHECK: 3 = MemoryDef(2)
store i8 43, i8* %ptr
; CHECK: 4 = MemoryDef(3)
-; CHECK-NEXT: call i8* @llvm.invariant.group.barrier
- %ptr3 = call i8* @llvm.invariant.group.barrier.p0i8(i8* %ptr)
+; CHECK-NEXT: call i8* @llvm.launder.invariant.group
+ %ptr3 = call i8* @llvm.launder.invariant.group.p0i8(i8* %ptr)
; CHECK: 5 = MemoryDef(4)
; CHECK-NEXT: call void @clobber8(i8* %ptr)
call void @clobber8(i8* %ptr)
}
-declare i8* @llvm.invariant.group.barrier.p0i8(i8*)
+declare i8* @llvm.launder.invariant.group.p0i8(i8*)
declare void @clobber(i32*)
declare void @clobber8(i8*)
declare void @use(i8* readonly)
--- /dev/null
+; RUN: opt -S < %s | FileCheck %s
+
+; The intrinsic first took only i8*, then it was made polymorphic, and then
+; it was renamed to launder.invariant.group.
+define void @test(i8* %p1, i16* %p16) {
+; CHECK-LABEL: @test
+; CHECK: %p2 = call i8* @llvm.launder.invariant.group.p0i8(i8* %p1)
+; CHECK: %p3 = call i8* @llvm.launder.invariant.group.p0i8(i8* %p1)
+; CHECK: %p4 = call i16* @llvm.launder.invariant.group.p0i16(i16* %p16)
+ %p2 = call i8* @llvm.invariant.group.barrier(i8* %p1)
+ %p3 = call i8* @llvm.invariant.group.barrier.p0i8(i8* %p1)
+ %p4 = call i16* @llvm.invariant.group.barrier.p0i16(i16* %p16)
+ ret void
+}
+
+; CHECK: Function Attrs: inaccessiblememonly nounwind speculatable
+; CHECK: declare i8* @llvm.launder.invariant.group.p0i8(i8*)
+; CHECK: Function Attrs: inaccessiblememonly nounwind speculatable
+; CHECK: declare i16* @llvm.launder.invariant.group.p0i16(i16*)
+declare i8* @llvm.invariant.group.barrier(i8*)
+declare i8* @llvm.invariant.group.barrier.p0i8(i8*)
+declare i16* @llvm.invariant.group.barrier.p0i16(i16*)
; RUN: llc -O0 -mtriple=arm64 < %s
-declare i8* @llvm.invariant.group.barrier(i8*)
+declare i8* @llvm.launder.invariant.group(i8*)
define i8* @barrier(i8* %p) {
-; CHECK: bl llvm.invariant.group.barrier
- %q = call i8* @llvm.invariant.group.barrier(i8* %p)
+; CHECK: bl llvm.launder.invariant.group
+ %q = call i8* @llvm.launder.invariant.group(i8* %p)
ret i8* %q
}
declare {}* @llvm.invariant.start.p5i8(i64, i8 addrspace(5)* nocapture) #0
declare void @llvm.invariant.end.p5i8({}*, i64, i8 addrspace(5)* nocapture) #0
-declare i8 addrspace(5)* @llvm.invariant.group.barrier.p5i8(i8 addrspace(5)*) #1
+declare i8 addrspace(5)* @llvm.launder.invariant.group.p5i8(i8 addrspace(5)*) #1
; GCN-LABEL: {{^}}use_invariant_promotable_lds:
; GCN: buffer_load_dword
store i32 %tmp3, i32 addrspace(5)* %tmp
%tmp4 = call {}* @llvm.invariant.start.p5i8(i64 4, i8 addrspace(5)* %tmp1) #0
call void @llvm.invariant.end.p5i8({}* %tmp4, i64 4, i8 addrspace(5)* %tmp1) #0
- %tmp5 = call i8 addrspace(5)* @llvm.invariant.group.barrier.p5i8(i8 addrspace(5)* %tmp1) #1
+ %tmp5 = call i8 addrspace(5)* @llvm.launder.invariant.group.p5i8(i8 addrspace(5)* %tmp1) #1
ret void
}
ret double %I
}
-declare i8* @llvm.invariant.group.barrier(i8*)
+declare i8* @llvm.launder.invariant.group(i8*)
define i8* @barrier(i8* %p) {
- %q = call i8* @llvm.invariant.group.barrier(i8* %p)
+ %q = call i8* @llvm.launder.invariant.group(i8* %p)
ret i8* %q
}
+++ /dev/null
-; RUN: opt -S -gvn < %s | FileCheck %s
-; RUN: opt -S -newgvn < %s | FileCheck %s
-; RUN: opt -S -O3 < %s | FileCheck %s
-
-; This test check if optimizer is not proving equality based on mustalias
-; CHECK-LABEL: define void @dontProveEquality(i8* %a)
-define void @dontProveEquality(i8* %a) {
- %b = call i8* @llvm.invariant.group.barrier(i8* %a)
- %r = i1 icmp eq i8* %b, i8* %a
-;CHECK: call void @use(%r)
- call void @use(%r)
-}
-
-declare void @use(i1)
-declare i8* @llvm.invariant.group.barrier(i8 *)
; RUN: opt -S -O3 < %s | FileCheck %s
; These tests check whether passes with CSE functionality can do CSE on
-; invariant.group.barrier, that is prohibited if there is a memory clobber
+; launder.invariant.group, which is prohibited if there is a memory clobber
; between the barrier calls.
; CHECK-LABEL: define i8 @optimizable()
entry:
%ptr = alloca i8
store i8 42, i8* %ptr, !invariant.group !0
-; CHECK: call i8* @llvm.invariant.group.barrier.p0i8
- %ptr2 = call i8* @llvm.invariant.group.barrier.p0i8(i8* %ptr)
+; CHECK: call i8* @llvm.launder.invariant.group.p0i8
+ %ptr2 = call i8* @llvm.launder.invariant.group.p0i8(i8* %ptr)
; FIXME: This one could be CSEd.
-; CHECK: call i8* @llvm.invariant.group.barrier
- %ptr3 = call i8* @llvm.invariant.group.barrier.p0i8(i8* %ptr)
+; CHECK: call i8* @llvm.launder.invariant.group
+ %ptr3 = call i8* @llvm.launder.invariant.group.p0i8(i8* %ptr)
; CHECK: call void @clobber(i8* {{.*}}%ptr)
call void @clobber(i8* %ptr)
entry:
%ptr = alloca i8
store i8 42, i8* %ptr, !invariant.group !0
-; CHECK: call i8* @llvm.invariant.group.barrier.p0i8
- %ptr2 = call i8* @llvm.invariant.group.barrier.p0i8(i8* %ptr)
+; CHECK: call i8* @llvm.launder.invariant.group.p0i8
+ %ptr2 = call i8* @llvm.launder.invariant.group.p0i8(i8* %ptr)
call void @clobber(i8* %ptr)
-; CHECK: call i8* @llvm.invariant.group.barrier.p0i8
- %ptr3 = call i8* @llvm.invariant.group.barrier.p0i8(i8* %ptr)
+; CHECK: call i8* @llvm.launder.invariant.group.p0i8
+ %ptr3 = call i8* @llvm.launder.invariant.group.p0i8(i8* %ptr)
; CHECK: call void @clobber(i8* {{.*}}%ptr)
call void @clobber(i8* %ptr)
; CHECK: call void @use(i8* {{.*}}%ptr2)
define i8 @unoptimizable2() {
%ptr = alloca i8
store i8 42, i8* %ptr, !invariant.group !0
-; CHECK: call i8* @llvm.invariant.group.barrier
- %ptr2 = call i8* @llvm.invariant.group.barrier.p0i8(i8* %ptr)
+; CHECK: call i8* @llvm.launder.invariant.group
+ %ptr2 = call i8* @llvm.launder.invariant.group.p0i8(i8* %ptr)
store i8 43, i8* %ptr
-; CHECK: call i8* @llvm.invariant.group.barrier
- %ptr3 = call i8* @llvm.invariant.group.barrier.p0i8(i8* %ptr)
+; CHECK: call i8* @llvm.launder.invariant.group
+ %ptr3 = call i8* @llvm.launder.invariant.group.p0i8(i8* %ptr)
; CHECK: call void @clobber(i8* {{.*}}%ptr)
call void @clobber(i8* %ptr)
; CHECK: call void @use(i8* {{.*}}%ptr2)
ret i8 %v
}
+; This test checks that the optimizer does not prove equality based on mustalias.
+; CHECK-LABEL: define void @dontProveEquality(i8* %a)
+define void @dontProveEquality(i8* %a) {
+ %b = call i8* @llvm.launder.invariant.group.p0i8(i8* %a)
+ %r = icmp eq i8* %b, %a
+; CHECK: call void @useBool(i1 %r)
+ call void @useBool(i1 %r)
+ ret void
+}
+
declare void @use(i8* readonly)
+declare void @useBool(i1)
declare void @clobber(i8*)
-; CHECK: Function Attrs: inaccessiblememonly nounwind{{$}}
-; CHECK-NEXT: declare i8* @llvm.invariant.group.barrier.p0i8(i8*)
-declare i8* @llvm.invariant.group.barrier.p0i8(i8*)
+; CHECK: Function Attrs: inaccessiblememonly nounwind speculatable{{$}}
+; CHECK-NEXT: declare i8* @llvm.launder.invariant.group.p0i8(i8*)
+declare i8* @llvm.launder.invariant.group.p0i8(i8*)
!0 = !{}
define void @foo() {
enter:
; CHECK-NOT: !invariant.group
- ; CHECK-NOT: @llvm.invariant.group.barrier.p0i8(
+ ; CHECK-NOT: @llvm.launder.invariant.group.p0i8(
; CHECK: %val = load i8, i8* @tmp, !tbaa
%val = load i8, i8* @tmp, !invariant.group !0, !tbaa !{!1, !1, i64 0}
- %ptr = call i8* @llvm.invariant.group.barrier.p0i8(i8* @tmp)
+ %ptr = call i8* @llvm.launder.invariant.group.p0i8(i8* @tmp)
; CHECK: store i8 42, i8* @tmp
store i8 42, i8* %ptr, !invariant.group !0
}
; CHECK-LABEL: }
-declare i8* @llvm.invariant.group.barrier.p0i8(i8*)
+declare i8* @llvm.launder.invariant.group.p0i8(i8*)
!0 = !{!"something"}
!1 = !{!"x", !0}
entry:
%ptr = alloca i8
store i8 42, i8* %ptr, !invariant.group !0
- %ptr2 = call i8* @llvm.invariant.group.barrier.p0i8(i8* %ptr)
+ %ptr2 = call i8* @llvm.launder.invariant.group.p0i8(i8* %ptr)
%a = load i8, i8* %ptr, !invariant.group !0
call void @foo(i8* %ptr2); call to use %ptr2
entry:
%ptr = alloca i8
store i8 42, i8* %ptr, !invariant.group !0
- %ptr2 = call i8* @llvm.invariant.group.barrier.p0i8(i8* %ptr)
+ %ptr2 = call i8* @llvm.launder.invariant.group.p0i8(i8* %ptr)
; CHECK-NOT: load
%a = load i8, i8* %ptr2, !invariant.group !0
; CHECK: store i8 %unknownValue, i8* %ptr, !invariant.group !0
store i8 %unknownValue, i8* %ptr, !invariant.group !0
- %newPtr2 = call i8* @llvm.invariant.group.barrier.p0i8(i8* %ptr)
+ %newPtr2 = call i8* @llvm.launder.invariant.group.p0i8(i8* %ptr)
; CHECK-NOT: load
%d = load i8, i8* %newPtr2, !invariant.group !0
; CHECK: ret i8 %unknownValue
declare void @_ZN1AC1Ev(%struct.A*)
declare void @fooBit(i1*, i1)
-declare i8* @llvm.invariant.group.barrier.p0i8(i8*)
+declare i8* @llvm.launder.invariant.group.p0i8(i8*)
; Function Attrs: nounwind
declare void @llvm.assume(i1 %cmp.vtables) #0
store i32 %val, i32* %valptr
%0 = bitcast i32* %valptr to i8*
- %barr = call i8* @llvm.invariant.group.barrier(i8* %0)
+ %barr = call i8* @llvm.launder.invariant.group(i8* %0)
%1 = bitcast i8* %barr to i32*
%val2 = load i32, i32* %1
ret void
}
-; We can't step through invariant.group.barrier here, because that would change
+; We can't step through launder.invariant.group here, because that would change
; this load in @usage_of_globals()
; val = load i32, i32* %ptrVal, !invariant.group !0
; into
store i32 13, i32* @tmp3, !invariant.group !0
%0 = bitcast i32* @tmp3 to i8*
- %barr = call i8* @llvm.invariant.group.barrier(i8* %0)
+ %barr = call i8* @llvm.launder.invariant.group(i8* %0)
%1 = bitcast i8* %barr to i32*
store i32* %1, i32** @ptrToTmp3
declare void @changeTmp3ValAndCallBarrierInside()
-declare i8* @llvm.invariant.group.barrier(i8*)
+declare i8* @llvm.launder.invariant.group(i8*)
!0 = !{!"something"}
entry:
%ptr = alloca i8
store i8 42, i8* %ptr, !invariant.group !0
- %ptr2 = call i8* @llvm.invariant.group.barrier.p0i8(i8* %ptr)
+ %ptr2 = call i8* @llvm.launder.invariant.group.p0i8(i8* %ptr)
%a = load i8, i8* %ptr, !invariant.group !0
call void @foo(i8* %ptr2); call to use %ptr2
entry:
%ptr = alloca i8
store i8 42, i8* %ptr, !invariant.group !0
- %ptr2 = call i8* @llvm.invariant.group.barrier.p0i8(i8* %ptr)
-; CHECK-NOT: load
+ %ptr2 = call i8* @llvm.launder.invariant.group.p0i8(i8* %ptr)
%a = load i8, i8* %ptr2, !invariant.group !0
; CHECK: ret i8 42
; CHECK: store i8 %unknownValue, i8* %ptr, !invariant.group !0
store i8 %unknownValue, i8* %ptr, !invariant.group !0
- %newPtr2 = call i8* @llvm.invariant.group.barrier.p0i8(i8* %ptr)
+ %newPtr2 = call i8* @llvm.launder.invariant.group.p0i8(i8* %ptr)
; CHECK-NOT: load
%d = load i8, i8* %newPtr2, !invariant.group !0
; CHECK: ret i8 %unknownValue
declare void @_ZN1AC1Ev(%struct.A*)
declare void @fooBit(i1*, i1)
-declare i8* @llvm.invariant.group.barrier.p0i8(i8*)
+declare i8* @llvm.launder.invariant.group.p0i8(i8*)
; Function Attrs: nounwind
declare void @llvm.assume(i1 %cmp.vtables) #0