inline const Value *GetUnderlyingObjCPtr(const Value *V,
const DataLayout &DL) {
for (;;) {
- V = GetUnderlyingObject(V, DL);
+ V = getUnderlyingObject(V, DL);
if (!IsForwarding(GetBasicARCInstKind(V)))
break;
V = cast<CallInst>(V)->getArgOperand(0);
/// that the returned value has pointer type if the specified value does. If
/// the MaxLookup value is non-zero, it limits the number of instructions to
/// be stripped off.
- Value *GetUnderlyingObject(Value *V, const DataLayout &DL,
+ Value *getUnderlyingObject(Value *V, const DataLayout &DL,
unsigned MaxLookup = 6);
- inline const Value *GetUnderlyingObject(const Value *V, const DataLayout &DL,
+ inline const Value *getUnderlyingObject(const Value *V, const DataLayout &DL,
unsigned MaxLookup = 6) {
- return GetUnderlyingObject(const_cast<Value *>(V), DL, MaxLookup);
+ return getUnderlyingObject(const_cast<Value *>(V), DL, MaxLookup);
}
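A minimal usage sketch of the renamed entry point, for readers following the rename. This is editorial and not part of the patch; the wrapper name isStoreToStackObject is hypothetical, and the includes reflect the headers the declaration above lives in.

#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Instructions.h"

using namespace llvm;

// Returns true if the store writes into a stack object. getUnderlyingObject
// strips GEPs, pointer casts and similar adjustments, walking at most
// MaxLookup (default 6) instructions.
static bool isStoreToStackObject(const StoreInst &SI, const DataLayout &DL) {
  const Value *Obj = getUnderlyingObject(SI.getPointerOperand(), DL);
  return isa<AllocaInst>(Obj);
}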
- /// This method is similar to GetUnderlyingObject except that it can
+ /// This method is similar to getUnderlyingObject except that it can
/// look through phi and select instructions and return multiple objects.
///
/// If LoopInfo is passed, loop phis are further analyzed. If a pointer
/// Since A[i] and A[i-1] are independent pointers, getUnderlyingObjects
/// should not assume that Curr and Prev share the same underlying object;
/// thus it shouldn't look through the phi above.
- void GetUnderlyingObjects(const Value *V,
+ void getUnderlyingObjects(const Value *V,
SmallVectorImpl<const Value *> &Objects,
const DataLayout &DL, LoopInfo *LI = nullptr,
unsigned MaxLookup = 6);
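A corresponding sketch of the multi-object form, again editorial and not part of the patch; the wrapper name pointsOnlyToAllocas is hypothetical.

#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/Instructions.h"

using namespace llvm;

// Returns true if every object Ptr may be based on is an alloca. Because
// getUnderlyingObjects looks through selects and PHIs, a pointer such as
// 'select %c, %a, %b' contributes the objects of both %a and %b; passing a
// LoopInfo additionally enables the loop-PHI analysis described above.
static bool pointsOnlyToAllocas(const Value *Ptr, const DataLayout &DL,
                                LoopInfo *LI = nullptr) {
  SmallVector<const Value *, 4> Objects;
  getUnderlyingObjects(Ptr, Objects, DL, LI);
  return !Objects.empty() &&
         all_of(Objects, [](const Value *O) { return isa<AllocaInst>(O); });
}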
- /// This is a wrapper around GetUnderlyingObjects and adds support for basic
+ /// This is a wrapper around getUnderlyingObjects and adds support for basic
/// ptrtoint+arithmetic+inttoptr sequences.
bool getUnderlyingObjectsForCodeGen(const Value *V,
SmallVectorImpl<Value *> &Objects,
return ModRefInfo::ModRef;
const Value *Object =
- GetUnderlyingObject(MemLoc.Ptr, I->getModule()->getDataLayout());
+ getUnderlyingObject(MemLoc.Ptr, I->getModule()->getDataLayout());
if (!isIdentifiedObject(Object) || isa<GlobalValue>(Object) ||
isa<Constant>(Object))
return ModRefInfo::ModRef;
const unsigned MaxNumPhiBBsValueReachabilityCheck = 20;
// The max limit of the search depth in DecomposeGEPExpression() and
-// GetUnderlyingObject(), both functions need to use the same search
+// getUnderlyingObject(). Both functions need to use the same search
// depth; otherwise the algorithm in aliasGEP will assert.
static const unsigned MaxLookupSearchDepth = 6;
/// such, the gep cannot necessarily be reconstructed from its decomposed form.
///
/// When DataLayout is around, this function is capable of analyzing everything
-/// that GetUnderlyingObject can look through. To be able to do that
-/// GetUnderlyingObject and DecomposeGEPExpression must use the same search
+/// that getUnderlyingObject can look through. To be able to do that
+/// getUnderlyingObject and DecomposeGEPExpression must use the same search
/// depth (MaxLookupSearchDepth). When DataLayout is not around, it just looks
/// through pointer casts.
bool BasicAAResult::DecomposeGEPExpression(const Value *V,
SmallVector<const Value *, 16> Worklist;
Worklist.push_back(Loc.Ptr);
do {
- const Value *V = GetUnderlyingObject(Worklist.pop_back_val(), DL);
+ const Value *V = getUnderlyingObject(Worklist.pop_back_val(), DL);
if (!Visited.insert(V).second) {
Visited.clear();
return AAResultBase::pointsToConstantMemory(Loc, AAQI, OrLocal);
assert(notDifferentParent(Call, Loc.Ptr) &&
"AliasAnalysis query involving multiple functions!");
- const Value *Object = GetUnderlyingObject(Loc.Ptr, DL);
+ const Value *Object = getUnderlyingObject(Loc.Ptr, DL);
// Calls marked 'tail' cannot read or write allocas from the current frame
// because the current frame might be destroyed by the time they run. However,
/// another pointer.
///
/// We know that V1 is a GEP, but we don't know anything about V2.
-/// UnderlyingV1 is GetUnderlyingObject(GEP1, DL), UnderlyingV2 is the same for
+/// UnderlyingV1 is getUnderlyingObject(GEP1, DL), UnderlyingV2 is the same for
/// V2.
AliasResult BasicAAResult::aliasGEP(
const GEPOperator *GEP1, LocationSize V1Size, const AAMDNodes &V1AAInfo,
assert(DecompGEP1.Base == UnderlyingV1 && DecompGEP2.Base == UnderlyingV2 &&
"DecomposeGEPExpression returned a result different from "
- "GetUnderlyingObject");
+ "getUnderlyingObject");
// If the GEP's offset relative to its base is such that the base would
// fall below the start of the object underlying V2, then the GEP and V2
// Figure out what objects these things are pointing to if we can.
if (O1 == nullptr)
- O1 = GetUnderlyingObject(V1, DL, MaxLookupSearchDepth);
+ O1 = getUnderlyingObject(V1, DL, MaxLookupSearchDepth);
if (O2 == nullptr)
- O2 = GetUnderlyingObject(V2, DL, MaxLookupSearchDepth);
+ O2 = getUnderlyingObject(V2, DL, MaxLookupSearchDepth);
// Null values in the default address space don't point to any object, so they
// don't alias any other pointer.
// The pointer is not captured if returned pointer is not captured.
// NOTE: CaptureTracking users should not assume that only functions
// marked with nocapture do not capture. This means that places like
- // GetUnderlyingObject in ValueTracking or DecomposeGEPExpression
+ // getUnderlyingObject in ValueTracking or DecomposeGEPExpression
// in BasicAA also need to know about this property.
if (isIntrinsicReturningPointerAliasingArgumentWithoutCapturing(Call,
true)) {
// If this load comes from anywhere in a constant global, and if the global
// is all undef or zero, we know what it loads.
- if (auto *GV = dyn_cast<GlobalVariable>(GetUnderlyingObject(CE, DL))) {
+ if (auto *GV = dyn_cast<GlobalVariable>(getUnderlyingObject(CE, DL))) {
if (GV->isConstant() && GV->hasDefinitiveInitializer()) {
if (GV->getInitializer()->isNullValue())
return Constant::getNullValue(Ty);
return NoAlias;
// Check the underlying objects are the same
- const Value *AObj = GetUnderlyingObject(LocA.Ptr, DL);
- const Value *BObj = GetUnderlyingObject(LocB.Ptr, DL);
+ const Value *AObj = getUnderlyingObject(LocA.Ptr, DL);
+ const Value *BObj = getUnderlyingObject(LocB.Ptr, DL);
// If the underlying objects are the same, they must alias
if (AObj == BObj)
// An option to enable unsafe alias results from the GlobalsModRef analysis.
// When enabled, GlobalsModRef will provide no-alias results which in extremely
// rare cases may not be conservatively correct. In particular, in the face of
-// transforms which cause assymetry between how effective GetUnderlyingObject
+// transforms which cause asymmetry between how effective getUnderlyingObject
// is for two pointers, it may produce incorrect results.
//
// These unsafe results have been returned by GMR for many years without
continue;
// Check the value being stored.
- Value *Ptr = GetUnderlyingObject(SI->getOperand(0),
+ Value *Ptr = getUnderlyingObject(SI->getOperand(0),
GV->getParent()->getDataLayout());
if (!isAllocLikeFn(Ptr, &GetTLI(*SI->getFunction())))
return false;
if (auto *LI = dyn_cast<LoadInst>(Input)) {
- Inputs.push_back(GetUnderlyingObject(LI->getPointerOperand(), DL));
+ Inputs.push_back(getUnderlyingObject(LI->getPointerOperand(), DL));
continue;
}
if (auto *SI = dyn_cast<SelectInst>(Input)) {
- const Value *LHS = GetUnderlyingObject(SI->getTrueValue(), DL);
- const Value *RHS = GetUnderlyingObject(SI->getFalseValue(), DL);
+ const Value *LHS = getUnderlyingObject(SI->getTrueValue(), DL);
+ const Value *RHS = getUnderlyingObject(SI->getFalseValue(), DL);
if (Visited.insert(LHS).second)
Inputs.push_back(LHS);
if (Visited.insert(RHS).second)
}
if (auto *PN = dyn_cast<PHINode>(Input)) {
for (const Value *Op : PN->incoming_values()) {
- Op = GetUnderlyingObject(Op, DL);
+ Op = getUnderlyingObject(Op, DL);
if (Visited.insert(Op).second)
Inputs.push_back(Op);
}
if (auto *LI = dyn_cast<LoadInst>(Input)) {
// A pointer loaded from a global would have been captured, and we know
// that the global is non-escaping, so no alias.
- const Value *Ptr = GetUnderlyingObject(LI->getPointerOperand(), DL);
+ const Value *Ptr = getUnderlyingObject(LI->getPointerOperand(), DL);
if (isNonEscapingGlobalNoAliasWithLoad(GV, Ptr, Depth, DL))
// The load does not alias with GV.
continue;
return false;
}
if (auto *SI = dyn_cast<SelectInst>(Input)) {
- const Value *LHS = GetUnderlyingObject(SI->getTrueValue(), DL);
- const Value *RHS = GetUnderlyingObject(SI->getFalseValue(), DL);
+ const Value *LHS = getUnderlyingObject(SI->getTrueValue(), DL);
+ const Value *RHS = getUnderlyingObject(SI->getFalseValue(), DL);
if (Visited.insert(LHS).second)
Inputs.push_back(LHS);
if (Visited.insert(RHS).second)
}
if (auto *PN = dyn_cast<PHINode>(Input)) {
for (const Value *Op : PN->incoming_values()) {
- Op = GetUnderlyingObject(Op, DL);
+ Op = getUnderlyingObject(Op, DL);
if (Visited.insert(Op).second)
Inputs.push_back(Op);
}
const MemoryLocation &LocB,
AAQueryInfo &AAQI) {
// Get the base object these pointers point to.
- const Value *UV1 = GetUnderlyingObject(LocA.Ptr, DL);
- const Value *UV2 = GetUnderlyingObject(LocB.Ptr, DL);
+ const Value *UV1 = getUnderlyingObject(LocA.Ptr, DL);
+ const Value *UV2 = getUnderlyingObject(LocB.Ptr, DL);
// If either of the underlying values is a global, they may be non-addr-taken
// globals, which we can answer queries about.
// is based on GV, return the conservative result.
for (auto &A : Call->args()) {
SmallVector<const Value*, 4> Objects;
- GetUnderlyingObjects(A, Objects, DL);
+ getUnderlyingObjects(A, Objects, DL);
// All objects must be identified.
if (!all_of(Objects, isIdentifiedObject) &&
// If we are asking for mod/ref info of a direct call with a pointer to a
// global we are tracking, return information if we have it.
if (const GlobalValue *GV =
- dyn_cast<GlobalValue>(GetUnderlyingObject(Loc.Ptr, DL)))
+ dyn_cast<GlobalValue>(getUnderlyingObject(Loc.Ptr, DL)))
// If GV is internal to this IR and there is no function with local linkage
// that has had their address taken, keep looking for a tighter ModRefInfo.
if (GV->hasLocalLinkage() && !UnknownFunctionsWithLocalLinkage)
// memory within the lifetime of the current function (allocas, byval
// arguments, globals), then determine the comparison result here.
SmallVector<const Value *, 8> LHSUObjs, RHSUObjs;
- GetUnderlyingObjects(LHS, LHSUObjs, DL);
- GetUnderlyingObjects(RHS, RHSUObjs, DL);
+ getUnderlyingObjects(LHS, LHSUObjs, DL);
+ getUnderlyingObjects(RHS, RHSUObjs, DL);
// Is the set of underlying objects all noalias calls?
auto IsNAC = [](ArrayRef<const Value *> Objects) {
static bool InstructionDereferencesPointer(Instruction *I, Value *Ptr) {
if (LoadInst *L = dyn_cast<LoadInst>(I)) {
return L->getPointerAddressSpace() == 0 &&
- GetUnderlyingObject(L->getPointerOperand(),
+ getUnderlyingObject(L->getPointerOperand(),
L->getModule()->getDataLayout()) == Ptr;
}
if (StoreInst *S = dyn_cast<StoreInst>(I)) {
return S->getPointerAddressSpace() == 0 &&
- GetUnderlyingObject(S->getPointerOperand(),
+ getUnderlyingObject(S->getPointerOperand(),
S->getModule()->getDataLayout()) == Ptr;
}
if (MemIntrinsic *MI = dyn_cast<MemIntrinsic>(I)) {
if (!Len || Len->isZero()) return false;
if (MI->getDestAddressSpace() == 0)
- if (GetUnderlyingObject(MI->getRawDest(),
+ if (getUnderlyingObject(MI->getRawDest(),
MI->getModule()->getDataLayout()) == Ptr)
return true;
if (MemTransferInst *MTI = dyn_cast<MemTransferInst>(MI))
if (MTI->getSourceAddressSpace() == 0)
- if (GetUnderlyingObject(MTI->getRawSource(),
+ if (getUnderlyingObject(MTI->getRawSource(),
MTI->getModule()->getDataLayout()) == Ptr)
return true;
}
assert(Val->getType()->isPointerTy());
const DataLayout &DL = BB->getModule()->getDataLayout();
- Value *UnderlyingVal = GetUnderlyingObject(Val, DL);
- // If 'GetUnderlyingObject' didn't converge, skip it. It won't converge
+ Value *UnderlyingVal = getUnderlyingObject(Val, DL);
+ // If 'getUnderlyingObject' didn't converge, skip it. It won't converge
// inside InstructionDereferencesPointer either.
- if (UnderlyingVal == GetUnderlyingObject(UnderlyingVal, DL, 1))
+ if (UnderlyingVal == getUnderlyingObject(UnderlyingVal, DL, 1))
for (Instruction &I : *BB)
if (InstructionDereferencesPointer(&I, UnderlyingVal))
return true;
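The convergence check in the hunk above, pulled out as a standalone sketch. Editorial only; the helper name underlyingObjectConverged is hypothetical.

#include "llvm/Analysis/ValueTracking.h"

using namespace llvm;

// getUnderlyingObject gives up after MaxLookup steps, so its result may
// still be strippable. Asking for one more step and comparing tells us
// whether the bounded walk actually reached a fixed point.
static bool underlyingObjectConverged(const Value *V, const DataLayout &DL) {
  const Value *Obj = getUnderlyingObject(V, DL);
  return Obj == getUnderlyingObject(Obj, DL, /*MaxLookup=*/1);
}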
// TODO: Look through eliminable cast pairs.
// TODO: Look through calls with unique return values.
// TODO: Look through vector insert/extract/shuffle.
- V = OffsetOk ? GetUnderlyingObject(V, *DL) : V->stripPointerCasts();
+ V = OffsetOk ? getUnderlyingObject(V, *DL) : V->stripPointerCasts();
if (LoadInst *L = dyn_cast<LoadInst>(V)) {
BasicBlock::iterator BBI = L->getIterator();
BasicBlock *BB = L->getParent();
typedef SmallVector<const Value *, 16> ValueVector;
ValueVector TempObjects;
- GetUnderlyingObjects(Ptr, TempObjects, DL, LI);
+ getUnderlyingObjects(Ptr, TempObjects, DL, LI);
LLVM_DEBUG(dbgs()
<< "Underlying objects for pointer " << *Ptr << "\n");
for (const Value *UnderlyingObj : TempObjects) {
// first pointer in the array.
Value *Ptr0 = VL[0];
const SCEV *Scev0 = SE.getSCEV(Ptr0);
- Value *Obj0 = GetUnderlyingObject(Ptr0, DL);
+ Value *Obj0 = getUnderlyingObject(Ptr0, DL);
llvm::SmallSet<int64_t, 4> Offsets;
for (auto *Ptr : VL) {
return false;
// If a pointer refers to a different underlying object, bail - the
// pointers are by definition incomparable.
- Value *CurrObj = GetUnderlyingObject(Ptr, DL);
+ Value *CurrObj = getUnderlyingObject(Ptr, DL);
if (CurrObj != Obj0)
return false;
AccessAnalysis Accesses(TheLoop->getHeader()->getModule()->getDataLayout(),
TheLoop, AA, LI, DependentAccesses, *PSE);
- // Holds the analyzed pointers. We don't want to call GetUnderlyingObjects
+ // Holds the analyzed pointers. We don't want to call getUnderlyingObjects
// multiple times on the same object. If the ptr is accessed twice, once
// for read and once for write, it will only appear once (on the write
// list). This is okay, since we are going to check for conflicts between
// looking for a clobber in many cases; that's an alias property and is
// handled by BasicAA.
if (isa<AllocaInst>(Inst) || isNoAliasFn(Inst, &TLI)) {
- const Value *AccessPtr = GetUnderlyingObject(MemLoc.Ptr, DL);
+ const Value *AccessPtr = getUnderlyingObject(MemLoc.Ptr, DL);
if (AccessPtr == Inst || AA.isMustAlias(Inst, AccessPtr))
return MemDepResult::getDef(Inst);
}
return true;
}
-Value *llvm::GetUnderlyingObject(Value *V, const DataLayout &DL,
+Value *llvm::getUnderlyingObject(Value *V, const DataLayout &DL,
unsigned MaxLookup) {
if (!V->getType()->isPointerTy())
return V;
return V;
}
-void llvm::GetUnderlyingObjects(const Value *V,
+void llvm::getUnderlyingObjects(const Value *V,
SmallVectorImpl<const Value *> &Objects,
const DataLayout &DL, LoopInfo *LI,
unsigned MaxLookup) {
Worklist.push_back(V);
do {
const Value *P = Worklist.pop_back_val();
- P = GetUnderlyingObject(P, DL, MaxLookup);
+ P = getUnderlyingObject(P, DL, MaxLookup);
if (!Visited.insert(P).second)
continue;
} while (true);
}
-/// This is a wrapper around GetUnderlyingObjects and adds support for basic
+/// This is a wrapper around getUnderlyingObjects and adds support for basic
/// ptrtoint+arithmetic+inttoptr sequences.
-/// It returns false if unidentified object is found in GetUnderlyingObjects.
+/// It returns false if an unidentified object is found in getUnderlyingObjects.
bool llvm::getUnderlyingObjectsForCodeGen(const Value *V,
SmallVectorImpl<Value *> &Objects,
const DataLayout &DL) {
V = Working.pop_back_val();
SmallVector<const Value *, 4> Objs;
- GetUnderlyingObjects(V, Objs, DL);
+ getUnderlyingObjects(V, Objs, DL);
for (const Value *V : Objs) {
if (!Visited.insert(V).second)
continue;
}
}
- // If GetUnderlyingObjects fails to find an identifiable object,
+ // If getUnderlyingObjects fails to find an identifiable object,
// getUnderlyingObjectsForCodeGen also fails for safety.
if (!isIdentifiedObject(V)) {
Objects.clear();
// Get the underlying objects for the location passed on the lifetime
// marker.
SmallVector<const Value *, 4> Allocas;
- GetUnderlyingObjects(CI.getArgOperand(1), Allocas, *DL);
+ getUnderlyingObjects(CI.getArgOperand(1), Allocas, *DL);
// Iterate over each underlying object, creating lifetime markers for each
// static alloca. Quit if we find a non-static alloca.
MachineMemOperand *MM = *MI->memoperands_begin();
if (!MM->getValue())
return;
- GetUnderlyingObjects(MM->getValue(), Objs, DL);
+ getUnderlyingObjects(MM->getValue(), Objs, DL);
for (const Value *V : Objs) {
if (!isIdentifiedObject(V)) {
Objs.clear();
PendingLoads.clear();
else if (MI.mayLoad()) {
SmallVector<const Value *, 4> Objs;
- getUnderlyingObjects(&MI, Objs, MF.getDataLayout());
+ ::getUnderlyingObjects(&MI, Objs, MF.getDataLayout());
if (Objs.empty())
Objs.push_back(UnknownValue);
for (auto V : Objs) {
}
} else if (MI.mayStore()) {
SmallVector<const Value *, 4> Objs;
- getUnderlyingObjects(&MI, Objs, MF.getDataLayout());
+ ::getUnderlyingObjects(&MI, Objs, MF.getDataLayout());
if (Objs.empty())
Objs.push_back(UnknownValue);
for (auto V : Objs) {
cast<ConstantInt>(I.getArgOperand(0))->getSExtValue();
Value *const ObjectPtr = I.getArgOperand(1);
SmallVector<const Value *, 4> Allocas;
- GetUnderlyingObjects(ObjectPtr, Allocas, *DL);
+ getUnderlyingObjects(ObjectPtr, Allocas, *DL);
for (SmallVectorImpl<const Value*>::iterator Object = Allocas.begin(),
E = Allocas.end(); Object != E; ++Object) {
AS == AMDGPUAS::CONSTANT_ADDRESS_32BIT)
return true;
- const Value *Base = GetUnderlyingObject(Loc.Ptr, DL);
+ const Value *Base = getUnderlyingObject(Loc.Ptr, DL);
AS = Base->getType()->getPointerAddressSpace();
if (AS == AMDGPUAS::CONSTANT_ADDRESS ||
AS == AMDGPUAS::CONSTANT_ADDRESS_32BIT)
Ty->getAddressSpace() != AMDGPUAS::FLAT_ADDRESS))
continue;
- PtrArg = GetUnderlyingObject(PtrArg, DL);
+ PtrArg = getUnderlyingObject(PtrArg, DL);
if (const AllocaInst *AI = dyn_cast<AllocaInst>(PtrArg)) {
if (!AI->isStaticAlloca() || !AIVisited.insert(AI).second)
continue;
if (isa<ConstantPointerNull>(OtherOp))
return true;
- Value *OtherObj = GetUnderlyingObject(OtherOp, *DL);
+ Value *OtherObj = getUnderlyingObject(OtherOp, *DL);
if (!isa<AllocaInst>(OtherObj))
return false;
if (AS == AMDGPUAS::PRIVATE_ADDRESS) {
const Value *Ptr = GEP->getPointerOperand();
const AllocaInst *Alloca =
- dyn_cast<AllocaInst>(GetUnderlyingObject(Ptr, DL));
+ dyn_cast<AllocaInst>(getUnderlyingObject(Ptr, DL));
if (!Alloca || !Alloca->isStaticAlloca())
continue;
Type *Ty = Alloca->getAllocatedType();
const MemSDNode *LD = cast<MemSDNode>(N);
return LD->getAddressSpace() == AMDGPUAS::GLOBAL_ADDRESS ||
(LD->getAddressSpace() == AMDGPUAS::CONSTANT_ADDRESS &&
- !isa<GlobalValue>(GetUnderlyingObject(
+ !isa<GlobalValue>(getUnderlyingObject(
LD->getMemOperand()->getValue(), CurDAG->getDataLayout())));
}]>;
(ops node:$ptr), (load node:$ptr), [{
const MemSDNode *LD = cast<MemSDNode>(N);
return LD->getAddressSpace() == AMDGPUAS::CONSTANT_ADDRESS &&
- isa<GlobalValue>(GetUnderlyingObject(
+ isa<GlobalValue>(getUnderlyingObject(
LD->getMemOperand()->getValue(), CurDAG->getDataLayout()));
}]>;
return false;
const MachineFunction &MF = *MI1.getParent()->getParent();
const DataLayout &DL = MF.getFunction().getParent()->getDataLayout();
- Base1 = GetUnderlyingObject(Base1, DL);
- Base2 = GetUnderlyingObject(Base2, DL);
+ Base1 = getUnderlyingObject(Base1, DL);
+ Base2 = getUnderlyingObject(Base2, DL);
if (isa<UndefValue>(Base1) || isa<UndefValue>(Base2))
return false;
if (const Value *V = MMO.getValue()) {
SmallVector<const Value *, 4> Objs;
- GetUnderlyingObjects(V, Objs, DL);
+ ::getUnderlyingObjects(V, Objs, DL);
for (const Value *UValue : Objs) {
if (!isIdentifiedObject(V))
bool IsKernelFn = isKernelFunction(F->getFunction());
- // We use GetUnderlyingObjects() here instead of GetUnderlyingObject() mainly
+ // We use getUnderlyingObjects() here instead of getUnderlyingObject() mainly
// because the former looks through phi nodes while the latter does not. We
// need to look through phi nodes to handle pointer induction variables.
SmallVector<const Value *, 8> Objs;
- GetUnderlyingObjects(N->getMemOperand()->getValue(),
- Objs, F->getDataLayout());
+ getUnderlyingObjects(N->getMemOperand()->getValue(), Objs,
+ F->getDataLayout());
return all_of(Objs, [&](const Value *V) {
if (auto *A = dyn_cast<const Argument>(V))
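An editorial aside on the comment in the hunk above (the plural form looks through PHI nodes, the singular form does not): a hedged sketch of the difference for a pointer induction variable. The function name is hypothetical and the claim about the return values follows the comment, not the implementation details.

#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/Instructions.h"

using namespace llvm;

// For a pointer induction variable (a PHI over pointers), the singular
// helper stops at the PHI node itself, while the plural helper walks the
// incoming values and can reach the underlying allocation or argument.
static bool pluralSeesThroughPHI(const PHINode *PtrPhi, const DataLayout &DL) {
  const Value *Single = getUnderlyingObject(PtrPhi, DL);
  SmallVector<const Value *, 4> Multi;
  getUnderlyingObjects(PtrPhi, Multi, DL);
  return Single == PtrPhi && !is_contained(Multi, Single);
}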
for (auto &I : B) {
if (LoadInst *LI = dyn_cast<LoadInst>(&I)) {
if (LI->getType()->isPointerTy()) {
- Value *UO = GetUnderlyingObject(LI->getPointerOperand(),
+ Value *UO = getUnderlyingObject(LI->getPointerOperand(),
F.getParent()->getDataLayout());
if (Argument *Arg = dyn_cast<Argument>(UO)) {
if (Arg->hasByValAttr()) {
/// See AAPrivatizablePtrImpl::identifyPrivatizableType(...)
Optional<Type *> identifyPrivatizableType(Attributor &A) override {
Value *Obj =
- GetUnderlyingObject(&getAssociatedValue(), A.getInfoCache().getDL());
+ getUnderlyingObject(&getAssociatedValue(), A.getInfoCache().getDL());
if (!Obj) {
LLVM_DEBUG(dbgs() << "[AAPrivatizablePtr] No underlying object found!\n");
return nullptr;
// Try to optimize equality comparisons against alloca-based pointers.
if (Op0->getType()->isPointerTy() && I.isEquality()) {
assert(Op1->getType()->isPointerTy() && "Comparing pointer with non-pointer?");
- if (auto *Alloca = dyn_cast<AllocaInst>(GetUnderlyingObject(Op0, DL)))
+ if (auto *Alloca = dyn_cast<AllocaInst>(getUnderlyingObject(Op0, DL)))
if (Instruction *New = foldAllocaCmp(I, Alloca, Op1))
return New;
- if (auto *Alloca = dyn_cast<AllocaInst>(GetUnderlyingObject(Op1, DL)))
+ if (auto *Alloca = dyn_cast<AllocaInst>(getUnderlyingObject(Op1, DL)))
if (Instruction *New = foldAllocaCmp(I, Alloca, Op0))
return New;
}
return replaceOperand(SI, 1, TrueSI->getTrueValue());
}
// select(C0, select(C1, a, b), b) -> select(C0&C1, a, b)
- // We choose this as normal form to enable folding on the And and shortening
- // paths for the values (this helps GetUnderlyingObjects() for example).
+ // We choose this as normal form to enable folding on the And and
+ // shortening paths for the values (this helps getUnderlyingObjects() for
+ // example).
if (TrueSI->getFalseValue() == FalseVal && TrueSI->hasOneUse()) {
Value *And = Builder.CreateAnd(CondVal, TrueSI->getCondition());
replaceOperand(SI, 0, And);
if (ClOpt && ClOptGlobals) {
// If initialization order checking is disabled, a simple access to a
// dynamically initialized global is always valid.
- GlobalVariable *G = dyn_cast<GlobalVariable>(GetUnderlyingObject(Addr, DL));
+ GlobalVariable *G = dyn_cast<GlobalVariable>(getUnderlyingObject(Addr, DL));
if (G && (!ClInitializers || GlobalIsLinkerInitialized(G)) &&
isSafeAccess(ObjSizeVis, Addr, O.TypeSize)) {
NumOptimizedAccessesToGlobalVar++;
if (ClOpt && ClOptStack) {
// A direct inbounds access to a stack variable is always valid.
- if (isa<AllocaInst>(GetUnderlyingObject(Addr, DL)) &&
+ if (isa<AllocaInst>(getUnderlyingObject(Addr, DL)) &&
isSafeAccess(ObjSizeVis, Addr, O.TypeSize)) {
NumOptimizedAccessesToStackVar++;
return;
const llvm::Align ShadowAlign(Align * DFS.ShadowWidthBytes);
SmallVector<const Value *, 2> Objs;
- GetUnderlyingObjects(Addr, Objs, Pos->getModule()->getDataLayout());
+ getUnderlyingObjects(Addr, Objs, Pos->getModule()->getDataLayout());
bool AllConstants = true;
for (const Value *Obj : Objs) {
if (isa<Function>(Obj) || isa<BlockAddress>(Obj))
}
}
- if (isa<AllocaInst>(GetUnderlyingObject(Addr, DL)) &&
+ if (isa<AllocaInst>(getUnderlyingObject(Addr, DL)) &&
!PointerMayBeCaptured(Addr, true, true)) {
// The variable is addressable but not captured, so it cannot be
// referenced from a different thread and participate in a data race
// Check to see if the later store is to the entire object (either a global,
// an alloca, or a byval/inalloca argument). If so, then it clearly
// overwrites any other store to the same object.
- const Value *UO1 = GetUnderlyingObject(P1, DL),
- *UO2 = GetUnderlyingObject(P2, DL);
+ const Value *UO1 = getUnderlyingObject(P1, DL),
+ *UO2 = getUnderlyingObject(P2, DL);
// If we can't resolve the same pointers to the same object, then we can't
// analyze them at all.
break;
Value *DepPointer =
- GetUnderlyingObject(getStoredPointerOperand(Dependency), DL);
+ getUnderlyingObject(getStoredPointerOperand(Dependency), DL);
// Check for aliasing.
if (!AA->isMustAlias(F->getArgOperand(0), DepPointer))
const DataLayout &DL, AliasAnalysis *AA,
const TargetLibraryInfo *TLI,
const Function *F) {
- const Value *UnderlyingPointer = GetUnderlyingObject(LoadedLoc.Ptr, DL);
+ const Value *UnderlyingPointer = getUnderlyingObject(LoadedLoc.Ptr, DL);
// A constant can't be in the dead pointer set.
if (isa<Constant>(UnderlyingPointer))
if (hasAnalyzableMemoryWrite(&*BBI, *TLI) && isRemovable(&*BBI)) {
// See through pointer-to-pointer bitcasts
SmallVector<const Value *, 4> Pointers;
- GetUnderlyingObjects(getStoredPointerOperand(&*BBI), Pointers, DL);
+ getUnderlyingObjects(getStoredPointerOperand(&*BBI), Pointers, DL);
// Stores to stack values are valid candidates for removal.
bool AllDead = true;
Constant *StoredConstant = dyn_cast<Constant>(SI->getValueOperand());
if (StoredConstant && StoredConstant->isNullValue() && isRemovable(SI)) {
Instruction *UnderlyingPointer =
- dyn_cast<Instruction>(GetUnderlyingObject(SI->getPointerOperand(), DL));
+ dyn_cast<Instruction>(getUnderlyingObject(SI->getPointerOperand(), DL));
if (UnderlyingPointer && isCallocLikeFn(UnderlyingPointer, TLI) &&
memoryIsNotModifiedBetween(UnderlyingPointer, SI, AA, DL, DT)) {
// to it is dead along the unwind edge. Otherwise, we need to preserve
// the store.
if (LastThrowing && DepWrite->comesBefore(LastThrowing)) {
- const Value* Underlying = GetUnderlyingObject(DepLoc.Ptr, DL);
+ const Value *Underlying = getUnderlyingObject(DepLoc.Ptr, DL);
bool IsStoreDeadOnUnwind = isa<AllocaInst>(Underlying);
if (!IsStoreDeadOnUnwind) {
// We're looking for a call to an allocation function
// object can be considered terminated.
if (MaybeTermLoc->second) {
DataLayout DL = MaybeTerm->getParent()->getModule()->getDataLayout();
- DefLoc = MemoryLocation(GetUnderlyingObject(DefLoc.Ptr, DL));
+ DefLoc = MemoryLocation(getUnderlyingObject(DefLoc.Ptr, DL));
}
return AA.isMustAlias(MaybeTermLoc->first, DefLoc);
}
Instruction *DefI = Def->getMemoryInst();
// See through pointer-to-pointer bitcasts
SmallVector<const Value *, 4> Pointers;
- GetUnderlyingObjects(getLocForWriteEx(DefI)->Ptr, Pointers, DL);
+ getUnderlyingObjects(getLocForWriteEx(DefI)->Ptr, Pointers, DL);
LLVM_DEBUG(dbgs() << " ... MemoryDef is not accessed until the end "
"of the function\n");
}
MemoryLocation SILoc = *MaybeSILoc;
assert(SILoc.Ptr && "SILoc should not be null");
- const Value *SILocUnd = GetUnderlyingObject(SILoc.Ptr, DL);
+ const Value *SILocUnd = getUnderlyingObject(SILoc.Ptr, DL);
// Check if the store is a no-op.
if (isRemovable(SI) && State.storeIsNoop(KillingDef, SILoc, SILocUnd)) {
MemoryLocation NILoc = *State.getLocForWriteEx(NI);
if (State.isMemTerminatorInst(SI)) {
- const Value *NIUnd = GetUnderlyingObject(NILoc.Ptr, DL);
+ const Value *NIUnd = getUnderlyingObject(NILoc.Ptr, DL);
if (!SILocUnd || SILocUnd != NIUnd)
continue;
LLVM_DEBUG(dbgs() << "DSE: Remove Dead Store:\n DEAD: " << *NI
// we have to prove that the store is dead along the unwind edge. We do
// this by proving that the caller can't have a reference to the object
// after return and thus can't possibly load from the object.
- Value *Object = GetUnderlyingObject(SomePtr, MDL);
+ Value *Object = getUnderlyingObject(SomePtr, MDL);
if (!isKnownNonEscaping(Object, TLI))
return false;
  // Subtlety: Allocas aren't visible to callers, but *are* potentially
if (IsKnownThreadLocalObject)
SafeToInsertStore = true;
else {
- Value *Object = GetUnderlyingObject(SomePtr, MDL);
+ Value *Object = getUnderlyingObject(SomePtr, MDL);
SafeToInsertStore =
(isAllocLikeFn(Object, TLI) || isa<AllocaInst>(Object)) &&
!PointerMayBeCaptured(Object, true, true);
break;
case LegalStoreKind::Memset: {
// Find the base pointer.
- Value *Ptr = GetUnderlyingObject(SI->getPointerOperand(), *DL);
+ Value *Ptr = getUnderlyingObject(SI->getPointerOperand(), *DL);
StoreRefsForMemset[Ptr].push_back(SI);
} break;
case LegalStoreKind::MemsetPattern: {
// Find the base pointer.
- Value *Ptr = GetUnderlyingObject(SI->getPointerOperand(), *DL);
+ Value *Ptr = getUnderlyingObject(SI->getPointerOperand(), *DL);
StoreRefsForMemsetPattern[Ptr].push_back(SI);
} break;
case LegalStoreKind::Memcpy:
if (Value *Ptr = getPointerOperand(V))
return getUnderlyingObjectThroughLoads(Ptr);
else if (V->getType()->isPointerTy())
- return GetUnderlyingObject(V, DL);
+ return getUnderlyingObject(V, DL);
return V;
}
default:
return RK;
case Attribute::NonNull:
- RK.WasOn = GetUnderlyingObject(RK.WasOn, M->getDataLayout());
+ RK.WasOn = getUnderlyingObject(RK.WasOn, M->getDataLayout());
return RK;
case Attribute::Alignment: {
Value *V = RK.WasOn->stripInBoundsOffsets([&](const Value *Strip) {
if (!RK.WasOn)
return true;
if (RK.WasOn->getType()->isPointerTy()) {
- Value *UnderlyingPtr = GetUnderlyingObject(RK.WasOn, M->getDataLayout());
+ Value *UnderlyingPtr = getUnderlyingObject(RK.WasOn, M->getDataLayout());
if (isa<AllocaInst>(UnderlyingPtr) || isa<GlobalValue>(UnderlyingPtr))
return false;
}
SmallSetVector<const Argument *, 4> NAPtrArgs;
for (const Value *V : PtrArgs) {
SmallVector<const Value *, 4> Objects;
- GetUnderlyingObjects(V, Objects, DL, /* LI = */ nullptr);
+ getUnderlyingObjects(V, Objects, DL, /* LI = */ nullptr);
for (const Value *O : Objects)
ObjSet.insert(O);
// producing an expression involving multiple pointers. Until then, we must
// bail out here.
//
- // Retrieve the pointer operand of the GEP. Don't use GetUnderlyingObject
+ // Retrieve the pointer operand of the GEP. Don't use getUnderlyingObject
// because it understands lcssa phis while SCEV does not.
Value *FromPtr = FromVal;
Value *ToPtr = ToVal;
// SCEV may have rewritten an expression that produces the GEP's pointer
// operand. That's ok as long as the pointer operand has the same base
- // pointer. Unlike GetUnderlyingObject(), getPointerBase() will find the
+ // pointer. Unlike getUnderlyingObject(), getPointerBase() will find the
// base of a recurrence. This handles the case in which SCEV expansion
// converts a pointer type recurrence into a nonrecurrent pointer base
// indexed by an integer recurrence.
if (!Src)
return -1;
- GlobalVariable *GV = dyn_cast<GlobalVariable>(GetUnderlyingObject(Src, DL));
+ GlobalVariable *GV = dyn_cast<GlobalVariable>(getUnderlyingObject(Src, DL));
if (!GV || !GV->isConstant() || !GV->hasDefinitiveInitializer())
return -1;
}
static ChainID getChainID(const Value *Ptr, const DataLayout &DL) {
- const Value *ObjPtr = GetUnderlyingObject(Ptr, DL);
+ const Value *ObjPtr = getUnderlyingObject(Ptr, DL);
if (const auto *Sel = dyn_cast<SelectInst>(ObjPtr)) {
    // The selects themselves are distinct instructions even if they share the
// same condition and evaluate to consecutive pointers for true and false
continue;
if (!isValidElementType(SI->getValueOperand()->getType()))
continue;
- Stores[GetUnderlyingObject(SI->getPointerOperand(), *DL)].push_back(SI);
+ Stores[getUnderlyingObject(SI->getPointerOperand(), *DL)].push_back(SI);
}
// Ignore getelementptr instructions that have more than one index, a
; RUN: opt -passes='require<scalar-evolution>,require<aa>,loop(print-access-info)' -disable-output < %s 2>&1 | FileCheck %s
; Test that the loop accesses are proven safe in this case.
-; The analyzer uses to be confused by the "diamond" because GetUnderlyingObjects
+; The analyzer used to be confused by the "diamond" because getUnderlyingObjects
; says that the two pointers can both point to null. The loop analyzer
-; needs to ignore null in the results returned by GetUnderlyingObjects.
+; needs to ignore null in the results returned by getUnderlyingObjects.
; CHECK: Memory dependences are safe with run-time checks
; }
; FIXME: This should be promotable. We need to use
-; GetUnderlyingObjects when looking at the icmp user.
+; getUnderlyingObjects when looking at the icmp user.
; CHECK-LABEL: @ptr_induction_var_same_alloca(
; CHECK: %alloca = alloca [64 x i32], align 4
ret void
}
-; This should vectorize if using GetUnderlyingObject
+; This should vectorize if using getUnderlyingObject
define void @multi_as_reduction_same_size(i32 addrspace(1)* %global, i64 %idx0, i64 %idx1) #0 {
; CHECK-LABEL: @multi_as_reduction_same_size(
; CHECK-NEXT: bb:
ret void
}
-; This should vectorize if using GetUnderlyingObject
+; This should vectorize if using getUnderlyingObject
; The add is done in the same width, even though the address space size is smaller
define void @multi_as_reduction_different_sized_noncanon(i32 addrspace(3)* %lds, i64 %idx0, i64 %idx1) #0 {
; CHECK-LABEL: @multi_as_reduction_different_sized_noncanon(