struct OffsetUse {
Use *U;
- uint64_t Offset;
+ int64_t Offset;
};
SmallVector<OffsetUse, 8> Queue;
// The active offset and use while visiting.
Use *U;
- uint64_t Offset;
+ int64_t Offset;
- void enqueueUsers(Instruction &I, uint64_t UserOffset) {
+ void enqueueUsers(Instruction &I, int64_t UserOffset) {
SmallPtrSet<User *, 8> UserSet;
for (Value::use_iterator UI = I.use_begin(), UE = I.use_end();
UI != UE; ++UI) {
}
}
- bool computeConstantGEPOffset(GetElementPtrInst &GEPI, uint64_t &GEPOffset) {
+ bool computeConstantGEPOffset(GetElementPtrInst &GEPI, int64_t &GEPOffset) {
GEPOffset = Offset;
for (gep_type_iterator GTI = gep_type_begin(GEPI), GTE = gep_type_end(GEPI);
GTI != GTE; ++GTI) {
if (StructType *STy = dyn_cast<StructType>(*GTI)) {
unsigned ElementIdx = OpC->getZExtValue();
const StructLayout *SL = TD.getStructLayout(STy);
- GEPOffset += SL->getElementOffset(ElementIdx);
+ uint64_t ElementOffset = SL->getElementOffset(ElementIdx);
+ // Check that we can continue to model this GEP in a signed 64-bit offset.
+ if (ElementOffset > INT64_MAX ||
+ (GEPOffset >= 0 &&
+ ((uint64_t)GEPOffset + ElementOffset) > INT64_MAX)) {
+ DEBUG(dbgs() << "WARNING: Encountered a cumulative offset exceeding "
+ << "what can be represented in an int64_t!\n"
+ << " alloca: " << P.AI << "\n");
+ return false;
+ }
+ // For a negative running offset, do the addition in uint64_t and let it
+ // wrap around; converting back to int64_t yields the correct in-range sum.
+ if (GEPOffset < 0)
+ GEPOffset = ElementOffset + (uint64_t)GEPOffset;
+ else
+ GEPOffset += ElementOffset;
continue;
}
- GEPOffset
- += OpC->getZExtValue() * TD.getTypeAllocSize(GTI.getIndexedType());
+ APInt Index = OpC->getValue().sextOrTrunc(TD.getPointerSizeInBits());
+ Index *= APInt(Index.getBitWidth(),
+ TD.getTypeAllocSize(GTI.getIndexedType()));
+ Index += APInt(Index.getBitWidth(), (uint64_t)GEPOffset,
+ /*isSigned*/true);
+ // Check if the result can be stored in our int64_t offset.
+ if (!Index.isSignedIntN(sizeof(GEPOffset) * 8)) {
+ DEBUG(dbgs() << "WARNING: Encountered a cumulative offset exceeding "
+ << "what can be represented in an int64_t!\n"
+ << " alloca: " << P.AI << "\n");
+ return false;
+ }
+
+ GEPOffset = Index.getSExtValue();
}
return true;
}
return false;
}
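For reference, a minimal standalone sketch of the same overflow-checked accumulation (the helper name accumulateGEPIndex and the fixed 128-bit width are assumptions for illustration; the pass itself works at the target's pointer width): a constant GEP index is scaled by the element size, folded into the running signed offset, and rejected if the result no longer fits in an int64_t.

  #include <cstdint>
  #include "llvm/ADT/APInt.h"

  // Hypothetical helper, not part of the pass: fold Index * ElementSize into
  // GEPOffset, failing if the cumulative offset overflows int64_t. The math is
  // done in 128-bit APInts so the range check is meaningful for any inputs.
  static bool accumulateGEPIndex(int64_t &GEPOffset, int64_t Index,
                                 uint64_t ElementSize) {
    llvm::APInt Scaled(128, (uint64_t)Index, /*isSigned*/true);
    Scaled *= llvm::APInt(128, ElementSize);
    Scaled += llvm::APInt(128, (uint64_t)GEPOffset, /*isSigned*/true);
    if (!Scaled.isSignedIntN(64))
      return false;
    GEPOffset = Scaled.getSExtValue();
    return true;
  }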
- void insertUse(Instruction &I, uint64_t Offset, uint64_t Size,
+ void insertUse(Instruction &I, int64_t Offset, uint64_t Size,
bool IsSplittable = false) {
- uint64_t BeginOffset = Offset, EndOffset = Offset + Size;
-
- // Completely skip uses which start outside of the allocation.
- if (BeginOffset >= AllocSize) {
+ // Completely skip uses which don't overlap the allocation.
+ if ((Offset >= 0 && (uint64_t)Offset >= AllocSize) ||
+ (Offset < 0 && (uint64_t)-Offset >= Size)) {
DEBUG(dbgs() << "WARNING: Ignoring " << Size << " byte use @" << Offset
<< " which starts past the end of the " << AllocSize
<< " byte alloca:\n"
return;
}
- // Clamp the size to the allocation.
- if (EndOffset > AllocSize) {
+ // Clamp the start to the beginning of the allocation.
+ if (Offset < 0) {
+ DEBUG(dbgs() << "WARNING: Clamping a " << Size << " byte use @" << Offset
+ << " to start at the beginning of the alloca:\n"
+ << " alloca: " << P.AI << "\n"
+ << " use: " << I << "\n");
+ Size -= (uint64_t)-Offset;
+ Offset = 0;
+ }
+
+ uint64_t BeginOffset = Offset, EndOffset = BeginOffset + Size;
+
+ // Clamp the end offset to the end of the allocation. Note that this is
+ // formulated to handle even the case where "BeginOffset + Size" overflows.
+ assert(AllocSize >= BeginOffset); // Established above.
+ if (Size > AllocSize - BeginOffset) {
DEBUG(dbgs() << "WARNING: Clamping a " << Size << " byte use @" << Offset
<< " to remain within the " << AllocSize << " byte alloca:\n"
<< " alloca: " << P.AI << "\n"
P.Partitions.push_back(New);
}
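The clamping rules above can be summarized in a small standalone sketch (the helper clampUseToAlloca and its signature are hypothetical): a use at a signed offset is either rejected as non-overlapping or reduced to a [Begin, End) range inside the allocation, with the end computed so that it cannot wrap.

  #include <cstdint>

  // Hypothetical standalone helper illustrating the clamping above; returns
  // false when the use does not overlap [0, AllocSize) at all.
  static bool clampUseToAlloca(int64_t Offset, uint64_t Size, uint64_t AllocSize,
                               uint64_t &BeginOffset, uint64_t &EndOffset) {
    // Starts at or past the end, or ends at or before the beginning: no overlap.
    if ((Offset >= 0 && (uint64_t)Offset >= AllocSize) ||
        (Offset < 0 && -(uint64_t)Offset >= Size))
      return false;

    // Clamp a negative start to the beginning of the allocation, shrinking the
    // size by the bytes that fall before it. (Negating in uint64_t avoids
    // signed overflow for INT64_MIN.)
    if (Offset < 0) {
      Size -= -(uint64_t)Offset;
      Offset = 0;
    }

    BeginOffset = (uint64_t)Offset; // Now 0 <= BeginOffset < AllocSize.
    // Compare sizes instead of computing BeginOffset + Size first, so a sum
    // that would wrap around cannot escape the clamp.
    EndOffset = Size > AllocSize - BeginOffset ? AllocSize : BeginOffset + Size;
    return true;
  }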
- bool handleLoadOrStore(Type *Ty, Instruction &I, uint64_t Offset) {
+ bool handleLoadOrStore(Type *Ty, Instruction &I, int64_t Offset) {
uint64_t Size = TD.getTypeStoreSize(Ty);
// If this memory access can be shown to *statically* extend outside the
// bounds of the allocation, its behavior is undefined, so simply
// ignore it. Note that this is more strict than the generic clamping
// behavior of insertUse. We also try to handle cases which might run the
// risk of overflow.
// FIXME: We should instead consider the pointer to have escaped if this
// function is being instrumented for addressing bugs or race conditions.
- if (Offset >= AllocSize || Size > AllocSize || Offset + Size > AllocSize) {
+ if (Offset < 0 || (uint64_t)Offset >= AllocSize ||
+ Size > (AllocSize - (uint64_t)Offset)) {
DEBUG(dbgs() << "WARNING: Ignoring " << Size << " byte "
<< (isa<LoadInst>(I) ? "load" : "store") << " @" << Offset
<< " which extends past the end of the " << AllocSize
}
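To see why the bound is phrased as Size > (AllocSize - (uint64_t)Offset) rather than the old Offset + Size > AllocSize, here is a tiny standalone example with hypothetical values: with an in-range offset and a huge size, the naive sum wraps around and passes, while the subtraction form cannot wrap once the offset is known to be inside the allocation.

  #include <cassert>
  #include <cstdint>

  int main() {
    uint64_t AllocSize = 16, Offset = 8;
    uint64_t Size = UINT64_MAX - 4; // A wildly out-of-bounds access size.
    // Naive form: Offset + Size wraps around to 3, which is <= AllocSize, so
    // the out-of-bounds access would be accepted.
    assert(Offset + Size <= AllocSize);
    // Overflow-safe form: AllocSize - Offset cannot wrap (Offset < AllocSize),
    // so the access is correctly flagged as extending past the end.
    assert(Size > AllocSize - Offset);
    return 0;
  }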
bool visitGetElementPtrInst(GetElementPtrInst &GEPI) {
- uint64_t GEPOffset;
+ int64_t GEPOffset;
if (!computeConstantGEPOffset(GEPI, GEPOffset))
return markAsEscaping(GEPI);
P.DeadUsers.push_back(&I);
}
- void insertUse(Instruction &User, uint64_t Offset, uint64_t Size) {
- uint64_t BeginOffset = Offset, EndOffset = Offset + Size;
-
+ void insertUse(Instruction &User, int64_t Offset, uint64_t Size) {
// If the use extends outside of the allocation, record it as a dead use
// for elimination later.
- if (BeginOffset >= AllocSize || Size == 0)
+ if ((Offset >= 0 && (uint64_t)Offset >= AllocSize) ||
+ (Offset < 0 && (uint64_t)-Offset >= Size))
return markAsDead(User);
- // Bound the use by the size of the allocation.
- if (EndOffset > AllocSize)
+ // Clamp the start to the beginning of the allocation.
+ if (Offset < 0) {
+ Size -= (uint64_t)-Offset;
+ Offset = 0;
+ }
+
+ uint64_t BeginOffset = Offset, EndOffset = BeginOffset + Size;
+
+ // Clamp the end offset to the end of the allocation. Note that this is
+ // formulated to handle even the case where "BeginOffset + Size" overflows.
+ assert(AllocSize >= BeginOffset); // Established above.
+ if (Size > AllocSize - BeginOffset)
EndOffset = AllocSize;
// NB: This only works if we have zero overlapping partitions.
}
}
- void handleLoadOrStore(Type *Ty, Instruction &I, uint64_t Offset) {
+ void handleLoadOrStore(Type *Ty, Instruction &I, int64_t Offset) {
uint64_t Size = TD.getTypeStoreSize(Ty);
// If this memory access can be shown to *statically* extend outside the
// bounds of the allocation, its behavior is undefined, so simply
// ignore it. Note that this is more strict than the generic clamping
// behavior of insertUse.
- if (Offset >= AllocSize || Size > AllocSize || Offset + Size > AllocSize)
+ if (Offset < 0 || (uint64_t)Offset >= AllocSize ||
+ Size > (AllocSize - (uint64_t)Offset))
return markAsDead(I);
insertUse(I, Offset, Size);
if (GEPI.use_empty())
return markAsDead(GEPI);
- uint64_t GEPOffset;
+ int64_t GEPOffset;
if (!computeConstantGEPOffset(GEPI, GEPOffset))
llvm_unreachable("Unable to compute constant offset for use");
%val = load i64* %gep
ret i32 undef
}
+
+define i32 @test20() {
+; Ensure we can track negative offsets (before the beginning of the alloca) and
+; negative relative offsets from offsets starting past the end of the alloca.
+; CHECK: @test20
+; CHECK-NOT: alloca
+; CHECK: %[[sum1:.*]] = add i32 1, 2
+; CHECK: %[[sum2:.*]] = add i32 %[[sum1]], 3
+; CHECK: ret i32 %[[sum2]]
+
+entry:
+ %a = alloca [3 x i32]
+ %gep1 = getelementptr [3 x i32]* %a, i32 0, i32 0
+ store i32 1, i32* %gep1
+ %gep2.1 = getelementptr [3 x i32]* %a, i32 0, i32 -2
+ %gep2.2 = getelementptr i32* %gep2.1, i32 3
+ store i32 2, i32* %gep2.2
+ %gep3.1 = getelementptr [3 x i32]* %a, i32 0, i32 14
+ %gep3.2 = getelementptr i32* %gep3.1, i32 -12
+ store i32 3, i32* %gep3.2
+
+ %load1 = load i32* %gep1
+ %load2 = load i32* %gep2.2
+ %load3 = load i32* %gep3.2
+ %sum1 = add i32 %load1, %load2
+ %sum2 = add i32 %sum1, %load3
+ ret i32 %sum2
+}
+
+declare void @llvm.memset.p0i8.i64(i8* nocapture, i8, i64, i32, i1) nounwind
+
+define i8 @test21() {
+; Test allocations and offsets which border on overflow of the int64_t used
+; internally. This is awkward to exercise directly, as LLVM doesn't cleanly
+; support such extreme constructs.
+; CHECK: @test21
+; CHECK-NOT: alloca
+; CHECK: or i8 -1, -1
+
+entry:
+ %a = alloca [2305843009213693951 x i8]
+ %gep0 = getelementptr [2305843009213693951 x i8]* %a, i64 0, i64 2305843009213693949
+ store i8 255, i8* %gep0
+ %gep1 = getelementptr [2305843009213693951 x i8]* %a, i64 0, i64 -9223372036854775807
+ %gep2 = getelementptr i8* %gep1, i64 -1
+ call void @llvm.memset.p0i8.i64(i8* %gep2, i8 0, i64 18446744073709551615, i32 1, i1 false)
+ %gep3 = getelementptr i8* %gep1, i64 9223372036854775807
+ %gep4 = getelementptr i8* %gep3, i64 9223372036854775807
+ %gep5 = getelementptr i8* %gep4, i64 -6917529027641081857
+ store i8 255, i8* %gep5
+ %cast1 = bitcast i8* %gep4 to i32*
+ store i32 0, i32* %cast1
+ %load = load i8* %gep0
+ %gep6 = getelementptr i8* %gep0, i32 1
+ %load2 = load i8* %gep6
+ %result = or i8 %load, %load2
+ ret i8 %result
+}