return Base.FI;
}
- void setOffset(int64_t Offset_) { Offset = Offset_; }
+ void setOffset(int64_t Offset_) {
+ // NOTE(review): offsets are kept non-negative throughout this file --
+ // presumably because wasm encodes memory-access offsets as unsigned
+ // immediates (see "wasm's offsets can't [wrap]" below). Callers are
+ // expected to range-check before calling; the assert enforces that.
+ assert(Offset_ >= 0 && "Offsets must be non-negative");
+ Offset = Offset_;
+ }
int64_t getOffset() const { return Offset; }
void setGlobalValue(const GlobalValue *G) { GV = G; }
const GlobalValue *getGlobalValue() const { return GV; }
case Instruction::GetElementPtr: {
Address SavedAddr = Addr;
uint64_t TmpOffset = Addr.getOffset();
+ // Non-inbounds geps can wrap; wasm's offsets can't.
+ if (!cast<GEPOperator>(U)->isInBounds())
+ goto unsupported_gep;
// Iterate through the GEP folding the constants into offsets where
// we can.
for (gep_type_iterator GTI = gep_type_begin(U), E = gep_type_end(U);
}
}
}
- // Try to grab the base operand now.
- Addr.setOffset(TmpOffset);
- if (computeAddress(U->getOperand(0), Addr))
- return true;
+ // Don't fold in negative offsets.
+ if (int64_t(TmpOffset) >= 0) {
+ // Try to grab the base operand now.
+ Addr.setOffset(TmpOffset);
+ if (computeAddress(U->getOperand(0), Addr))
+ return true;
+ }
// We failed, restore everything and try the other options.
Addr = SavedAddr;
unsupported_gep:
std::swap(LHS, RHS);
if (const ConstantInt *CI = dyn_cast<ConstantInt>(RHS)) {
- Addr.setOffset(Addr.getOffset() + CI->getSExtValue());
- return computeAddress(LHS, Addr);
+ uint64_t TmpOffset = Addr.getOffset() + CI->getSExtValue();
+ if (int64_t(TmpOffset) >= 0) {
+ Addr.setOffset(TmpOffset);
+ return computeAddress(LHS, Addr);
+ }
}
Address Backup = Addr;
const Value *RHS = U->getOperand(1);
if (const ConstantInt *CI = dyn_cast<ConstantInt>(RHS)) {
- Addr.setOffset(Addr.getOffset() - CI->getSExtValue());
- return computeAddress(LHS, Addr);
+ int64_t TmpOffset = Addr.getOffset() - CI->getSExtValue();
+ if (TmpOffset >= 0) {
+ Addr.setOffset(TmpOffset);
+ return computeAddress(LHS, Addr);
+ }
}
break;
}
%y = bitcast i64 %x to double
ret double %y
}
+
+; Do fold offsets into geps.
+; CHECK-LABEL: do_fold_offset_into_gep:
+; CHECK: i64.load $push{{[0-9]+}}=, 8($0)
+define i64 @do_fold_offset_into_gep(i64* %p) {
+bb:
+  ; inbounds gep with index 1 on i64 => constant byte offset 8, which the
+  ; CHECK above expects to be folded into the load's immediate: 8($0).
+  %tmp = getelementptr inbounds i64, i64* %p, i32 1
+  %tmp2 = load i64, i64* %tmp, align 8
+  ret i64 %tmp2
+}
+
+; Don't fold negative offsets into geps.
+; CHECK-LABEL: dont_fold_negative_offset:
+; CHECK: i64.load $push{{[0-9]+}}=, 0($pop{{[0-9]+}})
+define i64 @dont_fold_negative_offset(i64* %p) {
+bb:
+  ; index -1 => byte offset -8; negative offsets must not be folded into the
+  ; load immediate, so the CHECK above expects an explicitly computed address
+  ; with a zero immediate: 0($pop...).
+  %tmp = getelementptr inbounds i64, i64* %p, i32 -1
+  %tmp2 = load i64, i64* %tmp, align 8
+  ret i64 %tmp2
+}
+
+; Don't fold non-inbounds geps.
+; CHECK-LABEL: dont_fold_non_inbounds_gep:
+; CHECK: i64.load $push{{[0-9]+}}=, 0($pop{{[0-9]+}})
+define i64 @dont_fold_non_inbounds_gep(i64* %p) {
+bb:
+  ; no `inbounds` keyword: the gep may wrap, so its offset must not be folded
+  ; even though it is positive -- the CHECK above expects 0($pop...).
+  %tmp = getelementptr i64, i64* %p, i32 1
+  %tmp2 = load i64, i64* %tmp, align 8
+  ret i64 %tmp2
+}