return MemDepResult::getNonFuncLocal();
}
-/// Return true if LI is a load that would fully overlap MemLoc if done as
-/// a wider legal integer load.
-///
-/// MemLocBase, MemLocOffset are lazily computed here the first time the
-/// base/offs of memloc is needed.
-static bool isLoadLoadClobberIfExtendedToFullWidth(const MemoryLocation &MemLoc,
- const Value *&MemLocBase,
- int64_t &MemLocOffs,
- const LoadInst *LI) {
- const DataLayout &DL = LI->getModule()->getDataLayout();
-
- // If we haven't already computed the base/offset of MemLoc, do so now.
- if (!MemLocBase)
- MemLocBase = GetPointerBaseWithConstantOffset(MemLoc.Ptr, MemLocOffs, DL);
-
- unsigned Size = MemoryDependenceResults::getLoadLoadClobberFullWidthSize(
- MemLocBase, MemLocOffs, MemLoc.Size, LI);
- return Size != 0;
-}
-
unsigned MemoryDependenceResults::getLoadLoadClobberFullWidthSize(
const Value *MemLocBase, int64_t MemLocOffs, unsigned MemLocSize,
const LoadInst *LI) {
MemDepResult MemoryDependenceResults::getSimplePointerDependencyFrom(
const MemoryLocation &MemLoc, bool isLoad, BasicBlock::iterator ScanIt,
BasicBlock *BB, Instruction *QueryInst, unsigned *Limit) {
-
- const Value *MemLocBase = nullptr;
- int64_t MemLocOffset = 0;
bool isInvariantLoad = false;
if (!Limit) {
AliasResult R = AA.alias(LoadLoc, MemLoc);
if (isLoad) {
- if (R == NoAlias) {
- // If this is an over-aligned integer load (for example,
- // "load i8* %P, align 4") see if it would obviously overlap with the
- // queried location if widened to a larger load (e.g. if the queried
- // location is 1 byte at P+1). If so, return it as a load/load
- // clobber result, allowing the client to decide to widen the load if
- // it wants to.
- if (IntegerType *ITy = dyn_cast<IntegerType>(LI->getType())) {
- if (LI->getAlignment() * 8 > ITy->getPrimitiveSizeInBits() &&
- isLoadLoadClobberIfExtendedToFullWidth(MemLoc, MemLocBase,
- MemLocOffset, LI))
- return MemDepResult::getClobber(Inst);
- }
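+      // If the locations do not alias, this load cannot affect the query.
+      // (The old heuristic that treated over-aligned integer loads as
+      // clobbers when widening them would overlap MemLoc has been removed.)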
+ if (R == NoAlias)
continue;
- }
// Must aliased loads are defs of each other.
if (R == MustAlias)
ret void
}
-;; Accessing bytes 4 and 5. Ok to widen to i16.
+;; Accessing bytes 4 and 5. Do not widen to i16.
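+;; With widening gone, ASan instruments the two one-byte accesses
+;; separately, so the checks below expect two __asan_report_load1 calls
+;; rather than a single __asan_report_load2.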
define i32 @test_widening_ok(i8* %P) nounwind ssp noredzone sanitize_address {
entry:
%add = add nsw i32 %conv, %conv2
ret i32 %add
; CHECK: @test_widening_ok
-; CHECK: __asan_report_load2
+; CHECK: __asan_report_load1
+; CHECK: __asan_report_load1
; CHECK-NOT: __asan_report
; CHECK: end_test_widening_ok
}
}
; %1 is partially redundant if %0 can be widened to a 64-bit load.
+; But we should not widen %0 to a 64-bit load.
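+; With widening disabled, %0 stays a 32-bit load and the %1 load in if.end
+; must remain, which is what the updated checks verify.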
; CHECK-LABEL: define i32 @overaligned_load
; CHECK: if.then:
-; CHECK: %0 = load i64
-; CHECK: [[LSHR:%[0-9]+]] = lshr i64 %0, 32, !dbg [[LSHR_LOC:![0-9]+]]
-; CHECK: trunc i64 [[LSHR]] to i32
+; CHECK-NOT: %0 = load i64
+; CHECK-NOT: [[LSHR:%[0-9]+]] = lshr i64 %0, 32, !dbg [[LSHR_LOC:![0-9]+]]
+; CHECK-NOT: trunc i64 [[LSHR]] to i32
; CHECK: if.end:
-; CHECK-NOT: %1 = load i32, i32*
-; CHECK: [[LSHR_LOC]] = !DILocation(line: 101, column: 1, scope: !{{.*}})
+; CHECK: %1 = load i32, i32*
+; CHECK-NOT: [[LSHR_LOC]] = !DILocation(line: 101, column: 1, scope: !{{.*}})
define i32 @overaligned_load(i32 %a, i32* nocapture %b) !dbg !13 {
entry:
;;===----------------------------------------------------------------------===;;
;; Load Widening
+;; We explicitly choose NOT to widen, and these tests make sure we don't.
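+;; GVN used to merge adjacent i8 loads from %widening1 into a single wider
+;; integer load; the checks below now require the individual i8 loads to stay.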
;;===----------------------------------------------------------------------===;;
%widening1 = type { i32, i8, i8, i8, i8 }
ret i32 %add
; CHECK-LABEL: @test_widening1(
; CHECK-NOT: load
-; CHECK: load i16, i16*
+; CHECK: load i8, i8*
+; CHECK: load i8, i8*
; CHECK-NOT: load
; CHECK: ret i32
}
ret i32 %add3
; CHECK-LABEL: @test_widening2(
; CHECK-NOT: load
-; CHECK: load i32, i32*
+; CHECK: load i8, i8*
+; CHECK: load i8, i8*
+; CHECK: load i8, i8*
+; CHECK: load i8, i8*
; CHECK-NOT: load
; CHECK: ret i32
}
;; loads reusing a load value.
define i64 @test1({ i1, i8 }* %predA, { i1, i8 }* %predB) {
; CHECK-LABEL: @test1
-; CHECK: [[V1:%.*]] = load i16, i16* %{{.*}}
-; CHECK: [[V2:%.*]] = lshr i16 [[V1]], 8
-; CHECK: trunc i16 [[V2]] to i1
+; CHECK-NOT: [[V1:%.*]] = load i16, i16* %{{.*}}
+; CHECK-NOT: [[V2:%.*]] = lshr i16 [[V1]], 8
+; CHECK-NOT: trunc i16 [[V2]] to i1
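+; The i1 and i8 field loads are no longer merged into a single i16 load that
+; is then shifted and truncated to recover each field.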
%valueLoadA.fca.0.gep = getelementptr inbounds { i1, i8 }, { i1, i8 }* %predA, i64 0, i32 0
%valueLoadA.fca.0.load = load i1, i1* %valueLoadA.fca.0.gep, align 8
}
; CHECK-LABEL: @TestNoAsan
-; CHECK: %[[LOAD:[^ ]+]] = load i32
-; CHECK: {{.*}} = ashr i32 %[[LOAD]]
-; CHECK-NOT: {{.*}} = phi
+; CHECK: ret i32 0
define i32 @TestAsan() sanitize_address {
%1 = tail call noalias i8* @_Znam(i64 2)
%x.tr = phi %struct.a* [ %x, %entry ], [ null, %land.lhs.true ]
%code1 = getelementptr inbounds %struct.a, %struct.a* %x.tr, i32 0, i32 0
%0 = load i16, i16* %code1, align 4
-; CHECK: load i32, i32*
+; CHECK: load i16, i16*
%conv = zext i16 %0 to i32
switch i32 %conv, label %if.end.50 [
i32 43, label %cleanup
cond.false: ; preds = %if.then.26
; CHECK: cond.false:
-; CHECK-NOT: load
+; CHECK: load i16
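+; Because the earlier %code1 load is no longer widened to i32, the value of
+; %mode is not available here and its i16 load must remain.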
%mode = getelementptr inbounds %struct.a, %struct.a* %x.tr.lcssa163, i32 0, i32 1
%bf.load = load i16, i16* %mode, align 2
%bf.shl = shl i16 %bf.load, 8