case llvm::Intrinsic::memmove:
case llvm::Intrinsic::memcpy:
AF = SE->getSCEVAtScope(cast<MemTransferInst>(II).getSource(), L);
- BP = dyn_cast<SCEVUnknown>(SE->getPointerBase(AF));
- // Bail if the source pointer is not valid.
- if (!isValidAccess(&II, AF, BP, Context))
- return false;
+ if (!AF->isZero()) {
+ BP = dyn_cast<SCEVUnknown>(SE->getPointerBase(AF));
+ // Bail if the source pointer is not valid.
+ if (!isValidAccess(&II, AF, BP, Context))
+ return false;
+ }
// Fall through
case llvm::Intrinsic::memset:
AF = SE->getSCEVAtScope(cast<MemIntrinsic>(II).getDest(), L);
- BP = dyn_cast<SCEVUnknown>(SE->getPointerBase(AF));
- // Bail if the destination pointer is not valid.
- if (!isValidAccess(&II, AF, BP, Context))
- return false;
+ if (!AF->isZero()) {
+ BP = dyn_cast<SCEVUnknown>(SE->getPointerBase(AF));
+ // Bail if the destination pointer is not valid.
+ if (!isValidAccess(&II, AF, BP, Context))
+ return false;
+ }
// Bail if the length is not affine.
if (!isAffine(SE->getSCEVAtScope(cast<MemIntrinsic>(II).getLength(), L), L,
auto *DestPtrVal = MemIntr->getDest();
assert(DestPtrVal);
+
auto *DestAccFunc = SE->getSCEVAtScope(DestPtrVal, L);
assert(DestAccFunc);
+ // Ignore accesses to "NULL".
+ // TODO: We could use this to optimize the region further, e.g., intersect
+ // the context with
+ // isl_set_complement(isl_set_params(getDomain()))
+ // as we know it would be undefined to execute this instruction anyway.
+ if (DestAccFunc->isZero())
+ return true;
+
auto *DestPtrSCEV = dyn_cast<SCEVUnknown>(SE->getPointerBase(DestAccFunc));
assert(DestPtrSCEV);
DestAccFunc = SE->getMinusSCEV(DestAccFunc, DestPtrSCEV);
auto *SrcPtrVal = MemTrans->getSource();
assert(SrcPtrVal);
+
auto *SrcAccFunc = SE->getSCEVAtScope(SrcPtrVal, L);
assert(SrcAccFunc);
+ // Ignore accesses to "NULL".
+ // TODO: See above TODO
+ if (SrcAccFunc->isZero())
+ return true;
+
auto *SrcPtrSCEV = dyn_cast<SCEVUnknown>(SE->getPointerBase(SrcAccFunc));
assert(SrcPtrSCEV);
SrcAccFunc = SE->getMinusSCEV(SrcAccFunc, SrcPtrSCEV);
--- /dev/null
+; RUN: opt %loadPolly -polly-allow-modref-calls -polly-scops -analyze < %s | FileCheck %s
+; RUN: opt %loadPolly -polly-allow-modref-calls -S -polly-codegen < %s
+;
+; Verify we can handle a memset to "null" and that we do not model it.
+; TODO: We could use the undefined memset to optimize the code further; see
+; the TODOs in ScopInfo.cpp.
+;
+; CHECK: Statements {
+; CHECK-NEXT: Stmt_for_cond5_preheader_us221
+; CHECK-NEXT: Domain :=
+; CHECK-NEXT: { Stmt_for_cond5_preheader_us221[0] };
+; CHECK-NEXT: Schedule :=
+; CHECK-NEXT: { Stmt_for_cond5_preheader_us221[i0] -> [0] };
+; CHECK-NEXT: MustWriteAccess := [Reduction Type: NONE] [Scalar: 0]
+; CHECK-NEXT: { Stmt_for_cond5_preheader_us221[i0] -> MemRef_A[0] };
+; CHECK-NEXT: }
+
+;
+target datalayout = "e-m:e-i64:64-i128:128-n8:16:32:64-S128"
+
+; A single-statement SCoP: the loop body stores to %A and also performs a
+; memset whose destination is the null literal. Per the CHECK lines above,
+; only the store to %A is modeled as an access; the null memset is ignored.
+define void @test(i32* %A) {
+entry:
+ br i1 undef, label %for.end68, label %for.cond5.preheader.lr.ph
+
+for.cond5.preheader.lr.ph: ; preds = %entry
+ br label %for.cond5.preheader.us221
+
+for.cond5.preheader.us221: ; preds = %for.cond5.preheader.us221, %for.cond5.preheader.lr.ph
+ store i32 0, i32* %A
+ ; Destination is "null": exercises the new "ignore accesses to NULL" path
+ ; (the access function is zero, so no MemoryAccess is built for it).
+ call void @llvm.memset.p0i8.i64(i8* null, i8 0, i64 undef, i32 1, i1 false)
+ br i1 true, label %for.end68, label %for.cond5.preheader.us221
+
+for.end68: ; preds = %for.cond5.preheader.us221, %entry
+ ret void
+}
+
+declare void @llvm.memset.p0i8.i64(i8* nocapture, i8, i64, i32, i1)