static const OMPClauseWithPreInit *get(const OMPClause *C);
};
+/// Class that handles the post-update expression for clauses such as
+/// 'lastprivate', 'reduction', etc.
+class OMPClauseWithPostUpdate {
+ friend class OMPClauseReader;
+ /// Post-update expression for the clause.
+ Expr *PostUpdate;
+protected:
+ /// Set post-update expression for the clause.
+ void setPostUpdateExpr(Expr *S) { PostUpdate = S; }
+ OMPClauseWithPostUpdate(const OMPClause *This) : PostUpdate(nullptr) {
+ assert(get(This) && "get is not tuned.");
+ }
+
+public:
+ /// Get post-update expression for the clause.
+ const Expr *getPostUpdateExpr() const { return PostUpdate; }
+ /// Get post-update expression for the clause.
+ Expr *getPostUpdateExpr() { return PostUpdate; }
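+ /// Return \a C cast to OMPClauseWithPostUpdate if its clause kind can carry
+ /// a post-update expression, or nullptr otherwise.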
+ static OMPClauseWithPostUpdate *get(OMPClause *C);
+ static const OMPClauseWithPostUpdate *get(const OMPClause *C);
+};
+
/// \brief This represents clauses with the list of variables like 'private',
/// 'firstprivate', 'copyin', 'shared', or 'reduction' clauses in the
/// '#pragma omp ...' directives.
/// with the variables 'a' and 'b'.
class OMPLastprivateClause final
: public OMPVarListClause<OMPLastprivateClause>,
+ public OMPClauseWithPreInit,
+ public OMPClauseWithPostUpdate,
private llvm::TrailingObjects<OMPLastprivateClause, Expr *> {
// There are 4 additional tail-allocated arrays at the end of the class:
// 1. Contains list of pseudo variables with the default initialization for
OMPLastprivateClause(SourceLocation StartLoc, SourceLocation LParenLoc,
SourceLocation EndLoc, unsigned N)
: OMPVarListClause<OMPLastprivateClause>(OMPC_lastprivate, StartLoc,
- LParenLoc, EndLoc, N) {}
+ LParenLoc, EndLoc, N),
+ OMPClauseWithPreInit(this), OMPClauseWithPostUpdate(this) {}
/// \brief Build an empty clause.
///
explicit OMPLastprivateClause(unsigned N)
: OMPVarListClause<OMPLastprivateClause>(
OMPC_lastprivate, SourceLocation(), SourceLocation(),
- SourceLocation(), N) {}
+ SourceLocation(), N),
+ OMPClauseWithPreInit(this), OMPClauseWithPostUpdate(this) {}
/// \brief Get the list of helper expressions for initialization of private
/// copies for lastprivate variables.
/// \endcode
/// Required for proper codegen of final assignment performed by the
/// lastprivate clause.
- ///
+ /// \param PreInit Statement that must be executed before entering the OpenMP
+ /// region with this clause.
+ /// \param PostUpdate Expression that must be executed after exit from the
+ /// OpenMP region with this clause.
///
static OMPLastprivateClause *
Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation LParenLoc,
SourceLocation EndLoc, ArrayRef<Expr *> VL, ArrayRef<Expr *> SrcExprs,
- ArrayRef<Expr *> DstExprs, ArrayRef<Expr *> AssignmentOps);
+ ArrayRef<Expr *> DstExprs, ArrayRef<Expr *> AssignmentOps,
+ Stmt *PreInit, Expr *PostUpdate);
/// \brief Creates an empty clause with the place for \a N variables.
///
/// \param C AST context.
template <typename T> bool VisitOMPClauseList(T *Node);
/// Process clauses with pre-initializers.
bool VisitOMPClauseWithPreInit(OMPClauseWithPreInit *Node);
+ bool VisitOMPClauseWithPostUpdate(OMPClauseWithPostUpdate *Node);
bool dataTraverseNode(Stmt *S, DataRecursionQueue *Queue);
};
return true;
}
+template <typename Derived>
+bool RecursiveASTVisitor<Derived>::VisitOMPClauseWithPostUpdate(
+ OMPClauseWithPostUpdate *Node) {
+ TRY_TO(TraverseStmt(Node->getPostUpdateExpr()));
+ return true;
+}
+
template <typename Derived>
bool RecursiveASTVisitor<Derived>::VisitOMPIfClause(OMPIfClause *C) {
TRY_TO(TraverseStmt(C->getCondition()));
bool RecursiveASTVisitor<Derived>::VisitOMPLastprivateClause(
OMPLastprivateClause *C) {
TRY_TO(VisitOMPClauseList(C));
+ TRY_TO(VisitOMPClauseWithPreInit(C));
+ TRY_TO(VisitOMPClauseWithPostUpdate(C));
for (auto *E : C->private_copies()) {
TRY_TO(TraverseStmt(E));
}
return static_cast<const OMPDistScheduleClause *>(C);
case OMPC_firstprivate:
return static_cast<const OMPFirstprivateClause *>(C);
+ case OMPC_lastprivate:
+ return static_cast<const OMPLastprivateClause *>(C);
case OMPC_default:
case OMPC_proc_bind:
case OMPC_if:
case OMPC_simdlen:
case OMPC_collapse:
case OMPC_private:
+ case OMPC_shared:
+ case OMPC_reduction:
+ case OMPC_linear:
+ case OMPC_aligned:
+ case OMPC_copyin:
+ case OMPC_copyprivate:
+ case OMPC_ordered:
+ case OMPC_nowait:
+ case OMPC_untied:
+ case OMPC_mergeable:
+ case OMPC_threadprivate:
+ case OMPC_flush:
+ case OMPC_read:
+ case OMPC_write:
+ case OMPC_update:
+ case OMPC_capture:
+ case OMPC_seq_cst:
+ case OMPC_depend:
+ case OMPC_device:
+ case OMPC_threads:
+ case OMPC_simd:
+ case OMPC_map:
+ case OMPC_num_teams:
+ case OMPC_thread_limit:
+ case OMPC_priority:
+ case OMPC_grainsize:
+ case OMPC_nogroup:
+ case OMPC_num_tasks:
+ case OMPC_hint:
+ case OMPC_defaultmap:
+ case OMPC_unknown:
+ break;
+ }
+
+ return nullptr;
+}
+
+OMPClauseWithPostUpdate *OMPClauseWithPostUpdate::get(OMPClause *C) {
+ auto *Res = OMPClauseWithPostUpdate::get(const_cast<const OMPClause *>(C));
+ return Res ? const_cast<OMPClauseWithPostUpdate *>(Res) : nullptr;
+}
+
+const OMPClauseWithPostUpdate *
+OMPClauseWithPostUpdate::get(const OMPClause *C) {
+ switch (C->getClauseKind()) {
case OMPC_lastprivate:
+ return static_cast<const OMPLastprivateClause *>(C);
+ case OMPC_schedule:
+ case OMPC_dist_schedule:
+ case OMPC_firstprivate:
+ case OMPC_default:
+ case OMPC_proc_bind:
+ case OMPC_if:
+ case OMPC_final:
+ case OMPC_num_threads:
+ case OMPC_safelen:
+ case OMPC_simdlen:
+ case OMPC_collapse:
+ case OMPC_private:
case OMPC_shared:
case OMPC_reduction:
case OMPC_linear:
OMPLastprivateClause *OMPLastprivateClause::Create(
const ASTContext &C, SourceLocation StartLoc, SourceLocation LParenLoc,
SourceLocation EndLoc, ArrayRef<Expr *> VL, ArrayRef<Expr *> SrcExprs,
- ArrayRef<Expr *> DstExprs, ArrayRef<Expr *> AssignmentOps) {
+ ArrayRef<Expr *> DstExprs, ArrayRef<Expr *> AssignmentOps, Stmt *PreInit,
+ Expr *PostUpdate) {
void *Mem = C.Allocate(totalSizeToAlloc<Expr *>(5 * VL.size()));
OMPLastprivateClause *Clause =
new (Mem) OMPLastprivateClause(StartLoc, LParenLoc, EndLoc, VL.size());
Clause->setSourceExprs(SrcExprs);
Clause->setDestinationExprs(DstExprs);
Clause->setAssignmentOps(AssignmentOps);
+ Clause->setPreInitStmt(PreInit);
+ Clause->setPostUpdateExpr(PostUpdate);
return Clause;
}
void Visit##Class(const Class *C);
#include "clang/Basic/OpenMPKinds.def"
void VistOMPClauseWithPreInit(const OMPClauseWithPreInit *C);
+ void VistOMPClauseWithPostUpdate(const OMPClauseWithPostUpdate *C);
};
void OMPClauseProfiler::VistOMPClauseWithPreInit(
Profiler->VisitStmt(S);
}
+void OMPClauseProfiler::VistOMPClauseWithPostUpdate(
+ const OMPClauseWithPostUpdate *C) {
+ if (auto *E = C->getPostUpdateExpr())
+ Profiler->VisitStmt(E);
+}
+
void OMPClauseProfiler::VisitOMPIfClause(const OMPIfClause *C) {
if (C->getCondition())
Profiler->VisitStmt(C->getCondition());
void
OMPClauseProfiler::VisitOMPLastprivateClause(const OMPLastprivateClause *C) {
VisitOMPClauseList(C);
+ VistOMPClauseWithPreInit(C);
+ VistOMPClauseWithPostUpdate(C);
for (auto *E : C->source_exprs()) {
Profiler->VisitStmt(E);
}
}
}
- class PostUpdateCleanup final : public EHScopeStack::Cleanup {
- const OMPExecutableDirective &S;
-
- public:
- PostUpdateCleanup(const OMPExecutableDirective &S) : S(S) {}
-
- void Emit(CodeGenFunction &CGF, Flags /*flags*/) override {
- if (!CGF.HaveInsertPoint())
- return;
- (void)S;
- // TODO: add cleanups for clauses that require post update.
- }
- };
-
public:
OMPLexicalScope(CodeGenFunction &CGF, const OMPExecutableDirective &S)
: Scope(CGF, S.getSourceRange()) {
emitPreInitStmt(CGF, S);
- CGF.EHStack.pushCleanup<PostUpdateCleanup>(NormalAndEHCleanup, S);
}
};
} // namespace
++ISrcRef;
++IDestRef;
}
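+ // Emit the clause's post-update expression (if any) for its side effects
+ // once the final lastprivate values have been copied back.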
+ if (auto *PostUpdate = C->getPostUpdateExpr())
+ EmitIgnoredExpr(PostUpdate);
}
if (IsLastIterCond)
EmitBlock(DoneBB, /*IsFinished=*/true);
PrivateCopies.push_back(nullptr);
continue;
}
- VarDecl *VD = nullptr;
- FieldDecl *FD = nullptr;
- ValueDecl *D;
auto *DRE = cast<DeclRefExpr>(DE->IgnoreParens());
- if (auto *OCE = dyn_cast<OMPCapturedExprDecl>(DRE->getDecl())) {
- FD = cast<FieldDecl>(
- cast<MemberExpr>(OCE->getInit()->IgnoreImpCasts())
- ->getMemberDecl());
- D = FD;
- } else {
- VD = cast<VarDecl>(DRE->getDecl());
- D = VD;
- }
- QualType Type = D->getType().getNonReferenceType();
- auto DVar = DSAStack->getTopDSA(D, false);
+ VarDecl *VD = cast<VarDecl>(DRE->getDecl());
+ QualType Type = VD->getType().getNonReferenceType();
+ auto DVar = DSAStack->getTopDSA(VD, false);
if (DVar.CKind == OMPC_lastprivate) {
// Generate helper private variable and initialize it with the
// default value. The address of the original variable is replaced
// region uses original variable for proper diagnostics.
auto *VDPrivate = buildVarDecl(
*this, DE->getExprLoc(), Type.getUnqualifiedType(),
- D->getName(), D->hasAttrs() ? &D->getAttrs() : nullptr);
+ VD->getName(), VD->hasAttrs() ? &VD->getAttrs() : nullptr);
ActOnUninitializedDecl(VDPrivate, /*TypeMayContainAuto=*/false);
if (VDPrivate->isInvalidDecl())
continue;
// Required for proper codegen of combined directives.
// TODO: add processing for other clauses.
if (auto *C = OMPClauseWithPreInit::get(Clause)) {
- if (auto *S = cast_or_null<DeclStmt>(C->getPreInitStmt())) {
- for (auto *D : S->decls())
+ if (auto *DS = cast_or_null<DeclStmt>(C->getPreInitStmt())) {
+ for (auto *D : DS->decls())
MarkVariableReferenced(D->getLocation(), cast<VarDecl>(D));
}
}
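+ // Also mark declarations referenced by the post-update expression, for the
+ // same reason.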
+ if (auto *C = OMPClauseWithPostUpdate::get(Clause)) {
+ if (auto *E = C->getPostUpdateExpr())
+ MarkDeclarationsReferencedInExpr(E);
+ }
}
if (Clause->getClauseKind() == OMPC_schedule)
SC = cast<OMPScheduleClause>(Clause);
auto ElemType = Context.getBaseElementType(Type).getNonReferenceType();
// If an implicit firstprivate variable found it was checked already.
+ DSAStackTy::DSAVarData TopDVar;
if (!IsImplicitClause) {
DSAStackTy::DSAVarData DVar = DSAStack->getTopDSA(D, false);
+ TopDVar = DVar;
bool IsConstant = ElemType.isConstant(Context);
// OpenMP [2.4.13, Data-sharing Attribute Clauses]
// A list item that specifies a given variable may not appear in more
RefExpr->getExprLoc());
DeclRefExpr *Ref = nullptr;
if (!VD) {
- Ref = buildCapture(*this, D, RefExpr);
- ExprCaptures.push_back(Ref->getDecl());
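+ // On a combined directive, reuse the capture already built for a
+ // lastprivate clause on the same variable instead of capturing it again.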
+ if (TopDVar.CKind == OMPC_lastprivate)
+ Ref = TopDVar.PrivateCopy;
+ else {
+ Ref = buildCapture(*this, D, RefExpr);
+ if (!IsOpenMPCapturedDecl(D))
+ ExprCaptures.push_back(Ref->getDecl());
+ }
}
DSAStack->addDSA(D, RefExpr->IgnoreParens(), OMPC_firstprivate, Ref);
Vars.push_back(VD ? RefExpr->IgnoreParens() : Ref);
SmallVector<Expr *, 8> SrcExprs;
SmallVector<Expr *, 8> DstExprs;
SmallVector<Expr *, 8> AssignmentOps;
+ SmallVector<Decl *, 4> ExprCaptures;
+ SmallVector<Expr *, 4> ExprPostUpdates;
for (auto &RefExpr : VarList) {
assert(RefExpr && "NULL expr in OpenMP lastprivate clause.");
auto Res = getPrivateItem(*this, RefExpr);
continue;
DeclRefExpr *Ref = nullptr;
- if (!VD)
- Ref = buildCapture(*this, D, RefExpr);
+ if (!VD) {
+ if (TopDVar.CKind == OMPC_firstprivate)
+ Ref = TopDVar.PrivateCopy;
+ else {
+ Ref = buildCapture(*this, D, RefExpr);
+ if (!IsOpenMPCapturedDecl(D))
+ ExprCaptures.push_back(Ref->getDecl());
+ }
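+ // If the capture holds a copy of the original (firstprivate on the same
+ // directive, or a by-value capture of a non-captured declaration), build
+ // 'original = capture' to be emitted after the region as the post-update
+ // expression.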
+ if (TopDVar.CKind == OMPC_firstprivate ||
+ (!IsOpenMPCapturedDecl(D) &&
+ !Ref->getDecl()->getType()->isReferenceType())) {
+ ExprResult RefRes = DefaultLvalueConversion(Ref);
+ if (!RefRes.isUsable())
+ continue;
+ ExprResult PostUpdateRes =
+ BuildBinOp(DSAStack->getCurScope(), ELoc, BO_Assign,
+ RefExpr->IgnoreParenLValueCasts(), RefRes.get());
+ if (!PostUpdateRes.isUsable())
+ continue;
+ ExprPostUpdates.push_back(PostUpdateRes.get());
+ }
+ }
if (TopDVar.CKind != OMPC_firstprivate)
DSAStack->addDSA(D, RefExpr->IgnoreParens(), OMPC_lastprivate, Ref);
Vars.push_back(VD ? RefExpr->IgnoreParens() : Ref);
if (Vars.empty())
return nullptr;
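+ // Wrap any newly built capture declarations in a DeclStmt that is emitted
+ // before entering the region (the clause's pre-init statement).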
+ Stmt *PreInit = nullptr;
+ if (!ExprCaptures.empty()) {
+ PreInit = new (Context)
+ DeclStmt(DeclGroupRef::Create(Context, ExprCaptures.begin(),
+ ExprCaptures.size()),
+ SourceLocation(), SourceLocation());
+ }
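+ // Chain the individual post-update assignments into a single comma
+ // expression attached to the clause.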
+ Expr *PostUpdate = nullptr;
+ if (!ExprPostUpdates.empty()) {
+ for (auto *E : ExprPostUpdates) {
+ ExprResult PostUpdateRes =
+ PostUpdate
+ ? CreateBuiltinBinOp(SourceLocation(), BO_Comma, PostUpdate, E)
+ : E;
+ PostUpdate = PostUpdateRes.get();
+ }
+ }
return OMPLastprivateClause::Create(Context, StartLoc, LParenLoc, EndLoc,
- Vars, SrcExprs, DstExprs, AssignmentOps);
+ Vars, SrcExprs, DstExprs, AssignmentOps,
+ PreInit, PostUpdate);
}
OMPClause *Sema::ActOnOpenMPSharedClause(ArrayRef<Expr *> VarList,
#include "clang/Basic/OpenMPKinds.def"
OMPClause *readClause();
void VisitOMPClauseWithPreInit(OMPClauseWithPreInit *C);
+ void VisitOMPClauseWithPostUpdate(OMPClauseWithPostUpdate *C);
};
}
C->setPreInitStmt(Reader->Reader.ReadSubStmt());
}
+void OMPClauseReader::VisitOMPClauseWithPostUpdate(OMPClauseWithPostUpdate *C) {
+ C->setPostUpdateExpr(Reader->Reader.ReadSubExpr());
+}
+
void OMPClauseReader::VisitOMPIfClause(OMPIfClause *C) {
C->setNameModifier(static_cast<OpenMPDirectiveKind>(Record[Idx++]));
C->setNameModifierLoc(Reader->ReadSourceLocation(Record, Idx));
}
void OMPClauseReader::VisitOMPLastprivateClause(OMPLastprivateClause *C) {
+ VisitOMPClauseWithPreInit(C);
+ VisitOMPClauseWithPostUpdate(C);
C->setLParenLoc(Reader->ReadSourceLocation(Record, Idx));
unsigned NumVars = C->varlist_size();
SmallVector<Expr *, 16> Vars;
#include "clang/Basic/OpenMPKinds.def"
void writeClause(OMPClause *C);
void VisitOMPClauseWithPreInit(OMPClauseWithPreInit *C);
+ void VisitOMPClauseWithPostUpdate(OMPClauseWithPostUpdate *C);
};
}
Writer->Writer.AddStmt(C->getPreInitStmt());
}
+void OMPClauseWriter::VisitOMPClauseWithPostUpdate(OMPClauseWithPostUpdate *C) {
+ Writer->Writer.AddStmt(C->getPostUpdateExpr());
+}
+
void OMPClauseWriter::VisitOMPIfClause(OMPIfClause *C) {
Record.push_back(C->getNameModifier());
Writer->Writer.AddSourceLocation(C->getNameModifierLoc(), Record);
void OMPClauseWriter::VisitOMPLastprivateClause(OMPLastprivateClause *C) {
Record.push_back(C->varlist_size());
+ VisitOMPClauseWithPreInit(C);
+ VisitOMPClauseWithPostUpdate(C);
Writer->Writer.AddSourceLocation(C->getLParenLoc(), Record);
for (auto *VE : C->varlists())
Writer->Writer.AddStmt(VE);
#ifndef HEADER
#define HEADER
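+// Check that lastprivate works on non-static data members, including a
+// bit-field and a reference member, inside a member function.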
+struct SS {
+ int a;
+ int b : 4;
+ int &c;
+ SS(int &d) : a(0), b(0), c(d) {
+#pragma omp parallel
+#pragma omp for lastprivate(a, b, c)
+ for (int i = 0; i < 2; ++i)
+#ifdef LAMBDA
+ [&]() {
+ ++this->a, --b, (this)->c /= 1;
+#pragma omp parallel
+#pragma omp for lastprivate(a, b, c)
+ for (int i = 0; i < 2; ++i)
+ ++(this)->a, --b, this->c /= 1;
+ }();
+#elif defined(BLOCKS)
+ ^{
+ ++a;
+ --this->b;
+ (this)->c /= 1;
+#pragma omp parallel
+#pragma omp for lastprivate(a, b, c)
+ for (int i = 0; i < 2; ++i)
+ ++(this)->a, --b, this->c /= 1;
+ }();
+#else
+ ++this->a, --b, c /= 1;
+#endif
+ }
+};
+
+template <typename T>
+struct SST {
+ T a;
+ SST() : a(T()) {
+#pragma omp parallel
+#pragma omp for lastprivate(a)
+ for (int i = 0; i < 2; ++i)
+#ifdef LAMBDA
+ [&]() {
+ [&]() {
+ ++this->a;
+#pragma omp parallel
+#pragma omp for lastprivate(a)
+ for (int i = 0; i < 2; ++i)
+ ++(this)->a;
+ }();
+ }();
+#elif defined(BLOCKS)
+ ^{
+ ^{
+ ++a;
+#pragma omp parallel
+#pragma omp for lastprivate(a)
+ for (int i = 0; i < 2; ++i)
+ ++(this)->a;
+ }();
+ }();
+#else
+ ++(this)->a;
+#endif
+ }
+};
+
template <class T>
struct S {
T f;
float f;
char cnt;
+// CHECK: [[SS_TY:%.+]] = type { i{{[0-9]+}}, i8
+// LAMBDA: [[SS_TY:%.+]] = type { i{{[0-9]+}}, i8
+// BLOCKS: [[SS_TY:%.+]] = type { i{{[0-9]+}}, i8
// CHECK: [[S_FLOAT_TY:%.+]] = type { float }
// CHECK: [[S_INT_TY:%.+]] = type { i32 }
// CHECK-DAG: [[IMPLICIT_BARRIER_LOC:@.+]] = private unnamed_addr constant %{{.+}} { i32 0, i32 66, i32 0, i32 0, i8*
template <typename T>
T tmain() {
S<T> test;
+ SST<T> sst;
T t_var __attribute__((aligned(128))) = T();
T vec[] __attribute__((aligned(128))) = {1, 2};
S<T> s_arr[] __attribute__((aligned(128))) = {1, 2};
int main() {
static int sivar;
+ SS ss(sivar);
#ifdef LAMBDA
// LAMBDA: [[G:@.+]] = global i{{[0-9]+}} 1212,
// LAMBDA: [[SIVAR:@.+]] = internal global i{{[0-9]+}} 0,
// LAMBDA-LABEL: @main
- // LAMBDA: call void [[OUTER_LAMBDA:@.+]](
+ // LAMBDA: alloca [[SS_TY]],
+ // LAMBDA: alloca [[CAP_TY:%.+]],
+ // LAMBDA: call void [[OUTER_LAMBDA:@.+]]([[CAP_TY]]*
[&]() {
// LAMBDA: define{{.*}} internal{{.*}} void [[OUTER_LAMBDA]](
// LAMBDA: call void {{.+}} @__kmpc_fork_call({{.+}}, i32 1, {{.+}}* [[OMP_REGION:@.+]] to {{.+}}, i32* %{{.+}})
#pragma omp parallel
#pragma omp for lastprivate(g, g1, sivar)
for (int i = 0; i < 2; ++i) {
+ // LAMBDA: define {{.+}} @{{.+}}([[SS_TY]]*
+ // LAMBDA: getelementptr inbounds [[SS_TY]], [[SS_TY]]* %{{.+}}, i32 0, i32 0
+ // LAMBDA: store i{{[0-9]+}} 0, i{{[0-9]+}}* %
+ // LAMBDA: getelementptr inbounds [[SS_TY]], [[SS_TY]]* %{{.+}}, i32 0, i32 1
+ // LAMBDA: store i8
+ // LAMBDA: getelementptr inbounds [[SS_TY]], [[SS_TY]]* %{{.+}}, i32 0, i32 2
+ // LAMBDA: call void (%{{.+}}*, i{{[0-9]+}}, void (i{{[0-9]+}}*, i{{[0-9]+}}*, ...)*, ...) @__kmpc_fork_call(%{{.+}}* @{{.+}}, i{{[0-9]+}} 1, void (i{{[0-9]+}}*, i{{[0-9]+}}*, ...)* bitcast (void (i{{[0-9]+}}*, i{{[0-9]+}}*, [[SS_TY]]*)* [[SS_MICROTASK:@.+]] to void
+ // LAMBDA: ret
+
+ // LAMBDA: define internal void [[SS_MICROTASK]](i{{[0-9]+}}* noalias [[GTID_ADDR:%.+]], i{{[0-9]+}}* noalias %{{.+}}, [[SS_TY]]* %{{.+}})
+ // LAMBDA: getelementptr {{.*}}[[SS_TY]], [[SS_TY]]* %{{.*}}, i32 0, i32 0
+ // LAMBDA: getelementptr {{.*}}[[SS_TY]], [[SS_TY]]* %{{.*}}, i32 0, i32 1
+ // LAMBDA: getelementptr {{.*}}[[SS_TY]], [[SS_TY]]* %{{.*}}, i32 0, i32 2
+ // LAMBDA: call void @__kmpc_for_static_init_4(
+ // LAMBDA-NOT: getelementptr {{.*}}[[SS_TY]], [[SS_TY]]*
+ // LAMBDA: call{{.*}} void
+ // LAMBDA: call void @__kmpc_for_static_fini(
+ // LAMBDA: br i1
+ // LAMBDA: [[B_REF:%.+]] = getelementptr {{.*}}[[SS_TY]], [[SS_TY]]* %{{.*}}, i32 0, i32 1
+ // LAMBDA: store i8 %{{.+}}, i8* [[B_REF]],
+ // LAMBDA: br label
+ // LAMBDA: ret void
+
+ // LAMBDA: define internal void @{{.+}}(i{{[0-9]+}}* noalias [[GTID_ADDR:%.+]], i{{[0-9]+}}* noalias %{{.+}}, [[SS_TY]]* %{{.+}}, i32* {{.+}}, i32* {{.+}}, i32* {{.+}})
+ // LAMBDA: alloca i{{[0-9]+}},
+ // LAMBDA: alloca i{{[0-9]+}},
+ // LAMBDA: alloca i{{[0-9]+}},
+ // LAMBDA: alloca i{{[0-9]+}},
+ // LAMBDA: alloca i{{[0-9]+}},
+ // LAMBDA: [[A_PRIV:%.+]] = alloca i{{[0-9]+}},
+ // LAMBDA: [[B_PRIV:%.+]] = alloca i{{[0-9]+}},
+ // LAMBDA: [[C_PRIV:%.+]] = alloca i{{[0-9]+}},
+ // LAMBDA: store i{{[0-9]+}}* [[A_PRIV]], i{{[0-9]+}}** [[REFA:%.+]],
+ // LAMBDA: store i{{[0-9]+}}* [[C_PRIV]], i{{[0-9]+}}** [[REFC:%.+]],
+ // LAMBDA: call void @__kmpc_for_static_init_4(
+ // LAMBDA: [[A_PRIV:%.+]] = load i{{[0-9]+}}*, i{{[0-9]+}}** [[REFA]],
+ // LAMBDA-NEXT: [[A_VAL:%.+]] = load i{{[0-9]+}}, i{{[0-9]+}}* [[A_PRIV]],
+ // LAMBDA-NEXT: [[INC:%.+]] = add nsw i{{[0-9]+}} [[A_VAL]], 1
+ // LAMBDA-NEXT: store i{{[0-9]+}} [[INC]], i{{[0-9]+}}* [[A_PRIV]],
+ // LAMBDA-NEXT: [[B_VAL:%.+]] = load i{{[0-9]+}}, i{{[0-9]+}}* [[B_PRIV]],
+ // LAMBDA-NEXT: [[DEC:%.+]] = add nsw i{{[0-9]+}} [[B_VAL]], -1
+ // LAMBDA-NEXT: store i{{[0-9]+}} [[DEC]], i{{[0-9]+}}* [[B_PRIV]],
+ // LAMBDA-NEXT: [[C_PRIV:%.+]] = load i{{[0-9]+}}*, i{{[0-9]+}}** [[REFC]],
+ // LAMBDA-NEXT: [[C_VAL:%.+]] = load i{{[0-9]+}}, i{{[0-9]+}}* [[C_PRIV]],
+ // LAMBDA-NEXT: [[DIV:%.+]] = sdiv i{{[0-9]+}} [[C_VAL]], 1
+ // LAMBDA-NEXT: store i{{[0-9]+}} [[DIV]], i{{[0-9]+}}* [[C_PRIV]],
+ // LAMBDA: call void @__kmpc_for_static_fini(
+ // LAMBDA: br i1
+ // LAMBDA: br label
+ // LAMBDA: ret void
+
// LAMBDA: define{{.*}} internal{{.*}} void [[OMP_REGION]](i32* noalias %{{.+}}, i32* noalias %{{.+}}, i32* dereferenceable(4) [[SIVAR:%.+]])
// LAMBDA: alloca i{{[0-9]+}},
// LAMBDA: alloca i{{[0-9]+}},
#elif defined(BLOCKS)
// BLOCKS: [[G:@.+]] = global i{{[0-9]+}} 1212,
// BLOCKS-LABEL: @main
+ // BLOCKS: call
// BLOCKS: call void {{%.+}}(i8
^{
// BLOCKS: define{{.*}} internal{{.*}} void {{.+}}(i8*
}
}();
return 0;
+// BLOCKS: define {{.+}} @{{.+}}([[SS_TY]]*
+// BLOCKS: getelementptr inbounds [[SS_TY]], [[SS_TY]]* %{{.+}}, i32 0, i32 0
+// BLOCKS: store i{{[0-9]+}} 0, i{{[0-9]+}}* %
+// BLOCKS: getelementptr inbounds [[SS_TY]], [[SS_TY]]* %{{.+}}, i32 0, i32 1
+// BLOCKS: store i8
+// BLOCKS: getelementptr inbounds [[SS_TY]], [[SS_TY]]* %{{.+}}, i32 0, i32 2
+// BLOCKS: call void (%{{.+}}*, i{{[0-9]+}}, void (i{{[0-9]+}}*, i{{[0-9]+}}*, ...)*, ...) @__kmpc_fork_call(%{{.+}}* @{{.+}}, i{{[0-9]+}} 1, void (i{{[0-9]+}}*, i{{[0-9]+}}*, ...)* bitcast (void (i{{[0-9]+}}*, i{{[0-9]+}}*, [[SS_TY]]*)* [[SS_MICROTASK:@.+]] to void
+// BLOCKS: ret
+
+// BLOCKS: define internal void [[SS_MICROTASK]](i{{[0-9]+}}* noalias [[GTID_ADDR:%.+]], i{{[0-9]+}}* noalias %{{.+}}, [[SS_TY]]* %{{.+}})
+// BLOCKS: getelementptr {{.*}}[[SS_TY]], [[SS_TY]]* %{{.*}}, i32 0, i32 0
+// BLOCKS: getelementptr {{.*}}[[SS_TY]], [[SS_TY]]* %{{.*}}, i32 0, i32 1
+// BLOCKS: getelementptr {{.*}}[[SS_TY]], [[SS_TY]]* %{{.*}}, i32 0, i32 2
+// BLOCKS: call void @__kmpc_for_static_init_4(
+// BLOCKS-NOT: getelementptr {{.*}}[[SS_TY]], [[SS_TY]]*
+// BLOCKS: call{{.*}} void
+// BLOCKS: call void @__kmpc_for_static_fini(
+// BLOCKS: br i1
+// BLOCKS: [[B_REF:%.+]] = getelementptr {{.*}}[[SS_TY]], [[SS_TY]]* %{{.*}}, i32 0, i32 1
+// BLOCKS: store i8 %{{.+}}, i8* [[B_REF]],
+// BLOCKS: br label
+// BLOCKS: ret void
+
+// BLOCKS: define internal void @{{.+}}(i{{[0-9]+}}* noalias [[GTID_ADDR:%.+]], i{{[0-9]+}}* noalias %{{.+}}, [[SS_TY]]* %{{.+}}, i32* {{.+}}, i32* {{.+}}, i32* {{.+}})
+// BLOCKS: alloca i{{[0-9]+}},
+// BLOCKS: alloca i{{[0-9]+}},
+// BLOCKS: alloca i{{[0-9]+}},
+// BLOCKS: alloca i{{[0-9]+}},
+// BLOCKS: alloca i{{[0-9]+}},
+// BLOCKS: [[A_PRIV:%.+]] = alloca i{{[0-9]+}},
+// BLOCKS: [[B_PRIV:%.+]] = alloca i{{[0-9]+}},
+// BLOCKS: [[C_PRIV:%.+]] = alloca i{{[0-9]+}},
+// BLOCKS: store i{{[0-9]+}}* [[A_PRIV]], i{{[0-9]+}}** [[REFA:%.+]],
+// BLOCKS: store i{{[0-9]+}}* [[C_PRIV]], i{{[0-9]+}}** [[REFC:%.+]],
+// BLOCKS: call void @__kmpc_for_static_init_4(
+// BLOCKS: [[A_PRIV:%.+]] = load i{{[0-9]+}}*, i{{[0-9]+}}** [[REFA]],
+// BLOCKS-NEXT: [[A_VAL:%.+]] = load i{{[0-9]+}}, i{{[0-9]+}}* [[A_PRIV]],
+// BLOCKS-NEXT: [[INC:%.+]] = add nsw i{{[0-9]+}} [[A_VAL]], 1
+// BLOCKS-NEXT: store i{{[0-9]+}} [[INC]], i{{[0-9]+}}* [[A_PRIV]],
+// BLOCKS-NEXT: [[B_VAL:%.+]] = load i{{[0-9]+}}, i{{[0-9]+}}* [[B_PRIV]],
+// BLOCKS-NEXT: [[DEC:%.+]] = add nsw i{{[0-9]+}} [[B_VAL]], -1
+// BLOCKS-NEXT: store i{{[0-9]+}} [[DEC]], i{{[0-9]+}}* [[B_PRIV]],
+// BLOCKS-NEXT: [[C_PRIV:%.+]] = load i{{[0-9]+}}*, i{{[0-9]+}}** [[REFC]],
+// BLOCKS-NEXT: [[C_VAL:%.+]] = load i{{[0-9]+}}, i{{[0-9]+}}* [[C_PRIV]],
+// BLOCKS-NEXT: [[DIV:%.+]] = sdiv i{{[0-9]+}} [[C_VAL]], 1
+// BLOCKS-NEXT: store i{{[0-9]+}} [[DIV]], i{{[0-9]+}}* [[C_PRIV]],
+// BLOCKS: call void @__kmpc_for_static_fini(
+// BLOCKS: br i1
+// BLOCKS: br label
+// BLOCKS: ret void
#else
S<float> test;
int t_var = 0;
// CHECK: call void (%{{.+}}*, i{{[0-9]+}}, void (i{{[0-9]+}}*, i{{[0-9]+}}*, ...)*, ...) @__kmpc_fork_call(%{{.+}}* @{{.+}}, i{{[0-9]+}} 4, void (i{{[0-9]+}}*, i{{[0-9]+}}*, ...)* bitcast (void (i{{[0-9]+}}*, i{{[0-9]+}}*, i32*, [2 x i32]*, [2 x [[S_INT_TY]]]*, [[S_INT_TY]]*)* [[TMAIN_MICROTASK:@.+]] to void
// CHECK: call void [[S_INT_TY_DESTR:@.+]]([[S_INT_TY]]*
// CHECK: ret
-//
+
+// CHECK: define {{.+}} @{{.+}}([[SS_TY]]*
+// CHECK: getelementptr inbounds [[SS_TY]], [[SS_TY]]* %{{.+}}, i32 0, i32 0
+// CHECK: store i{{[0-9]+}} 0, i{{[0-9]+}}* %
+// CHECK: getelementptr inbounds [[SS_TY]], [[SS_TY]]* %{{.+}}, i32 0, i32 1
+// CHECK: store i8
+// CHECK: getelementptr inbounds [[SS_TY]], [[SS_TY]]* %{{.+}}, i32 0, i32 2
+// CHECK: call void (%{{.+}}*, i{{[0-9]+}}, void (i{{[0-9]+}}*, i{{[0-9]+}}*, ...)*, ...) @__kmpc_fork_call(%{{.+}}* @{{.+}}, i{{[0-9]+}} 1, void (i{{[0-9]+}}*, i{{[0-9]+}}*, ...)* bitcast (void (i{{[0-9]+}}*, i{{[0-9]+}}*, [[SS_TY]]*)* [[SS_MICROTASK:@.+]] to void
+// CHECK: ret
+
+// CHECK: define internal void [[SS_MICROTASK]](i{{[0-9]+}}* noalias [[GTID_ADDR:%.+]], i{{[0-9]+}}* noalias %{{.+}}, [[SS_TY]]* %{{.+}})
+// CHECK: alloca i{{[0-9]+}},
+// CHECK: alloca i{{[0-9]+}},
+// CHECK: alloca i{{[0-9]+}},
+// CHECK: alloca i{{[0-9]+}},
+// CHECK: alloca i{{[0-9]+}},
+// CHECK: alloca i{{[0-9]+}},
+// CHECK: [[A_PRIV:%.+]] = alloca i{{[0-9]+}},
+// CHECK: [[B_PRIV:%.+]] = alloca i{{[0-9]+}},
+// CHECK: [[C_PRIV:%.+]] = alloca i{{[0-9]+}},
+// CHECK: store i{{[0-9]+}}* [[A_PRIV]], i{{[0-9]+}}** [[REFA:%.+]],
+// CHECK: store i{{[0-9]+}}* [[C_PRIV]], i{{[0-9]+}}** [[REFC:%.+]],
+// CHECK: call void @__kmpc_for_static_init_4(
+// CHECK: [[A_PRIV:%.+]] = load i{{[0-9]+}}*, i{{[0-9]+}}** [[REFA]],
+// CHECK-NEXT: [[A_VAL:%.+]] = load i{{[0-9]+}}, i{{[0-9]+}}* [[A_PRIV]],
+// CHECK-NEXT: [[INC:%.+]] = add nsw i{{[0-9]+}} [[A_VAL]], 1
+// CHECK-NEXT: store i{{[0-9]+}} [[INC]], i{{[0-9]+}}* [[A_PRIV]],
+// CHECK-NEXT: [[B_VAL:%.+]] = load i{{[0-9]+}}, i{{[0-9]+}}* [[B_PRIV]],
+// CHECK-NEXT: [[DEC:%.+]] = add nsw i{{[0-9]+}} [[B_VAL]], -1
+// CHECK-NEXT: store i{{[0-9]+}} [[DEC]], i{{[0-9]+}}* [[B_PRIV]],
+// CHECK-NEXT: [[C_PRIV:%.+]] = load i{{[0-9]+}}*, i{{[0-9]+}}** [[REFC]],
+// CHECK-NEXT: [[C_VAL:%.+]] = load i{{[0-9]+}}, i{{[0-9]+}}* [[C_PRIV]],
+// CHECK-NEXT: [[DIV:%.+]] = sdiv i{{[0-9]+}} [[C_VAL]], 1
+// CHECK-NEXT: store i{{[0-9]+}} [[DIV]], i{{[0-9]+}}* [[C_PRIV]],
+// CHECK: call void @__kmpc_for_static_fini(
+// CHECK: br i1
+// CHECK: [[B_REF:%.+]] = getelementptr {{.*}}[[SS_TY]], [[SS_TY]]* %{{.*}}, i32 0, i32 1
+// CHECK: store i8 %{{.+}}, i8* [[B_REF]],
+// CHECK: br label
+// CHECK: ret void
+
// CHECK: define internal void [[TMAIN_MICROTASK]](i{{[0-9]+}}* noalias [[GTID_ADDR:%.+]], i{{[0-9]+}}* noalias %{{.+}}, i32* dereferenceable(4) %{{.+}}, [2 x i32]* dereferenceable(8) %{{.+}}, [2 x [[S_INT_TY]]]* dereferenceable(8) %{{.+}}, [[S_INT_TY]]* dereferenceable(4) %{{.+}})
// CHECK: alloca i{{[0-9]+}},
// CHECK: alloca i{{[0-9]+}},
// CHECK: [[GTID:%.+]] = call {{.*}}i32 @__kmpc_global_thread_num([[IDENT_T_TY]]* [[DEF_LOC_2]])
// CHECK: call {{.*}}void @__kmpc_push_num_threads([[IDENT_T_TY]]* [[DEF_LOC_2]], i32 [[GTID]], i32 1)
// CHECK: call {{.*}}void {{.*}} @__kmpc_fork_call(
-// CHECK: invoke {{.*}} [[S_TY_CONSTR]]([[S_TY]]* [[S_TEMP:%.+]], [[INTPTR_T_TY]] [[INTPTR_T_TY_ATTR]]23)
+// CHECK: {{(invoke|call)}} {{.*}} [[S_TY_CONSTR]]([[S_TY]]* [[S_TEMP:%.+]], [[INTPTR_T_TY]] [[INTPTR_T_TY_ATTR]]23)
// CHECK: [[S_CHAR_OP:%.+]] = invoke{{.*}} i8 [[S_TY_CHAR_OP]]([[S_TY]]* [[S_TEMP]])
// CHECK: [[RES:%.+]] = sext {{.*}}i8 [[S_CHAR_OP]] to i32
// CHECK: call {{.*}}void @__kmpc_push_num_threads([[IDENT_T_TY]]* [[DEF_LOC_2]], i32 [[GTID]], i32 [[RES]])
void Visit##Class(const Class *C);
#include "clang/Basic/OpenMPKinds.def"
void VisitOMPClauseWithPreInit(const OMPClauseWithPreInit *C);
+ void VisitOMPClauseWithPostUpdate(const OMPClauseWithPostUpdate *C);
};
void OMPClauseEnqueue::VisitOMPClauseWithPreInit(
Visitor->AddStmt(C->getPreInitStmt());
}
+void OMPClauseEnqueue::VisitOMPClauseWithPostUpdate(
+ const OMPClauseWithPostUpdate *C) {
+ Visitor->AddStmt(C->getPostUpdateExpr());
+}
+
void OMPClauseEnqueue::VisitOMPIfClause(const OMPIfClause *C) {
Visitor->AddStmt(C->getCondition());
}
void OMPClauseEnqueue::VisitOMPLastprivateClause(
const OMPLastprivateClause *C) {
VisitOMPClauseList(C);
+ VisitOMPClauseWithPreInit(C);
+ VisitOMPClauseWithPostUpdate(C);
for (auto *E : C->private_copies()) {
Visitor->AddStmt(E);
}