/* Lowering pass for OpenMP directives.  Converts OpenMP directives
   into explicit calls to the runtime library (libgomp) and data
   marshalling to implement data sharing and copying clauses.
   Contributed by Diego Novillo <dnovillo@redhat.com>

   Copyright (C) 2005, 2006, 2007, 2008 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "tree.h"
#include "rtl.h"
#include "gimple.h"
#include "tree-iterator.h"
#include "tree-inline.h"
#include "langhooks.h"
#include "diagnostic.h"
#include "tree-flow.h"
#include "tree-pass.h"
#include "splay-tree.h"
/* Lowering of OpenMP parallel and workshare constructs proceeds in two
   phases.  The first phase scans the function looking for OMP statements
   and then for variables that must be replaced to satisfy data sharing
   clauses.  The second phase expands code for the constructs, as well as
   re-gimplifying things when variables have been replaced with complex
   expressions.

   Final code generation is done by pass_expand_omp.  The flowgraph is
   scanned for parallel regions which are then moved to a new
   function, to be invoked by the thread library.  */
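
/* As an illustrative sketch (the exact libgomp entry points are chosen
   later by pass_expand_omp, based on the clauses used): a directive
   such as

	#pragma omp parallel shared (x)
	  body;

   in a function foo is outlined into a child function and expanded
   into explicit runtime calls in the parent, roughly:

	.omp_data_o.x = x;
	GOMP_parallel_start (foo.omp_fn.0, &.omp_data_o, 0);
	foo.omp_fn.0 (&.omp_data_o);
	GOMP_parallel_end ();

   where the shared variables travel through the .omp_data_s record
   built by this pass.  */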
/* Context structure.  Used to store information about each parallel
   directive in the code.  */

typedef struct omp_context
{
  /* This field must be at the beginning, as we do "inheritance":  Some
     callback functions for tree-inline.c (e.g., omp_copy_decl)
     receive a copy_body_data pointer that is up-casted to an
     omp_context pointer.  */
  copy_body_data cb;

  /* The tree of contexts corresponding to the encountered constructs.  */
  struct omp_context *outer;
  gimple stmt;

  /* Map variables to fields in a structure that allows communication
     between sending and receiving threads.  */
  splay_tree field_map;
  tree record_type;
  tree sender_decl;
  tree receiver_decl;

  /* These are used just by task contexts, if task firstprivate fn is
     needed.  srecord_type is used to communicate from the thread
     that encountered the task construct to task firstprivate fn,
     record_type is allocated by GOMP_task, initialized by task firstprivate
     fn and passed to the task body fn.  */
  splay_tree sfield_map;
  tree srecord_type;

  /* A chain of variables to add to the top-level block surrounding the
     construct.  In the case of a parallel, this is in the child function.  */
  tree block_vars;

  /* What to do with variables with implicitly determined sharing
     attributes.  */
  enum omp_clause_default_kind default_kind;

  /* Nesting depth of this context.  Used to beautify error messages re
     invalid gotos.  The outermost ctx is depth 1, with depth 0 being
     reserved for the main body of the function.  */
  int depth;

  /* True if this parallel directive is nested within another.  */
  bool is_nested;
} omp_context;
struct omp_for_data_loop
{
  tree v, n1, n2, step;
  enum tree_code cond_code;
};

/* A structure describing the main elements of a parallel loop.  */

struct omp_for_data
{
  struct omp_for_data_loop loop;
  tree chunk_size;
  gimple for_stmt;
  tree iter_type;
  int collapse;
  bool have_nowait, have_ordered;
  enum omp_clause_schedule_kind sched_kind;
  struct omp_for_data_loop *loops;
};
static splay_tree all_contexts;
static int taskreg_nesting_level;
struct omp_region *root_omp_region;
static bitmap task_shared_vars;

static void scan_omp (gimple_seq, omp_context *);
static tree scan_omp_1_op (tree *, int *, void *);
#define WALK_SUBSTMTS  \
    case GIMPLE_BIND: \
    case GIMPLE_TRY: \
    case GIMPLE_CATCH: \
    case GIMPLE_EH_FILTER: \
      /* The sub-statements for these should be walked.  */ \
      *handled_ops_p = false; \
      break;
/* Convenience function for calling scan_omp_1_op on tree operands.  */

static tree
scan_omp_op (tree *tp, omp_context *ctx)
{
  struct walk_stmt_info wi;

  memset (&wi, 0, sizeof (wi));
  wi.info = ctx;
  wi.want_locations = true;

  return walk_tree (tp, scan_omp_1_op, &wi, NULL);
}
static void lower_omp (gimple_seq, omp_context *);
static tree lookup_decl_in_outer_ctx (tree, omp_context *);
static tree maybe_lookup_decl_in_outer_ctx (tree, omp_context *);

/* Find an OpenMP clause of type KIND within CLAUSES.  */

tree
find_omp_clause (tree clauses, enum omp_clause_code kind)
{
  for (; clauses ; clauses = OMP_CLAUSE_CHAIN (clauses))
    if (OMP_CLAUSE_CODE (clauses) == kind)
      return clauses;

  return NULL_TREE;
}
/* Return true if CTX is for an omp parallel.  */

static inline bool
is_parallel_ctx (omp_context *ctx)
{
  return gimple_code (ctx->stmt) == GIMPLE_OMP_PARALLEL;
}


/* Return true if CTX is for an omp task.  */

static inline bool
is_task_ctx (omp_context *ctx)
{
  return gimple_code (ctx->stmt) == GIMPLE_OMP_TASK;
}


/* Return true if CTX is for an omp parallel or omp task.  */

static inline bool
is_taskreg_ctx (omp_context *ctx)
{
  return gimple_code (ctx->stmt) == GIMPLE_OMP_PARALLEL
	 || gimple_code (ctx->stmt) == GIMPLE_OMP_TASK;
}


/* Return true if REGION is a combined parallel+workshare region.  */

static inline bool
is_combined_parallel (struct omp_region *region)
{
  return region->is_combined_parallel;
}
/* Extract the header elements of parallel loop FOR_STMT and store
   them into *FD.  */

static void
extract_omp_for_data (gimple for_stmt, struct omp_for_data *fd,
		      struct omp_for_data_loop *loops)
{
  tree t, var, *collapse_iter, *collapse_count;
  tree count = NULL_TREE, iter_type = long_integer_type_node;
  struct omp_for_data_loop *loop;
  int i;
  struct omp_for_data_loop dummy_loop;

  fd->for_stmt = for_stmt;
  fd->collapse = gimple_omp_for_collapse (for_stmt);
  if (fd->collapse > 1)
    fd->loops = loops;
  else
    fd->loops = &fd->loop;

  fd->have_nowait = fd->have_ordered = false;
  fd->sched_kind = OMP_CLAUSE_SCHEDULE_STATIC;
  fd->chunk_size = NULL_TREE;
  collapse_iter = NULL;
  collapse_count = NULL;

  for (t = gimple_omp_for_clauses (for_stmt); t ; t = OMP_CLAUSE_CHAIN (t))
    switch (OMP_CLAUSE_CODE (t))
      {
      case OMP_CLAUSE_NOWAIT:
	fd->have_nowait = true;
	break;
      case OMP_CLAUSE_ORDERED:
	fd->have_ordered = true;
	break;
      case OMP_CLAUSE_SCHEDULE:
	fd->sched_kind = OMP_CLAUSE_SCHEDULE_KIND (t);
	fd->chunk_size = OMP_CLAUSE_SCHEDULE_CHUNK_EXPR (t);
	break;
      case OMP_CLAUSE_COLLAPSE:
	if (fd->collapse > 1)
	  {
	    collapse_iter = &OMP_CLAUSE_COLLAPSE_ITERVAR (t);
	    collapse_count = &OMP_CLAUSE_COLLAPSE_COUNT (t);
	  }
	break;
      default:
	break;
      }

  /* FIXME: for now map schedule(auto) to schedule(static).
     There should be analysis to determine whether all iterations
     are approximately the same amount of work (then schedule(static)
     is best) or if it varies (then schedule(dynamic,N) is better).  */
  if (fd->sched_kind == OMP_CLAUSE_SCHEDULE_AUTO)
    {
      fd->sched_kind = OMP_CLAUSE_SCHEDULE_STATIC;
      gcc_assert (fd->chunk_size == NULL);
    }
  gcc_assert (fd->collapse == 1 || collapse_iter != NULL);
  if (fd->sched_kind == OMP_CLAUSE_SCHEDULE_RUNTIME)
    gcc_assert (fd->chunk_size == NULL);
  else if (fd->chunk_size == NULL)
    {
      /* We only need to compute a default chunk size for ordered
	 static loops and dynamic loops.  */
      if (fd->sched_kind != OMP_CLAUSE_SCHEDULE_STATIC
	  || fd->have_ordered)
	fd->chunk_size = (fd->sched_kind == OMP_CLAUSE_SCHEDULE_STATIC)
			 ? integer_zero_node : integer_one_node;
    }
  for (i = 0; i < fd->collapse; i++)
    {
      if (fd->collapse == 1)
	loop = &fd->loop;
      else if (loops != NULL)
	loop = loops + i;
      else
	loop = &dummy_loop;

      loop->v = gimple_omp_for_index (for_stmt, i);
      gcc_assert (SSA_VAR_P (loop->v));
      gcc_assert (TREE_CODE (TREE_TYPE (loop->v)) == INTEGER_TYPE
		  || TREE_CODE (TREE_TYPE (loop->v)) == POINTER_TYPE);
      var = TREE_CODE (loop->v) == SSA_NAME ? SSA_NAME_VAR (loop->v) : loop->v;
      loop->n1 = gimple_omp_for_initial (for_stmt, i);

      loop->cond_code = gimple_omp_for_cond (for_stmt, i);
      loop->n2 = gimple_omp_for_final (for_stmt, i);
      switch (loop->cond_code)
	{
	case LT_EXPR:
	case GT_EXPR:
	  break;
	case LE_EXPR:
	  if (POINTER_TYPE_P (TREE_TYPE (loop->n2)))
	    loop->n2 = fold_build2 (POINTER_PLUS_EXPR, TREE_TYPE (loop->n2),
				    loop->n2, size_one_node);
	  else
	    loop->n2 = fold_build2 (PLUS_EXPR, TREE_TYPE (loop->n2), loop->n2,
				    build_int_cst (TREE_TYPE (loop->n2), 1));
	  loop->cond_code = LT_EXPR;
	  break;
	case GE_EXPR:
	  if (POINTER_TYPE_P (TREE_TYPE (loop->n2)))
	    loop->n2 = fold_build2 (POINTER_PLUS_EXPR, TREE_TYPE (loop->n2),
				    loop->n2, size_int (-1));
	  else
	    loop->n2 = fold_build2 (MINUS_EXPR, TREE_TYPE (loop->n2), loop->n2,
				    build_int_cst (TREE_TYPE (loop->n2), 1));
	  loop->cond_code = GT_EXPR;
	  break;
	default:
	  gcc_unreachable ();
	}

      t = gimple_omp_for_incr (for_stmt, i);
      gcc_assert (TREE_OPERAND (t, 0) == var);
      switch (TREE_CODE (t))
	{
	case PLUS_EXPR:
	case POINTER_PLUS_EXPR:
	  loop->step = TREE_OPERAND (t, 1);
	  break;
	case MINUS_EXPR:
	  loop->step = TREE_OPERAND (t, 1);
	  loop->step = fold_build1 (NEGATE_EXPR, TREE_TYPE (loop->step),
				    loop->step);
	  break;
	default:
	  gcc_unreachable ();
	}

      if (iter_type != long_long_unsigned_type_node)
	{
	  if (POINTER_TYPE_P (TREE_TYPE (loop->v)))
	    iter_type = long_long_unsigned_type_node;
	  else if (TYPE_UNSIGNED (TREE_TYPE (loop->v))
		   && TYPE_PRECISION (TREE_TYPE (loop->v))
		      >= TYPE_PRECISION (iter_type))
	    {
	      tree n;

	      if (loop->cond_code == LT_EXPR)
		n = fold_build2 (PLUS_EXPR, TREE_TYPE (loop->v),
				 loop->n2, loop->step);
	      else
		n = loop->n1;
	      if (TREE_CODE (n) != INTEGER_CST
		  || tree_int_cst_lt (TYPE_MAX_VALUE (iter_type), n))
		iter_type = long_long_unsigned_type_node;
	    }
	  else if (TYPE_PRECISION (TREE_TYPE (loop->v))
		   > TYPE_PRECISION (iter_type))
	    {
	      tree n1, n2;

	      if (loop->cond_code == LT_EXPR)
		{
		  n1 = loop->n1;
		  n2 = fold_build2 (PLUS_EXPR, TREE_TYPE (loop->v),
				    loop->n2, loop->step);
		}
	      else
		{
		  n1 = fold_build2 (MINUS_EXPR, TREE_TYPE (loop->v),
				    loop->n2, loop->step);
		  n2 = loop->n1;
		}
	      if (TREE_CODE (n1) != INTEGER_CST
		  || TREE_CODE (n2) != INTEGER_CST
		  || !tree_int_cst_lt (TYPE_MIN_VALUE (iter_type), n1)
		  || !tree_int_cst_lt (n2, TYPE_MAX_VALUE (iter_type)))
		iter_type = long_long_unsigned_type_node;
	    }
	}
      if (collapse_count && *collapse_count == NULL)
	{
	  if ((i == 0 || count != NULL_TREE)
	      && TREE_CODE (TREE_TYPE (loop->v)) == INTEGER_TYPE
	      && TREE_CONSTANT (loop->n1)
	      && TREE_CONSTANT (loop->n2)
	      && TREE_CODE (loop->step) == INTEGER_CST)
	    {
	      tree itype = TREE_TYPE (loop->v);

	      if (POINTER_TYPE_P (itype))
		itype
		  = lang_hooks.types.type_for_size (TYPE_PRECISION (itype), 0);
	      t = build_int_cst (itype, (loop->cond_code == LT_EXPR ? -1 : 1));
	      t = fold_build2 (PLUS_EXPR, itype,
			       fold_convert (itype, loop->step), t);
	      t = fold_build2 (PLUS_EXPR, itype, t,
			       fold_convert (itype, loop->n2));
	      t = fold_build2 (MINUS_EXPR, itype, t,
			       fold_convert (itype, loop->n1));
	      if (TYPE_UNSIGNED (itype) && loop->cond_code == GT_EXPR)
		t = fold_build2 (TRUNC_DIV_EXPR, itype,
				 fold_build1 (NEGATE_EXPR, itype, t),
				 fold_build1 (NEGATE_EXPR, itype,
					      loop->step));
	      else
		t = fold_build2 (TRUNC_DIV_EXPR, itype, t,
				 fold_convert (itype, loop->step));
	      t = fold_convert (long_long_unsigned_type_node, t);
	      if (count != NULL_TREE)
		count = fold_build2 (MULT_EXPR, long_long_unsigned_type_node,
				     count, t);
	      else
		count = t;
	      if (TREE_CODE (count) != INTEGER_CST)
		count = NULL_TREE;
	    }
	  else
	    count = NULL_TREE;
	}
    }

  if (count)
    {
      if (!tree_int_cst_lt (count, TYPE_MAX_VALUE (long_integer_type_node)))
	iter_type = long_long_unsigned_type_node;
      else
	iter_type = long_integer_type_node;
    }
  else if (collapse_iter && *collapse_iter != NULL)
    iter_type = TREE_TYPE (*collapse_iter);
  fd->iter_type = iter_type;
  if (collapse_iter && *collapse_iter == NULL)
    *collapse_iter = create_tmp_var (iter_type, ".iter");
  if (collapse_count && *collapse_count == NULL)
    {
      if (count)
	*collapse_count = fold_convert (iter_type, count);
      else
	*collapse_count = create_tmp_var (iter_type, ".count");
    }

  if (fd->collapse > 1)
    {
      fd->loop.v = *collapse_iter;
      fd->loop.n1 = build_int_cst (TREE_TYPE (fd->loop.v), 0);
      fd->loop.n2 = *collapse_count;
      fd->loop.step = build_int_cst (TREE_TYPE (fd->loop.v), 1);
      fd->loop.cond_code = LT_EXPR;
    }
}
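
/* A worked instance of the count computation above (illustrative only):
   for a loop "for (i = 0; i < 10; i += 3)" with cond_code LT_EXPR we
   build (3 + -1 + 10 - 0) / 3 = 4, matching the four iterations
   i = 0, 3, 6, 9.  */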
/* Given two blocks PAR_ENTRY_BB and WS_ENTRY_BB such that WS_ENTRY_BB
   is the immediate dominator of PAR_ENTRY_BB, return true if there
   are no data dependencies that would prevent expanding the parallel
   directive at PAR_ENTRY_BB as a combined parallel+workshare region.

   When expanding a combined parallel+workshare region, the call to
   the child function may need additional arguments in the case of
   GIMPLE_OMP_FOR regions.  In some cases, these arguments are
   computed out of variables passed in from the parent to the child
   via 'struct .omp_data_s'.  For instance:

	#pragma omp parallel for schedule (guided, i * 4)

   is lowered into:

	# BLOCK 2 (PAR_ENTRY_BB)
	.omp_data_o.i = i;
	#pragma omp parallel [child fn: bar.omp_fn.0 ( ..., D.1598)

	# BLOCK 3 (WS_ENTRY_BB)
	.omp_data_i = &.omp_data_o;
	D.1667 = .omp_data_i->i;
	D.1598 = D.1667 * 4;
	#pragma omp for schedule (guided, D.1598)

   When we outline the parallel region, the call to the child function
   'bar.omp_fn.0' will need the value D.1598 in its argument list, but
   that value is computed *after* the call site.  So, in principle we
   cannot do the transformation.

   To see whether the code in WS_ENTRY_BB blocks the combined
   parallel+workshare call, we collect all the variables used in the
   GIMPLE_OMP_FOR header and check whether they appear on the LHS of any
   statement in WS_ENTRY_BB.  If so, then we cannot emit the combined
   call.

   FIXME.  If we had the SSA form built at this point, we could merely
   hoist the code in block 3 into block 2 and be done with it.  But at
   this point we don't have dataflow information and though we could
   hack something up here, it is really not worth the aggravation.  */
static bool
workshare_safe_to_combine_p (basic_block par_entry_bb, basic_block ws_entry_bb)
{
  struct omp_for_data fd;
  gimple par_stmt, ws_stmt;

  par_stmt = last_stmt (par_entry_bb);
  ws_stmt = last_stmt (ws_entry_bb);

  if (gimple_code (ws_stmt) == GIMPLE_OMP_SECTIONS)
    return true;

  gcc_assert (gimple_code (ws_stmt) == GIMPLE_OMP_FOR);

  extract_omp_for_data (ws_stmt, &fd, NULL);

  if (fd.collapse > 1 && TREE_CODE (fd.loop.n2) != INTEGER_CST)
    return false;
  if (fd.iter_type != long_integer_type_node)
    return false;

  /* FIXME.  We give up too easily here.  If any of these arguments
     are not constants, they will likely involve variables that have
     been mapped into fields of .omp_data_s for sharing with the child
     function.  With appropriate data flow, it would be possible to
     handle these cases as well.  */
  if (!is_gimple_min_invariant (fd.loop.n1)
      || !is_gimple_min_invariant (fd.loop.n2)
      || !is_gimple_min_invariant (fd.loop.step)
      || (fd.chunk_size && !is_gimple_min_invariant (fd.chunk_size)))
    return false;

  return true;
}
/* Collect additional arguments needed to emit a combined
   parallel+workshare call.  WS_STMT is the workshare directive being
   expanded.  */

static tree
get_ws_args_for (gimple ws_stmt)
{
  tree t;

  if (gimple_code (ws_stmt) == GIMPLE_OMP_FOR)
    {
      struct omp_for_data fd;
      tree ws_args;

      extract_omp_for_data (ws_stmt, &fd, NULL);

      ws_args = NULL_TREE;
      if (fd.chunk_size)
	{
	  t = fold_convert (long_integer_type_node, fd.chunk_size);
	  ws_args = tree_cons (NULL, t, ws_args);
	}

      t = fold_convert (long_integer_type_node, fd.loop.step);
      ws_args = tree_cons (NULL, t, ws_args);

      t = fold_convert (long_integer_type_node, fd.loop.n2);
      ws_args = tree_cons (NULL, t, ws_args);

      t = fold_convert (long_integer_type_node, fd.loop.n1);
      ws_args = tree_cons (NULL, t, ws_args);

      return ws_args;
    }
  else if (gimple_code (ws_stmt) == GIMPLE_OMP_SECTIONS)
    {
      /* Number of sections is equal to the number of edges from the
	 GIMPLE_OMP_SECTIONS_SWITCH statement, except for the one to
	 the exit of the sections region.  */
      basic_block bb = single_succ (gimple_bb (ws_stmt));
      t = build_int_cst (unsigned_type_node, EDGE_COUNT (bb->succs) - 1);
      t = tree_cons (NULL, t, NULL);
      return t;
    }

  gcc_unreachable ();
}
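
/* For the example before workshare_safe_to_combine_p
   (schedule (guided, i * 4)), the list built above supplies the N1, N2,
   STEP and CHUNK arguments of the combined runtime call, e.g. something
   like

	GOMP_parallel_loop_guided_start (fn, data, 0, n1, n2, step, chunk)

   (a sketch; the actual entry point is picked during expansion based on
   the schedule kind).  */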
/* Discover whether REGION is a combined parallel+workshare region.  */

static void
determine_parallel_type (struct omp_region *region)
{
  basic_block par_entry_bb, par_exit_bb;
  basic_block ws_entry_bb, ws_exit_bb;

  if (region == NULL || region->inner == NULL
      || region->exit == NULL || region->inner->exit == NULL
      || region->inner->cont == NULL)
    return;

  /* We only support parallel+for and parallel+sections.  */
  if (region->type != GIMPLE_OMP_PARALLEL
      || (region->inner->type != GIMPLE_OMP_FOR
	  && region->inner->type != GIMPLE_OMP_SECTIONS))
    return;

  /* Check for perfect nesting PAR_ENTRY_BB -> WS_ENTRY_BB and
     WS_EXIT_BB -> PAR_EXIT_BB.  */
  par_entry_bb = region->entry;
  par_exit_bb = region->exit;
  ws_entry_bb = region->inner->entry;
  ws_exit_bb = region->inner->exit;

  if (single_succ (par_entry_bb) == ws_entry_bb
      && single_succ (ws_exit_bb) == par_exit_bb
      && workshare_safe_to_combine_p (par_entry_bb, ws_entry_bb)
      && (gimple_omp_parallel_combined_p (last_stmt (par_entry_bb))
	  || (last_and_only_stmt (ws_entry_bb)
	      && last_and_only_stmt (par_exit_bb))))
    {
      gimple ws_stmt = last_stmt (ws_entry_bb);

      if (region->inner->type == GIMPLE_OMP_FOR)
	{
	  /* If this is a combined parallel loop, we need to determine
	     whether or not to use the combined library calls.  There
	     are two cases where we do not apply the transformation:
	     static loops and any kind of ordered loop.  In the first
	     case, we already open code the loop so there is no need
	     to do anything else.  In the latter case, the combined
	     parallel loop call would still need extra synchronization
	     to implement ordered semantics, so there would not be any
	     gain in using the combined call.  */
	  tree clauses = gimple_omp_for_clauses (ws_stmt);
	  tree c = find_omp_clause (clauses, OMP_CLAUSE_SCHEDULE);
	  if (c == NULL
	      || OMP_CLAUSE_SCHEDULE_KIND (c) == OMP_CLAUSE_SCHEDULE_STATIC
	      || find_omp_clause (clauses, OMP_CLAUSE_ORDERED))
	    {
	      region->is_combined_parallel = false;
	      region->inner->is_combined_parallel = false;
	      return;
	    }
	}

      region->is_combined_parallel = true;
      region->inner->is_combined_parallel = true;
      region->ws_args = get_ws_args_for (ws_stmt);
    }
}
/* Return true if EXPR is variable sized.  */

static inline bool
is_variable_sized (const_tree expr)
{
  return !TREE_CONSTANT (TYPE_SIZE_UNIT (TREE_TYPE (expr)));
}

/* Return true if DECL is a reference type.  */

static inline bool
is_reference (tree decl)
{
  return lang_hooks.decls.omp_privatize_by_reference (decl);
}

/* Lookup variables in the decl or field splay trees.  The "maybe" form
   allows for the variable form to not have been entered, otherwise we
   assert that the variable must have been entered.  */

static inline tree
lookup_decl (tree var, omp_context *ctx)
{
  tree *n;
  n = (tree *) pointer_map_contains (ctx->cb.decl_map, var);
  return *n;
}

static inline tree
maybe_lookup_decl (const_tree var, omp_context *ctx)
{
  tree *n;
  n = (tree *) pointer_map_contains (ctx->cb.decl_map, var);
  return n ? *n : NULL_TREE;
}

static inline tree
lookup_field (tree var, omp_context *ctx)
{
  splay_tree_node n;
  n = splay_tree_lookup (ctx->field_map, (splay_tree_key) var);
  return (tree) n->value;
}

static inline tree
lookup_sfield (tree var, omp_context *ctx)
{
  splay_tree_node n;
  n = splay_tree_lookup (ctx->sfield_map
			 ? ctx->sfield_map : ctx->field_map,
			 (splay_tree_key) var);
  return (tree) n->value;
}

static inline tree
maybe_lookup_field (tree var, omp_context *ctx)
{
  splay_tree_node n;
  n = splay_tree_lookup (ctx->field_map, (splay_tree_key) var);
  return n ? (tree) n->value : NULL_TREE;
}
/* Return true if DECL should be copied by pointer.  SHARED_CTX is
   the parallel context if DECL is to be shared.  */

static bool
use_pointer_for_field (tree decl, omp_context *shared_ctx)
{
  if (AGGREGATE_TYPE_P (TREE_TYPE (decl)))
    return true;

  /* We can only use copy-in/copy-out semantics for shared variables
     when we know the value is not accessible from an outer scope.  */
  if (shared_ctx)
    {
      /* ??? Trivially accessible from anywhere.  But why would we even
	 be passing an address in this case?  Should we simply assert
	 this to be false, or should we have a cleanup pass that removes
	 these from the list of mappings?  */
      if (TREE_STATIC (decl) || DECL_EXTERNAL (decl))
	return true;

      /* For variables with DECL_HAS_VALUE_EXPR_P set, we cannot tell
	 without analyzing the expression whether or not its location
	 is accessible to anyone else.  In the case of nested parallel
	 regions it certainly may be.  */
      if (TREE_CODE (decl) != RESULT_DECL && DECL_HAS_VALUE_EXPR_P (decl))
	return true;

      /* Do not use copy-in/copy-out for variables that have their
	 address taken.  */
      if (TREE_ADDRESSABLE (decl))
	return true;

      /* Disallow copy-in/out in nested parallel if
	 decl is shared in outer parallel, otherwise
	 each thread could store the shared variable
	 in its own copy-in location, making the
	 variable no longer really shared.  */
      if (!TREE_READONLY (decl) && shared_ctx->is_nested)
	{
	  omp_context *up;

	  for (up = shared_ctx->outer; up; up = up->outer)
	    if (is_taskreg_ctx (up) && maybe_lookup_decl (decl, up))
	      break;

	  if (up)
	    {
	      tree c;

	      for (c = gimple_omp_taskreg_clauses (up->stmt);
		   c; c = OMP_CLAUSE_CHAIN (c))
		if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_SHARED
		    && OMP_CLAUSE_DECL (c) == decl)
		  break;

	      if (c)
		return true;
	    }
	}

      /* For tasks avoid using copy-in/out, unless they are readonly
	 (in which case just copy-in is used).  As tasks can be
	 deferred or executed in a different thread, when GOMP_task
	 returns, the task hasn't necessarily terminated.  */
      if (!TREE_READONLY (decl) && is_task_ctx (shared_ctx))
	{
	  tree outer = maybe_lookup_decl_in_outer_ctx (decl, shared_ctx);
	  if (is_gimple_reg (outer))
	    {
	      /* Taking address of OUTER in lower_send_shared_vars
		 might need regimplification of everything that uses the
		 variable.  */
	      if (!task_shared_vars)
		task_shared_vars = BITMAP_ALLOC (NULL);
	      bitmap_set_bit (task_shared_vars, DECL_UID (outer));
	      TREE_ADDRESSABLE (outer) = 1;
	    }
	  return true;
	}
    }

  return false;
}
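
/* To summarize the predicate above by example (a sketch, not an
   exhaustive list): a shared scalar like "int i" whose address is never
   taken can use copy-in/copy-out semantics, while an aggregate, an
   addressable variable, a variable also shared by an enclosing
   parallel, or a non-readonly variable shared with a task must be
   passed to the child by pointer.  */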
/* Create a new VAR_DECL and copy information from VAR to it.  */

tree
copy_var_decl (tree var, tree name, tree type)
{
  tree copy = build_decl (VAR_DECL, name, type);

  TREE_ADDRESSABLE (copy) = TREE_ADDRESSABLE (var);
  TREE_THIS_VOLATILE (copy) = TREE_THIS_VOLATILE (var);
  DECL_GIMPLE_REG_P (copy) = DECL_GIMPLE_REG_P (var);
  DECL_NO_TBAA_P (copy) = DECL_NO_TBAA_P (var);
  DECL_ARTIFICIAL (copy) = DECL_ARTIFICIAL (var);
  DECL_IGNORED_P (copy) = DECL_IGNORED_P (var);
  DECL_CONTEXT (copy) = DECL_CONTEXT (var);
  DECL_SOURCE_LOCATION (copy) = DECL_SOURCE_LOCATION (var);
  TREE_USED (copy) = 1;
  DECL_SEEN_IN_BIND_EXPR_P (copy) = 1;

  return copy;
}

/* Construct a new automatic decl similar to VAR.  */

static tree
omp_copy_decl_2 (tree var, tree name, tree type, omp_context *ctx)
{
  tree copy = copy_var_decl (var, name, type);

  DECL_CONTEXT (copy) = current_function_decl;
  TREE_CHAIN (copy) = ctx->block_vars;
  ctx->block_vars = copy;

  return copy;
}

static tree
omp_copy_decl_1 (tree var, omp_context *ctx)
{
  return omp_copy_decl_2 (var, DECL_NAME (var), TREE_TYPE (var), ctx);
}
/* Build tree nodes to access the field for VAR on the receiver side.  */

static tree
build_receiver_ref (tree var, bool by_ref, omp_context *ctx)
{
  tree x, field = lookup_field (var, ctx);

  /* If the receiver record type was remapped in the child function,
     remap the field into the new record type.  */
  x = maybe_lookup_field (field, ctx);
  if (x != NULL)
    field = x;

  x = build_fold_indirect_ref (ctx->receiver_decl);
  x = build3 (COMPONENT_REF, TREE_TYPE (field), x, field, NULL);
  if (by_ref)
    x = build_fold_indirect_ref (x);

  return x;
}

/* Build tree nodes to access VAR in the scope outer to CTX.  In the case
   of a parallel, this is a component reference; for workshare constructs
   this is some variable.  */

static tree
build_outer_var_ref (tree var, omp_context *ctx)
{
  tree x;

  if (is_global_var (maybe_lookup_decl_in_outer_ctx (var, ctx)))
    x = var;
  else if (is_variable_sized (var))
    {
      x = TREE_OPERAND (DECL_VALUE_EXPR (var), 0);
      x = build_outer_var_ref (x, ctx);
      x = build_fold_indirect_ref (x);
    }
  else if (is_taskreg_ctx (ctx))
    {
      bool by_ref = use_pointer_for_field (var, NULL);
      x = build_receiver_ref (var, by_ref, ctx);
    }
  else if (ctx->outer)
    x = lookup_decl (var, ctx->outer);
  else if (is_reference (var))
    /* This can happen with orphaned constructs.  If var is reference, it is
       possible it is shared and as such valid.  */
    x = var;
  else
    gcc_unreachable ();

  if (is_reference (var))
    x = build_fold_indirect_ref (x);

  return x;
}

/* Build tree nodes to access the field for VAR on the sender side.  */

static tree
build_sender_ref (tree var, omp_context *ctx)
{
  tree field = lookup_sfield (var, ctx);
  return build3 (COMPONENT_REF, TREE_TYPE (field),
		 ctx->sender_decl, field, NULL);
}
/* Add a new field for VAR inside the structure CTX->SENDER_DECL.  */

static void
install_var_field (tree var, bool by_ref, int mask, omp_context *ctx)
{
  tree field, type, sfield = NULL_TREE;

  gcc_assert ((mask & 1) == 0
	      || !splay_tree_lookup (ctx->field_map, (splay_tree_key) var));
  gcc_assert ((mask & 2) == 0 || !ctx->sfield_map
	      || !splay_tree_lookup (ctx->sfield_map, (splay_tree_key) var));

  type = TREE_TYPE (var);
  if (by_ref)
    type = build_pointer_type (type);
  else if ((mask & 3) == 1 && is_reference (var))
    type = TREE_TYPE (type);

  field = build_decl (FIELD_DECL, DECL_NAME (var), type);

  /* Remember what variable this field was created for.  This does have a
     side effect of making dwarf2out ignore this member, so for helpful
     debugging we clear it later in delete_omp_context.  */
  DECL_ABSTRACT_ORIGIN (field) = var;
  if (type == TREE_TYPE (var))
    {
      DECL_ALIGN (field) = DECL_ALIGN (var);
      DECL_USER_ALIGN (field) = DECL_USER_ALIGN (var);
      TREE_THIS_VOLATILE (field) = TREE_THIS_VOLATILE (var);
    }
  else
    DECL_ALIGN (field) = TYPE_ALIGN (type);

  if ((mask & 3) == 3)
    {
      insert_field_into_struct (ctx->record_type, field);
      if (ctx->srecord_type)
	{
	  sfield = build_decl (FIELD_DECL, DECL_NAME (var), type);
	  DECL_ABSTRACT_ORIGIN (sfield) = var;
	  DECL_ALIGN (sfield) = DECL_ALIGN (field);
	  DECL_USER_ALIGN (sfield) = DECL_USER_ALIGN (field);
	  TREE_THIS_VOLATILE (sfield) = TREE_THIS_VOLATILE (field);
	  insert_field_into_struct (ctx->srecord_type, sfield);
	}
    }
  else
    {
      if (ctx->srecord_type == NULL_TREE)
	{
	  tree t;

	  ctx->srecord_type = lang_hooks.types.make_type (RECORD_TYPE);
	  ctx->sfield_map = splay_tree_new (splay_tree_compare_pointers, 0, 0);
	  for (t = TYPE_FIELDS (ctx->record_type); t ; t = TREE_CHAIN (t))
	    {
	      sfield = build_decl (FIELD_DECL, DECL_NAME (t), TREE_TYPE (t));
	      DECL_ABSTRACT_ORIGIN (sfield) = DECL_ABSTRACT_ORIGIN (t);
	      insert_field_into_struct (ctx->srecord_type, sfield);
	      splay_tree_insert (ctx->sfield_map,
				 (splay_tree_key) DECL_ABSTRACT_ORIGIN (t),
				 (splay_tree_value) sfield);
	    }
	}
      sfield = field;
      insert_field_into_struct ((mask & 1) ? ctx->record_type
				: ctx->srecord_type, field);
    }

  if (mask & 1)
    splay_tree_insert (ctx->field_map, (splay_tree_key) var,
		       (splay_tree_value) field);
  if ((mask & 2) && ctx->sfield_map)
    splay_tree_insert (ctx->sfield_map, (splay_tree_key) var,
		       (splay_tree_value) sfield);
}
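
/* In install_var_field above, MASK works as a bit mask: bit 0 requests
   a field in the receiver record (CTX->record_type), bit 1 one in the
   sender record (CTX->srecord_type, used by task constructs).  E.g.,
   install_var_field (decl, by_ref, 3, ctx) installs the field on both
   sides, as done for shared variables in scan_sharing_clauses.  */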
static tree
install_var_local (tree var, omp_context *ctx)
{
  tree new_var = omp_copy_decl_1 (var, ctx);
  insert_decl_map (&ctx->cb, var, new_var);
  return new_var;
}
/* Adjust the replacement for DECL in CTX for the new context.  This means
   copying the DECL_VALUE_EXPR, and fixing up the type.  */

static void
fixup_remapped_decl (tree decl, omp_context *ctx, bool private_debug)
{
  tree new_decl, size;

  new_decl = lookup_decl (decl, ctx);

  TREE_TYPE (new_decl) = remap_type (TREE_TYPE (decl), &ctx->cb);

  if ((!TREE_CONSTANT (DECL_SIZE (new_decl)) || private_debug)
      && DECL_HAS_VALUE_EXPR_P (decl))
    {
      tree ve = DECL_VALUE_EXPR (decl);
      walk_tree (&ve, copy_tree_body_r, &ctx->cb, NULL);
      SET_DECL_VALUE_EXPR (new_decl, ve);
      DECL_HAS_VALUE_EXPR_P (new_decl) = 1;
    }

  if (!TREE_CONSTANT (DECL_SIZE (new_decl)))
    {
      size = remap_decl (DECL_SIZE (decl), &ctx->cb);
      if (size == error_mark_node)
	size = TYPE_SIZE (TREE_TYPE (new_decl));
      DECL_SIZE (new_decl) = size;

      size = remap_decl (DECL_SIZE_UNIT (decl), &ctx->cb);
      if (size == error_mark_node)
	size = TYPE_SIZE_UNIT (TREE_TYPE (new_decl));
      DECL_SIZE_UNIT (new_decl) = size;
    }
}
/* The callback for remap_decl.  Search all containing contexts for a
   mapping of the variable; this avoids having to duplicate the splay
   tree ahead of time.  We know a mapping doesn't already exist in the
   given context.  Create new mappings to implement default semantics.  */

static tree
omp_copy_decl (tree var, copy_body_data *cb)
{
  omp_context *ctx = (omp_context *) cb;
  tree new_var;

  if (TREE_CODE (var) == LABEL_DECL)
    {
      new_var = create_artificial_label ();
      DECL_CONTEXT (new_var) = current_function_decl;
      insert_decl_map (&ctx->cb, var, new_var);
      return new_var;
    }

  while (!is_taskreg_ctx (ctx))
    {
      ctx = ctx->outer;
      if (ctx == NULL)
	return var;
      new_var = maybe_lookup_decl (var, ctx);
      if (new_var)
	return new_var;
    }

  if (is_global_var (var) || decl_function_context (var) != ctx->cb.src_fn)
    return var;

  return error_mark_node;
}
/* Return the parallel region associated with STMT.  */

/* Debugging dumps for parallel regions.  */
void dump_omp_region (FILE *, struct omp_region *, int);
void debug_omp_region (struct omp_region *);
void debug_all_omp_regions (void);

/* Dump the parallel region tree rooted at REGION.  */

void
dump_omp_region (FILE *file, struct omp_region *region, int indent)
{
  fprintf (file, "%*sbb %d: %s\n", indent, "", region->entry->index,
	   gimple_code_name[region->type]);

  if (region->inner)
    dump_omp_region (file, region->inner, indent + 4);

  if (region->cont)
    fprintf (file, "%*sbb %d: GIMPLE_OMP_CONTINUE\n", indent, "",
	     region->cont->index);

  if (region->exit)
    fprintf (file, "%*sbb %d: GIMPLE_OMP_RETURN\n", indent, "",
	     region->exit->index);
  else
    fprintf (file, "%*s[no exit marker]\n", indent, "");

  if (region->next)
    dump_omp_region (file, region->next, indent);
}

void
debug_omp_region (struct omp_region *region)
{
  dump_omp_region (stderr, region, 0);
}

void
debug_all_omp_regions (void)
{
  dump_omp_region (stderr, root_omp_region, 0);
}
/* Create a new parallel region starting at STMT inside region PARENT.  */

struct omp_region *
new_omp_region (basic_block bb, enum gimple_code type,
		struct omp_region *parent)
{
  struct omp_region *region = XCNEW (struct omp_region);

  region->outer = parent;
  region->entry = bb;
  region->type = type;

  if (parent)
    {
      /* This is a nested region.  Add it to the list of inner
	 regions in PARENT.  */
      region->next = parent->inner;
      parent->inner = region;
    }
  else
    {
      /* This is a toplevel region.  Add it to the list of toplevel
	 regions in ROOT_OMP_REGION.  */
      region->next = root_omp_region;
      root_omp_region = region;
    }

  return region;
}

/* Release the memory associated with the region tree rooted at REGION.  */

static void
free_omp_region_1 (struct omp_region *region)
{
  struct omp_region *i, *n;

  for (i = region->inner; i ; i = n)
    {
      n = i->next;
      free_omp_region_1 (i);
    }

  free (region);
}

/* Release the memory for the entire omp region tree.  */

void
free_omp_regions (void)
{
  struct omp_region *r, *n;
  for (r = root_omp_region; r ; r = n)
    {
      n = r->next;
      free_omp_region_1 (r);
    }
  root_omp_region = NULL;
}
/* Create a new context, with OUTER_CTX being the surrounding context.  */

static omp_context *
new_omp_context (gimple stmt, omp_context *outer_ctx)
{
  omp_context *ctx = XCNEW (omp_context);

  splay_tree_insert (all_contexts, (splay_tree_key) stmt,
		     (splay_tree_value) ctx);
  ctx->stmt = stmt;

  if (outer_ctx)
    {
      ctx->outer = outer_ctx;
      ctx->cb = outer_ctx->cb;
      ctx->cb.block = NULL;
      ctx->depth = outer_ctx->depth + 1;
    }
  else
    {
      ctx->cb.src_fn = current_function_decl;
      ctx->cb.dst_fn = current_function_decl;
      ctx->cb.src_node = cgraph_node (current_function_decl);
      ctx->cb.dst_node = ctx->cb.src_node;
      ctx->cb.src_cfun = cfun;
      ctx->cb.copy_decl = omp_copy_decl;
      ctx->cb.eh_region = -1;
      ctx->cb.transform_call_graph_edges = CB_CGE_MOVE;
      ctx->depth = 1;
    }

  ctx->cb.decl_map = pointer_map_create ();

  return ctx;
}
static gimple_seq maybe_catch_exception (gimple_seq);

/* Finalize task copyfn.  */

static void
finalize_task_copyfn (gimple task_stmt)
{
  struct function *child_cfun;
  tree child_fn, old_fn;
  gimple_seq seq, new_seq;
  gimple bind;

  child_fn = gimple_omp_task_copy_fn (task_stmt);
  if (child_fn == NULL_TREE)
    return;

  child_cfun = DECL_STRUCT_FUNCTION (child_fn);

  /* Inform the callgraph about the new function.  */
  DECL_STRUCT_FUNCTION (child_fn)->curr_properties
    = cfun->curr_properties;

  old_fn = current_function_decl;
  push_cfun (child_cfun);
  current_function_decl = child_fn;
  bind = gimplify_body (&DECL_SAVED_TREE (child_fn), child_fn, false);
  seq = gimple_seq_alloc ();
  gimple_seq_add_stmt (&seq, bind);
  new_seq = maybe_catch_exception (seq);
  if (new_seq != seq)
    {
      bind = gimple_build_bind (NULL, new_seq, NULL);
      seq = gimple_seq_alloc ();
      gimple_seq_add_stmt (&seq, bind);
    }
  gimple_set_body (child_fn, seq);
  pop_cfun ();
  current_function_decl = old_fn;

  cgraph_add_new_function (child_fn, false);
}
/* Destroy an omp_context data structure.  Called through the splay tree
   value delete callback.  */

static void
delete_omp_context (splay_tree_value value)
{
  omp_context *ctx = (omp_context *) value;

  pointer_map_destroy (ctx->cb.decl_map);

  if (ctx->field_map)
    splay_tree_delete (ctx->field_map);
  if (ctx->sfield_map)
    splay_tree_delete (ctx->sfield_map);

  /* We hijacked DECL_ABSTRACT_ORIGIN earlier.  We need to clear it before
     it produces corrupt debug information.  */
  if (ctx->record_type)
    {
      tree t;
      for (t = TYPE_FIELDS (ctx->record_type); t ; t = TREE_CHAIN (t))
	DECL_ABSTRACT_ORIGIN (t) = NULL;
    }
  if (ctx->srecord_type)
    {
      tree t;
      for (t = TYPE_FIELDS (ctx->srecord_type); t ; t = TREE_CHAIN (t))
	DECL_ABSTRACT_ORIGIN (t) = NULL;
    }

  if (is_task_ctx (ctx))
    finalize_task_copyfn (ctx->stmt);

  XDELETE (ctx);
}
/* Fix up RECEIVER_DECL with a type that has been remapped to the child
   context.  */

static void
fixup_child_record_type (omp_context *ctx)
{
  tree f, type = ctx->record_type;

  /* ??? It isn't sufficient to just call remap_type here, because
     variably_modified_type_p doesn't work the way we expect for
     record types.  Testing each field for whether it needs remapping
     and creating a new record by hand works, however.  */
  for (f = TYPE_FIELDS (type); f ; f = TREE_CHAIN (f))
    if (variably_modified_type_p (TREE_TYPE (f), ctx->cb.src_fn))
      break;
  if (f)
    {
      tree name, new_fields = NULL;

      type = lang_hooks.types.make_type (RECORD_TYPE);
      name = DECL_NAME (TYPE_NAME (ctx->record_type));
      name = build_decl (TYPE_DECL, name, type);
      TYPE_NAME (type) = name;

      for (f = TYPE_FIELDS (ctx->record_type); f ; f = TREE_CHAIN (f))
	{
	  tree new_f = copy_node (f);
	  DECL_CONTEXT (new_f) = type;
	  TREE_TYPE (new_f) = remap_type (TREE_TYPE (f), &ctx->cb);
	  TREE_CHAIN (new_f) = new_fields;
	  walk_tree (&DECL_SIZE (new_f), copy_tree_body_r, &ctx->cb, NULL);
	  walk_tree (&DECL_SIZE_UNIT (new_f), copy_tree_body_r,
		     &ctx->cb, NULL);
	  walk_tree (&DECL_FIELD_OFFSET (new_f), copy_tree_body_r,
		     &ctx->cb, NULL);
	  new_fields = new_f;

	  /* Arrange to be able to look up the receiver field
	     given the sender field.  */
	  splay_tree_insert (ctx->field_map, (splay_tree_key) f,
			     (splay_tree_value) new_f);
	}
      TYPE_FIELDS (type) = nreverse (new_fields);
      layout_type (type);
    }

  TREE_TYPE (ctx->receiver_decl) = build_pointer_type (type);
}
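
/* After this fixup the receiver parameter created in
   create_omp_child_function (".omp_data_i") points to the remapped
   record, i.e. it has type "struct .omp_data_s *" as seen from the
   child, so dereferences like the .omp_data_i->i shown in the comments
   above type-check in the child function.  (Illustrative; the record
   name is assigned in scan_omp_parallel.)  */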
/* Instantiate decls as necessary in CTX to satisfy the data sharing
   specified by CLAUSES.  */

static void
scan_sharing_clauses (tree clauses, omp_context *ctx)
{
  tree c, decl;
  bool scan_array_reductions = false;

  for (c = clauses; c; c = OMP_CLAUSE_CHAIN (c))
    {
      bool by_ref;

      switch (OMP_CLAUSE_CODE (c))
	{
	case OMP_CLAUSE_PRIVATE:
	  decl = OMP_CLAUSE_DECL (c);
	  if (OMP_CLAUSE_PRIVATE_OUTER_REF (c))
	    goto do_private;
	  else if (!is_variable_sized (decl))
	    install_var_local (decl, ctx);
	  break;

	case OMP_CLAUSE_SHARED:
	  gcc_assert (is_taskreg_ctx (ctx));
	  decl = OMP_CLAUSE_DECL (c);
	  gcc_assert (!COMPLETE_TYPE_P (TREE_TYPE (decl))
		      || !is_variable_sized (decl));
	  /* Global variables don't need to be copied,
	     the receiver side will use them directly.  */
	  if (is_global_var (maybe_lookup_decl_in_outer_ctx (decl, ctx)))
	    break;
	  by_ref = use_pointer_for_field (decl, ctx);
	  if (! TREE_READONLY (decl)
	      || TREE_ADDRESSABLE (decl)
	      || by_ref
	      || is_reference (decl))
	    {
	      install_var_field (decl, by_ref, 3, ctx);
	      install_var_local (decl, ctx);
	      break;
	    }
	  /* We don't need to copy const scalar vars back.  */
	  OMP_CLAUSE_SET_CODE (c, OMP_CLAUSE_FIRSTPRIVATE);
	  goto do_private;

	case OMP_CLAUSE_LASTPRIVATE:
	  /* Let the corresponding firstprivate clause create
	     the variable.  */
	  if (OMP_CLAUSE_LASTPRIVATE_FIRSTPRIVATE (c))
	    break;
	  /* FALLTHRU */

	case OMP_CLAUSE_FIRSTPRIVATE:
	case OMP_CLAUSE_REDUCTION:
	  decl = OMP_CLAUSE_DECL (c);
	do_private:
	  if (is_variable_sized (decl))
	    {
	      if (is_task_ctx (ctx))
		install_var_field (decl, false, 1, ctx);
	      break;
	    }
	  else if (is_taskreg_ctx (ctx))
	    {
	      bool global
		= is_global_var (maybe_lookup_decl_in_outer_ctx (decl, ctx));
	      by_ref = use_pointer_for_field (decl, NULL);

	      if (is_task_ctx (ctx)
		  && (global || by_ref || is_reference (decl)))
		{
		  install_var_field (decl, false, 1, ctx);
		  if (!global)
		    install_var_field (decl, by_ref, 2, ctx);
		}
	      else if (!global)
		install_var_field (decl, by_ref, 3, ctx);
	    }
	  install_var_local (decl, ctx);
	  break;

	case OMP_CLAUSE_COPYPRIVATE:
	  if (ctx->outer)
	    scan_omp_op (&OMP_CLAUSE_DECL (c), ctx->outer);
	  /* FALLTHRU */

	case OMP_CLAUSE_COPYIN:
	  decl = OMP_CLAUSE_DECL (c);
	  by_ref = use_pointer_for_field (decl, NULL);
	  install_var_field (decl, by_ref, 3, ctx);
	  break;

	case OMP_CLAUSE_DEFAULT:
	  ctx->default_kind = OMP_CLAUSE_DEFAULT_KIND (c);
	  break;

	case OMP_CLAUSE_IF:
	case OMP_CLAUSE_NUM_THREADS:
	case OMP_CLAUSE_SCHEDULE:
	  if (ctx->outer)
	    scan_omp_op (&OMP_CLAUSE_OPERAND (c, 0), ctx->outer);
	  break;

	case OMP_CLAUSE_NOWAIT:
	case OMP_CLAUSE_ORDERED:
	case OMP_CLAUSE_COLLAPSE:
	case OMP_CLAUSE_UNTIED:
	  break;

	default:
	  gcc_unreachable ();
	}
    }

  for (c = clauses; c; c = OMP_CLAUSE_CHAIN (c))
    {
      switch (OMP_CLAUSE_CODE (c))
	{
	case OMP_CLAUSE_LASTPRIVATE:
	  /* Let the corresponding firstprivate clause create
	     the variable.  */
	  if (OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c))
	    scan_array_reductions = true;
	  if (OMP_CLAUSE_LASTPRIVATE_FIRSTPRIVATE (c))
	    break;
	  /* FALLTHRU */

	case OMP_CLAUSE_PRIVATE:
	case OMP_CLAUSE_FIRSTPRIVATE:
	case OMP_CLAUSE_REDUCTION:
	  decl = OMP_CLAUSE_DECL (c);
	  if (is_variable_sized (decl))
	    install_var_local (decl, ctx);
	  fixup_remapped_decl (decl, ctx,
			       OMP_CLAUSE_CODE (c) == OMP_CLAUSE_PRIVATE
			       && OMP_CLAUSE_PRIVATE_DEBUG (c));
	  if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_REDUCTION
	      && OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
	    scan_array_reductions = true;
	  break;

	case OMP_CLAUSE_SHARED:
	  decl = OMP_CLAUSE_DECL (c);
	  if (! is_global_var (maybe_lookup_decl_in_outer_ctx (decl, ctx)))
	    fixup_remapped_decl (decl, ctx, false);
	  break;

	case OMP_CLAUSE_COPYPRIVATE:
	case OMP_CLAUSE_COPYIN:
	case OMP_CLAUSE_DEFAULT:
	case OMP_CLAUSE_IF:
	case OMP_CLAUSE_NUM_THREADS:
	case OMP_CLAUSE_SCHEDULE:
	case OMP_CLAUSE_NOWAIT:
	case OMP_CLAUSE_ORDERED:
	case OMP_CLAUSE_COLLAPSE:
	case OMP_CLAUSE_UNTIED:
	  break;

	default:
	  gcc_unreachable ();
	}
    }

  if (scan_array_reductions)
    for (c = clauses; c; c = OMP_CLAUSE_CHAIN (c))
      if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_REDUCTION
	  && OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
	{
	  scan_omp (OMP_CLAUSE_REDUCTION_GIMPLE_INIT (c), ctx);
	  scan_omp (OMP_CLAUSE_REDUCTION_GIMPLE_MERGE (c), ctx);
	}
      else if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_LASTPRIVATE
	       && OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c))
	scan_omp (OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c), ctx);
}
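
/* Example of the combined effect of the two loops above: given

	#pragma omp parallel shared (a) firstprivate (b) private (c)

   the first loop installs fields for A and B (mask 3 on a parallel;
   split into 1 and 2 for a task) plus local replacements for all three
   decls, with C getting only the local copy, and the second loop fixes
   up the remapped decls once the record layout is known.  A sketch,
   assuming non-global, fixed-size scalars.  */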
/* Create a new name for omp child function.  Returns an identifier.  */

static GTY(()) unsigned int tmp_ompfn_id_num;

static tree
create_omp_child_function_name (bool task_copy)
{
  tree name = DECL_ASSEMBLER_NAME (current_function_decl);
  size_t len = IDENTIFIER_LENGTH (name);
  char *tmp_name, *prefix;
  const char *suffix;

  suffix = task_copy ? "_omp_cpyfn" : "_omp_fn";
  prefix = XALLOCAVEC (char, len + strlen (suffix) + 1);
  memcpy (prefix, IDENTIFIER_POINTER (name), len);
  strcpy (prefix + len, suffix);
#ifndef NO_DOT_IN_LABEL
  prefix[len] = '.';
#elif !defined NO_DOLLAR_IN_LABEL
  prefix[len] = '$';
#endif
  ASM_FORMAT_PRIVATE_NAME (tmp_name, prefix, tmp_ompfn_id_num++);
  return get_identifier (tmp_name);
}
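
/* E.g., inside a parent function "bar" this yields identifiers such as
   "bar.omp_fn.0" (with a target-specific separator on targets lacking
   dots in labels), matching the child function name shown in the
   combined parallel+workshare example earlier in this file.  */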
/* Build a decl for the omp child function.  It'll not contain a body
   yet, just the bare decl.  */

static void
create_omp_child_function (omp_context *ctx, bool task_copy)
{
  tree decl, type, name, t;

  name = create_omp_child_function_name (task_copy);
  if (task_copy)
    type = build_function_type_list (void_type_node, ptr_type_node,
				     ptr_type_node, NULL_TREE);
  else
    type = build_function_type_list (void_type_node, ptr_type_node, NULL_TREE);

  decl = build_decl (FUNCTION_DECL, name, type);
  decl = lang_hooks.decls.pushdecl (decl);

  if (!task_copy)
    ctx->cb.dst_fn = decl;
  else
    gimple_omp_task_set_copy_fn (ctx->stmt, decl);

  TREE_STATIC (decl) = 1;
  TREE_USED (decl) = 1;
  DECL_ARTIFICIAL (decl) = 1;
  DECL_IGNORED_P (decl) = 0;
  TREE_PUBLIC (decl) = 0;
  DECL_UNINLINABLE (decl) = 1;
  DECL_EXTERNAL (decl) = 0;
  DECL_CONTEXT (decl) = NULL_TREE;
  DECL_INITIAL (decl) = make_node (BLOCK);

  t = build_decl (RESULT_DECL, NULL_TREE, void_type_node);
  DECL_ARTIFICIAL (t) = 1;
  DECL_IGNORED_P (t) = 1;
  DECL_RESULT (decl) = t;

  t = build_decl (PARM_DECL, get_identifier (".omp_data_i"), ptr_type_node);
  DECL_ARTIFICIAL (t) = 1;
  DECL_ARG_TYPE (t) = ptr_type_node;
  DECL_CONTEXT (t) = current_function_decl;
  TREE_USED (t) = 1;
  DECL_ARGUMENTS (decl) = t;
  if (!task_copy)
    ctx->receiver_decl = t;
  else
    {
      t = build_decl (PARM_DECL, get_identifier (".omp_data_o"),
		      ptr_type_node);
      DECL_ARTIFICIAL (t) = 1;
      DECL_ARG_TYPE (t) = ptr_type_node;
      DECL_CONTEXT (t) = current_function_decl;
      TREE_USED (t) = 1;
      TREE_CHAIN (t) = DECL_ARGUMENTS (decl);
      DECL_ARGUMENTS (decl) = t;
    }

  /* Allocate memory for the function structure.  The call to
     allocate_struct_function clobbers CFUN, so we need to restore
     it afterward.  */
  push_struct_function (decl);
  DECL_SOURCE_LOCATION (decl) = gimple_location (ctx->stmt);
  cfun->function_end_locus = gimple_location (ctx->stmt);
  pop_cfun ();
}
/* Scan an OpenMP parallel directive.  */

static void
scan_omp_parallel (gimple_stmt_iterator *gsi, omp_context *outer_ctx)
{
  omp_context *ctx;
  tree name;
  gimple stmt = gsi_stmt (*gsi);

  /* Ignore parallel directives with empty bodies, unless there
     are copyin clauses.  */
  if (optimize > 0
      && empty_body_p (gimple_omp_body (stmt))
      && find_omp_clause (gimple_omp_parallel_clauses (stmt),
			  OMP_CLAUSE_COPYIN) == NULL)
    {
      gsi_replace (gsi, gimple_build_nop (), false);
      return;
    }

  ctx = new_omp_context (stmt, outer_ctx);
  if (taskreg_nesting_level > 1)
    ctx->is_nested = true;
  ctx->field_map = splay_tree_new (splay_tree_compare_pointers, 0, 0);
  ctx->default_kind = OMP_CLAUSE_DEFAULT_SHARED;
  ctx->record_type = lang_hooks.types.make_type (RECORD_TYPE);
  name = create_tmp_var_name (".omp_data_s");
  name = build_decl (TYPE_DECL, name, ctx->record_type);
  TYPE_NAME (ctx->record_type) = name;
  create_omp_child_function (ctx, false);
  gimple_omp_parallel_set_child_fn (stmt, ctx->cb.dst_fn);

  scan_sharing_clauses (gimple_omp_parallel_clauses (stmt), ctx);
  scan_omp (gimple_omp_body (stmt), ctx);

  if (TYPE_FIELDS (ctx->record_type) == NULL)
    ctx->record_type = ctx->receiver_decl = NULL;
  else
    {
      layout_type (ctx->record_type);
      fixup_child_record_type (ctx);
    }
}
/* Scan an OpenMP task directive.  */

static void
scan_omp_task (gimple_stmt_iterator *gsi, omp_context *outer_ctx)
{
  omp_context *ctx;
  tree name, t;
  gimple stmt = gsi_stmt (*gsi);

  /* Ignore task directives with empty bodies.  */
  if (optimize > 0
      && empty_body_p (gimple_omp_body (stmt)))
    {
      gsi_replace (gsi, gimple_build_nop (), false);
      return;
    }

  ctx = new_omp_context (stmt, outer_ctx);
  if (taskreg_nesting_level > 1)
    ctx->is_nested = true;
  ctx->field_map = splay_tree_new (splay_tree_compare_pointers, 0, 0);
  ctx->default_kind = OMP_CLAUSE_DEFAULT_SHARED;
  ctx->record_type = lang_hooks.types.make_type (RECORD_TYPE);
  name = create_tmp_var_name (".omp_data_s");
  name = build_decl (TYPE_DECL, name, ctx->record_type);
  TYPE_NAME (ctx->record_type) = name;
  create_omp_child_function (ctx, false);
  gimple_omp_task_set_child_fn (stmt, ctx->cb.dst_fn);

  scan_sharing_clauses (gimple_omp_task_clauses (stmt), ctx);

  if (ctx->srecord_type)
    {
      name = create_tmp_var_name (".omp_data_a");
      name = build_decl (TYPE_DECL, name, ctx->srecord_type);
      TYPE_NAME (ctx->srecord_type) = name;
      create_omp_child_function (ctx, true);
    }

  scan_omp (gimple_omp_body (stmt), ctx);

  if (TYPE_FIELDS (ctx->record_type) == NULL)
    {
      ctx->record_type = ctx->receiver_decl = NULL;
      t = build_int_cst (long_integer_type_node, 0);
      gimple_omp_task_set_arg_size (stmt, t);
      t = build_int_cst (long_integer_type_node, 1);
      gimple_omp_task_set_arg_align (stmt, t);
    }
  else
    {
      tree *p, vla_fields = NULL_TREE, *q = &vla_fields;
      /* Move VLA fields to the end.  */
      p = &TYPE_FIELDS (ctx->record_type);
      while (*p)
	if (!TYPE_SIZE_UNIT (TREE_TYPE (*p))
	    || ! TREE_CONSTANT (TYPE_SIZE_UNIT (TREE_TYPE (*p))))
	  {
	    *q = *p;
	    *p = TREE_CHAIN (*p);
	    TREE_CHAIN (*q) = NULL_TREE;
	    q = &TREE_CHAIN (*q);
	  }
	else
	  p = &TREE_CHAIN (*p);
      *p = vla_fields;
      layout_type (ctx->record_type);
      fixup_child_record_type (ctx);
      if (ctx->srecord_type)
	layout_type (ctx->srecord_type);
      t = fold_convert (long_integer_type_node,
			TYPE_SIZE_UNIT (ctx->record_type));
      gimple_omp_task_set_arg_size (stmt, t);
      t = build_int_cst (long_integer_type_node,
			 TYPE_ALIGN_UNIT (ctx->record_type));
      gimple_omp_task_set_arg_align (stmt, t);
    }
}
/* Scan an OpenMP loop directive.  */

static void
scan_omp_for (gimple stmt, omp_context *outer_ctx)
{
  omp_context *ctx;
  size_t i;

  ctx = new_omp_context (stmt, outer_ctx);

  scan_sharing_clauses (gimple_omp_for_clauses (stmt), ctx);

  scan_omp (gimple_omp_for_pre_body (stmt), ctx);
  for (i = 0; i < gimple_omp_for_collapse (stmt); i++)
    {
      scan_omp_op (gimple_omp_for_index_ptr (stmt, i), ctx);
      scan_omp_op (gimple_omp_for_initial_ptr (stmt, i), ctx);
      scan_omp_op (gimple_omp_for_final_ptr (stmt, i), ctx);
      scan_omp_op (gimple_omp_for_incr_ptr (stmt, i), ctx);
    }
  scan_omp (gimple_omp_body (stmt), ctx);
}

/* Scan an OpenMP sections directive.  */

static void
scan_omp_sections (gimple stmt, omp_context *outer_ctx)
{
  omp_context *ctx;

  ctx = new_omp_context (stmt, outer_ctx);
  scan_sharing_clauses (gimple_omp_sections_clauses (stmt), ctx);
  scan_omp (gimple_omp_body (stmt), ctx);
}

/* Scan an OpenMP single directive.  */

static void
scan_omp_single (gimple stmt, omp_context *outer_ctx)
{
  omp_context *ctx;
  tree name;

  ctx = new_omp_context (stmt, outer_ctx);
  ctx->field_map = splay_tree_new (splay_tree_compare_pointers, 0, 0);
  ctx->record_type = lang_hooks.types.make_type (RECORD_TYPE);
  name = create_tmp_var_name (".omp_copy_s");
  name = build_decl (TYPE_DECL, name, ctx->record_type);
  TYPE_NAME (ctx->record_type) = name;

  scan_sharing_clauses (gimple_omp_single_clauses (stmt), ctx);
  scan_omp (gimple_omp_body (stmt), ctx);

  if (TYPE_FIELDS (ctx->record_type) == NULL)
    ctx->record_type = NULL;
  else
    layout_type (ctx->record_type);
}
/* Check OpenMP nesting restrictions.  */

static void
check_omp_nesting_restrictions (gimple stmt, omp_context *ctx)
{
  switch (gimple_code (stmt))
    {
    case GIMPLE_OMP_FOR:
    case GIMPLE_OMP_SECTIONS:
    case GIMPLE_OMP_SINGLE:
    case GIMPLE_CALL:
      for (; ctx != NULL; ctx = ctx->outer)
	switch (gimple_code (ctx->stmt))
	  {
	  case GIMPLE_OMP_FOR:
	  case GIMPLE_OMP_SECTIONS:
	  case GIMPLE_OMP_SINGLE:
	  case GIMPLE_OMP_ORDERED:
	  case GIMPLE_OMP_MASTER:
	  case GIMPLE_OMP_TASK:
	    if (is_gimple_call (stmt))
	      {
		warning (0, "barrier region may not be closely nested inside "
			    "of work-sharing, critical, ordered, master or "
			    "explicit task region");
		return;
	      }
	    warning (0, "work-sharing region may not be closely nested inside "
			"of work-sharing, critical, ordered, master or explicit "
			"task region");
	    return;
	  case GIMPLE_OMP_PARALLEL:
	    return;
	  default:
	    break;
	  }
      break;
    case GIMPLE_OMP_MASTER:
      for (; ctx != NULL; ctx = ctx->outer)
	switch (gimple_code (ctx->stmt))
	  {
	  case GIMPLE_OMP_FOR:
	  case GIMPLE_OMP_SECTIONS:
	  case GIMPLE_OMP_SINGLE:
	  case GIMPLE_OMP_TASK:
	    warning (0, "master region may not be closely nested inside "
			"of work-sharing or explicit task region");
	    return;
	  case GIMPLE_OMP_PARALLEL:
	    return;
	  default:
	    break;
	  }
      break;
    case GIMPLE_OMP_ORDERED:
      for (; ctx != NULL; ctx = ctx->outer)
	switch (gimple_code (ctx->stmt))
	  {
	  case GIMPLE_OMP_CRITICAL:
	  case GIMPLE_OMP_TASK:
	    warning (0, "ordered region may not be closely nested inside "
			"of critical or explicit task region");
	    return;
	  case GIMPLE_OMP_FOR:
	    if (find_omp_clause (gimple_omp_for_clauses (ctx->stmt),
				 OMP_CLAUSE_ORDERED) == NULL)
	      warning (0, "ordered region must be closely nested inside "
			  "a loop region with an ordered clause");
	    return;
	  case GIMPLE_OMP_PARALLEL:
	    return;
	  default:
	    break;
	  }
      break;
    case GIMPLE_OMP_CRITICAL:
      for (; ctx != NULL; ctx = ctx->outer)
	if (gimple_code (ctx->stmt) == GIMPLE_OMP_CRITICAL
	    && (gimple_omp_critical_name (stmt)
		== gimple_omp_critical_name (ctx->stmt)))
	  {
	    warning (0, "critical region may not be nested inside a critical "
			"region with the same name");
	    return;
	  }
      break;
    default:
      break;
    }
}
/* Helper function for scan_omp.

   Callback for walk_tree or operators in walk_gimple_stmt used to
   scan for OpenMP directives in TP.  */

static tree
scan_omp_1_op (tree *tp, int *walk_subtrees, void *data)
{
  struct walk_stmt_info *wi = (struct walk_stmt_info *) data;
  omp_context *ctx = (omp_context *) wi->info;
  tree t = *tp;

  switch (TREE_CODE (t))
    {
    case VAR_DECL:
    case PARM_DECL:
    case LABEL_DECL:
    case RESULT_DECL:
      if (ctx)
	*tp = remap_decl (t, &ctx->cb);
      break;

    default:
      if (ctx && TYPE_P (t))
	*tp = remap_type (t, &ctx->cb);
      else if (!DECL_P (t))
	*walk_subtrees = 1;
      break;
    }

  return NULL_TREE;
}
/* Helper function for scan_omp.

   Callback for walk_gimple_stmt used to scan for OpenMP directives in
   the current statement in GSI.  */

static tree
scan_omp_1_stmt (gimple_stmt_iterator *gsi, bool *handled_ops_p,
		 struct walk_stmt_info *wi)
{
  gimple stmt = gsi_stmt (*gsi);
  omp_context *ctx = (omp_context *) wi->info;

  if (gimple_has_location (stmt))
    input_location = gimple_location (stmt);

  /* Check the OpenMP nesting restrictions.  */
  if (ctx != NULL)
    {
      if (is_gimple_omp (stmt))
	check_omp_nesting_restrictions (stmt, ctx);
      else if (is_gimple_call (stmt))
	{
	  tree fndecl = gimple_call_fndecl (stmt);
	  if (fndecl && DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_NORMAL
	      && DECL_FUNCTION_CODE (fndecl) == BUILT_IN_GOMP_BARRIER)
	    check_omp_nesting_restrictions (stmt, ctx);
	}
    }

  *handled_ops_p = true;

  switch (gimple_code (stmt))
    {
    case GIMPLE_OMP_PARALLEL:
      taskreg_nesting_level++;
      scan_omp_parallel (gsi, ctx);
      taskreg_nesting_level--;
      break;

    case GIMPLE_OMP_TASK:
      taskreg_nesting_level++;
      scan_omp_task (gsi, ctx);
      taskreg_nesting_level--;
      break;

    case GIMPLE_OMP_FOR:
      scan_omp_for (stmt, ctx);
      break;

    case GIMPLE_OMP_SECTIONS:
      scan_omp_sections (stmt, ctx);
      break;

    case GIMPLE_OMP_SINGLE:
      scan_omp_single (stmt, ctx);
      break;

    case GIMPLE_OMP_SECTION:
    case GIMPLE_OMP_MASTER:
    case GIMPLE_OMP_ORDERED:
    case GIMPLE_OMP_CRITICAL:
      ctx = new_omp_context (stmt, ctx);
      scan_omp (gimple_omp_body (stmt), ctx);
      break;

    case GIMPLE_BIND:
      {
	tree var;

	*handled_ops_p = false;
	if (ctx)
	  for (var = gimple_bind_vars (stmt); var ; var = TREE_CHAIN (var))
	    insert_decl_map (&ctx->cb, var, var);
      }
      break;

    default:
      *handled_ops_p = false;
      break;
    }

  return NULL_TREE;
}
/* Scan all the statements starting at the current statement.  CTX
   contains context information about the OpenMP directives and
   clauses found during the scan.  */

static void
scan_omp (gimple_seq body, omp_context *ctx)
{
  location_t saved_location;
  struct walk_stmt_info wi;

  memset (&wi, 0, sizeof (wi));
  wi.info = ctx;
  wi.want_locations = true;

  saved_location = input_location;
  walk_gimple_seq (body, scan_omp_1_stmt, scan_omp_1_op, &wi);
  input_location = saved_location;
}
/* Re-gimplification and code generation routines.  */

/* Build a call to GOMP_barrier.  */

static tree
build_omp_barrier (void)
{
  return build_call_expr (built_in_decls[BUILT_IN_GOMP_BARRIER], 0);
}

/* If a context was created for STMT when it was scanned, return it.  */

static omp_context *
maybe_lookup_ctx (gimple stmt)
{
  splay_tree_node n;
  n = splay_tree_lookup (all_contexts, (splay_tree_key) stmt);
  return n ? (omp_context *) n->value : NULL;
}
/* Find the mapping for DECL in CTX or the immediately enclosing
   context that has a mapping for DECL.

   If CTX is a nested parallel directive, we may have to use the decl
   mappings created in CTX's parent context.  Suppose that we have the
   following parallel nesting (variable UIDs showed for clarity):

	iD.1562 = 0;
	#omp parallel shared(iD.1562)		-> outer parallel
	  iD.1562 = iD.1562 + 1;

	  #omp parallel shared (iD.1562)	-> inner parallel
	     iD.1562 = iD.1562 - 1;

   Each parallel structure will create a distinct .omp_data_s structure
   for copying iD.1562 in/out of the directive:

	outer parallel		.omp_data_s.1.i -> iD.1562
	inner parallel		.omp_data_s.2.i -> iD.1562

   A shared variable mapping will produce a copy-out operation before
   the parallel directive and a copy-in operation after it.  So, in
   this case we would have:

	iD.1562 = 0;
	.omp_data_o.1.i = iD.1562;
	#omp parallel shared(iD.1562)		-> outer parallel
	  .omp_data_i.1 = &.omp_data_o.1
	  .omp_data_i.1->i = .omp_data_i.1->i + 1;

	  .omp_data_o.2.i = iD.1562;		-> **
	  #omp parallel shared(iD.1562)		-> inner parallel
	    .omp_data_i.2 = &.omp_data_o.2
	    .omp_data_i.2->i = .omp_data_i.2->i - 1;

   ** This is a problem.  The symbol iD.1562 cannot be referenced
   inside the body of the outer parallel region.  But since we are
   emitting this copy operation while expanding the inner parallel
   directive, we need to access the CTX structure of the outer
   parallel directive to get the correct mapping:

	.omp_data_o.2.i = .omp_data_i.1->i

   Since there may be other workshare or parallel directives enclosing
   the parallel directive, it may be necessary to walk up the context
   parent chain.  This is not a problem in general because nested
   parallelism happens only rarely.  */

static tree
lookup_decl_in_outer_ctx (tree decl, omp_context *ctx)
{
  tree t;
  omp_context *up;

  for (up = ctx->outer, t = NULL; up && t == NULL; up = up->outer)
    t = maybe_lookup_decl (decl, up);

  gcc_assert (!ctx->is_nested || t || is_global_var (decl));

  return t ? t : decl;
}


/* Similar to lookup_decl_in_outer_ctx, but return DECL if not found
   in outer contexts.  */

static tree
maybe_lookup_decl_in_outer_ctx (tree decl, omp_context *ctx)
{
  tree t = NULL;
  omp_context *up;

  for (up = ctx->outer, t = NULL; up && t == NULL; up = up->outer)
    t = maybe_lookup_decl (decl, up);

  return t ? t : decl;
}
/* Construct the initialization value for reduction CLAUSE.  */

tree
omp_reduction_init (tree clause, tree type)
{
  switch (OMP_CLAUSE_REDUCTION_CODE (clause))
    {
    case PLUS_EXPR:
    case MINUS_EXPR:
    case BIT_IOR_EXPR:
    case BIT_XOR_EXPR:
    case TRUTH_OR_EXPR:
    case TRUTH_ORIF_EXPR:
    case TRUTH_XOR_EXPR:
    case NE_EXPR:
      return fold_convert (type, integer_zero_node);

    case MULT_EXPR:
    case TRUTH_AND_EXPR:
    case TRUTH_ANDIF_EXPR:
    case EQ_EXPR:
      return fold_convert (type, integer_one_node);

    case BIT_AND_EXPR:
      return fold_convert (type, integer_minus_one_node);

    case MAX_EXPR:
      if (SCALAR_FLOAT_TYPE_P (type))
	{
	  REAL_VALUE_TYPE max, min;
	  if (HONOR_INFINITIES (TYPE_MODE (type)))
	    {
	      real_inf (&max);
	      real_arithmetic (&min, NEGATE_EXPR, &max, NULL);
	    }
	  else
	    real_maxval (&min, 1, TYPE_MODE (type));
	  return build_real (type, min);
	}
      else
	{
	  gcc_assert (INTEGRAL_TYPE_P (type));
	  return TYPE_MIN_VALUE (type);
	}

    case MIN_EXPR:
      if (SCALAR_FLOAT_TYPE_P (type))
	{
	  REAL_VALUE_TYPE max;
	  if (HONOR_INFINITIES (TYPE_MODE (type)))
	    real_inf (&max);
	  else
	    real_maxval (&max, 0, TYPE_MODE (type));
	  return build_real (type, max);
	}
      else
	{
	  gcc_assert (INTEGRAL_TYPE_P (type));
	  return TYPE_MAX_VALUE (type);
	}

    default:
      gcc_unreachable ();
    }
}
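
/* So, by way of example: reduction (+:x) initializes the private copy
   to 0, reduction (*:x) to 1, reduction (&:x) to ~0, and
   reduction (max:x) to the minimum value of the type (or to -Inf for
   floating point types that honor infinities).  */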
2191 /* Generate code to implement the input clauses, FIRSTPRIVATE and COPYIN,
2192 from the receiver (aka child) side and initializers for REFERENCE_TYPE
2193 private variables. Initialization statements go in ILIST, while calls
2194 to destructors go in DLIST. */
2197 lower_rec_input_clauses (tree clauses, gimple_seq *ilist, gimple_seq *dlist,
2200 gimple_stmt_iterator diter;
2201 tree c, dtor, copyin_seq, x, ptr;
2202 bool copyin_by_ref = false;
2203 bool lastprivate_firstprivate = false;
2206 *dlist = gimple_seq_alloc ();
2207 diter = gsi_start (*dlist);
2210 /* Do all the fixed sized types in the first pass, and the variable sized
2211 types in the second pass. This makes sure that the scalar arguments to
2212 the variable sized types are processed before we use them in the
2213 variable sized operations. */
2214 for (pass = 0; pass < 2; ++pass)
2216 for (c = clauses; c ; c = OMP_CLAUSE_CHAIN (c))
2218 enum omp_clause_code c_kind = OMP_CLAUSE_CODE (c);
2224 case OMP_CLAUSE_PRIVATE:
2225 if (OMP_CLAUSE_PRIVATE_DEBUG (c))
2228 case OMP_CLAUSE_SHARED:
2229 if (maybe_lookup_decl (OMP_CLAUSE_DECL (c), ctx) == NULL)
2231 gcc_assert (is_global_var (OMP_CLAUSE_DECL (c)));
2234 case OMP_CLAUSE_FIRSTPRIVATE:
2235 case OMP_CLAUSE_COPYIN:
2236 case OMP_CLAUSE_REDUCTION:
2238 case OMP_CLAUSE_LASTPRIVATE:
2239 if (OMP_CLAUSE_LASTPRIVATE_FIRSTPRIVATE (c))
2241 lastprivate_firstprivate = true;
2250 new_var = var = OMP_CLAUSE_DECL (c);
2251 if (c_kind != OMP_CLAUSE_COPYIN)
2252 new_var = lookup_decl (var, ctx);
2254 if (c_kind == OMP_CLAUSE_SHARED || c_kind == OMP_CLAUSE_COPYIN)
2259 else if (is_variable_sized (var))
2261 /* For variable sized types, we need to allocate the
2262 actual storage here. Call alloca and store the
2263 result in the pointer decl that we created elsewhere. */
2267 if (c_kind != OMP_CLAUSE_FIRSTPRIVATE || !is_task_ctx (ctx))
2272 ptr = DECL_VALUE_EXPR (new_var);
2273 gcc_assert (TREE_CODE (ptr) == INDIRECT_REF);
2274 ptr = TREE_OPERAND (ptr, 0);
2275 gcc_assert (DECL_P (ptr));
2276 x = TYPE_SIZE_UNIT (TREE_TYPE (new_var));
2278 /* void *tmp = __builtin_alloca (x);  */
2280 = gimple_build_call (built_in_decls[BUILT_IN_ALLOCA], 1, x);
2281 tmp = create_tmp_var_raw (ptr_type_node, NULL);
2282 gimple_add_tmp_var (tmp);
2283 gimple_call_set_lhs (stmt, tmp);
2285 gimple_seq_add_stmt (ilist, stmt);
2287 x = fold_convert (TREE_TYPE (ptr), tmp);
2288 gimplify_assign (ptr, x, ilist);
2291 else if (is_reference (var))
2293 /* For references that are being privatized for Fortran,
2294 allocate new backing storage for the new pointer
2295 variable. This allows us to avoid changing all the
2296 code that expects a pointer to something that expects
2297 a direct variable. Note that this doesn't apply to
2298 C++, since reference types are disallowed in data
2299 sharing clauses there, except for NRV optimized return values.  */
2304 x = TYPE_SIZE_UNIT (TREE_TYPE (TREE_TYPE (new_var)));
2305 if (c_kind == OMP_CLAUSE_FIRSTPRIVATE && is_task_ctx (ctx))
2307 x = build_receiver_ref (var, false, ctx);
2308 x = build_fold_addr_expr (x);
2310 else if (TREE_CONSTANT (x))
2312 const char *name = NULL;
2313 if (DECL_NAME (var))
2314 name = IDENTIFIER_POINTER (DECL_NAME (new_var));
2316 x = create_tmp_var_raw (TREE_TYPE (TREE_TYPE (new_var)),
2318 gimple_add_tmp_var (x);
2319 x = build_fold_addr_expr_with_type (x, TREE_TYPE (new_var));
2323 x = build_call_expr (built_in_decls[BUILT_IN_ALLOCA], 1, x);
2324 x = fold_convert (TREE_TYPE (new_var), x);
2327 gimplify_assign (new_var, x, ilist);
2329 new_var = build_fold_indirect_ref (new_var);
2331 else if (c_kind == OMP_CLAUSE_REDUCTION
2332 && OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
2340 switch (OMP_CLAUSE_CODE (c))
2342 case OMP_CLAUSE_SHARED:
2343 /* Shared global vars are just accessed directly. */
2344 if (is_global_var (new_var))
2346 /* Set up the DECL_VALUE_EXPR for shared variables now. This
2347 needs to be delayed until after fixup_child_record_type so
2348 that we get the correct type during the dereference. */
2349 by_ref = use_pointer_for_field (var, ctx);
2350 x = build_receiver_ref (var, by_ref, ctx);
2351 SET_DECL_VALUE_EXPR (new_var, x);
2352 DECL_HAS_VALUE_EXPR_P (new_var) = 1;
2354 /* ??? If VAR is not passed by reference, and the variable
2355 hasn't been initialized yet, then we'll get a warning for
2356 the store into the omp_data_s structure. Ideally, we'd be
2357 able to notice this and not store anything at all, but
2358 we're generating code too early. Suppress the warning. */
2360 TREE_NO_WARNING (var) = 1;
2363 case OMP_CLAUSE_LASTPRIVATE:
2364 if (OMP_CLAUSE_LASTPRIVATE_FIRSTPRIVATE (c))
2368 case OMP_CLAUSE_PRIVATE:
2369 if (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_PRIVATE)
2370 x = build_outer_var_ref (var, ctx);
2371 else if (OMP_CLAUSE_PRIVATE_OUTER_REF (c))
2373 if (is_task_ctx (ctx))
2374 x = build_receiver_ref (var, false, ctx);
2376 x = build_outer_var_ref (var, ctx);
2380 x = lang_hooks.decls.omp_clause_default_ctor (c, new_var, x);
2382 gimplify_and_add (x, ilist);
2386 x = lang_hooks.decls.omp_clause_dtor (c, new_var);
2389 gimple_seq tseq = NULL;
2392 gimplify_stmt (&dtor, &tseq);
2393 gsi_insert_seq_before (&diter, tseq, GSI_SAME_STMT);
2397 case OMP_CLAUSE_FIRSTPRIVATE:
2398 if (is_task_ctx (ctx))
2400 if (is_reference (var) || is_variable_sized (var))
2402 else if (is_global_var (maybe_lookup_decl_in_outer_ctx (var,
2404 || use_pointer_for_field (var, NULL))
2406 x = build_receiver_ref (var, false, ctx);
2407 SET_DECL_VALUE_EXPR (new_var, x);
2408 DECL_HAS_VALUE_EXPR_P (new_var) = 1;
2412 x = build_outer_var_ref (var, ctx);
2413 x = lang_hooks.decls.omp_clause_copy_ctor (c, new_var, x);
2414 gimplify_and_add (x, ilist);
2418 case OMP_CLAUSE_COPYIN:
2419 by_ref = use_pointer_for_field (var, NULL);
2420 x = build_receiver_ref (var, by_ref, ctx);
2421 x = lang_hooks.decls.omp_clause_assign_op (c, new_var, x);
2422 append_to_statement_list (x, &copyin_seq);
2423 copyin_by_ref |= by_ref;
2426 case OMP_CLAUSE_REDUCTION:
2427 if (OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
2429 tree placeholder = OMP_CLAUSE_REDUCTION_PLACEHOLDER (c);
2430 x = build_outer_var_ref (var, ctx);
2432 if (is_reference (var))
2433 x = build_fold_addr_expr (x);
2434 SET_DECL_VALUE_EXPR (placeholder, x);
2435 DECL_HAS_VALUE_EXPR_P (placeholder) = 1;
2436 lower_omp (OMP_CLAUSE_REDUCTION_GIMPLE_INIT (c), ctx);
2437 gimple_seq_add_seq (ilist,
2438 OMP_CLAUSE_REDUCTION_GIMPLE_INIT (c));
2439 OMP_CLAUSE_REDUCTION_GIMPLE_INIT (c) = NULL;
2440 DECL_HAS_VALUE_EXPR_P (placeholder) = 0;
2444 x = omp_reduction_init (c, TREE_TYPE (new_var));
2445 gcc_assert (TREE_CODE (TREE_TYPE (new_var)) != ARRAY_TYPE);
2446 gimplify_assign (new_var, x, ilist);
2456 /* The copyin sequence is not to be executed by the main thread, since
2457 that would result in self-copies.  A self-copy may be harmless for
2458 scalars, but it certainly is not for C++ operator=.  */
2461 x = build_call_expr (built_in_decls[BUILT_IN_OMP_GET_THREAD_NUM], 0);
2462 x = build2 (NE_EXPR, boolean_type_node, x,
2463 build_int_cst (TREE_TYPE (x), 0));
2464 x = build3 (COND_EXPR, void_type_node, x, copyin_seq, NULL);
2465 gimplify_and_add (x, ilist);
2468 /* If any copyin variable is passed by reference, we must ensure the
2469 master thread doesn't modify it before it is copied over in all
2470 threads. Similarly for variables in both firstprivate and
2471 lastprivate clauses we need to ensure the lastprivate copying
2472 happens after firstprivate copying in all threads. */
2473 if (copyin_by_ref || lastprivate_firstprivate)
2474 gimplify_and_add (build_omp_barrier (), ilist);
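/* For illustration, a simplified sketch (not the exact gimple emitted
   above): lowering

       #pragma omp parallel firstprivate (a) copyin (t)

   produces a receiver-side prologue along the lines of

       a = .omp_data_i->a;                     <-- firstprivate copy
       if (__builtin_omp_get_thread_num () != 0)
         t = *.omp_data_i->t;                  <-- copyin; master skips
       GOMP_barrier ();                        <-- only if T is by reference

   matching the copyin guard and the trailing barrier built above.  */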
2478 /* Generate code to implement the LASTPRIVATE clauses. This is used for
2479 both parallel and workshare constructs.  PREDICATE may be NULL if it's always true.  */
2483 lower_lastprivate_clauses (tree clauses, tree predicate, gimple_seq *stmt_list,
2486 tree x, c, label = NULL;
2487 bool par_clauses = false;
2489 /* Early exit if there are no lastprivate clauses. */
2490 clauses = find_omp_clause (clauses, OMP_CLAUSE_LASTPRIVATE);
2491 if (clauses == NULL)
2493 /* If this was a workshare construct, see if it had been combined
2494 with its parallel. In that case, look for the clauses on the
2495 parallel statement itself. */
2496 if (is_parallel_ctx (ctx))
2500 if (ctx == NULL || !is_parallel_ctx (ctx))
2503 clauses = find_omp_clause (gimple_omp_parallel_clauses (ctx->stmt),
2504 OMP_CLAUSE_LASTPRIVATE);
2505 if (clauses == NULL)
2513 tree label_true, arm1, arm2;
2515 label = create_artificial_label ();
2516 label_true = create_artificial_label ();
2517 arm1 = TREE_OPERAND (predicate, 0);
2518 arm2 = TREE_OPERAND (predicate, 1);
2519 gimplify_expr (&arm1, stmt_list, NULL, is_gimple_val, fb_rvalue);
2520 gimplify_expr (&arm2, stmt_list, NULL, is_gimple_val, fb_rvalue);
2521 stmt = gimple_build_cond (TREE_CODE (predicate), arm1, arm2,
2523 gimple_seq_add_stmt (stmt_list, stmt);
2524 gimple_seq_add_stmt (stmt_list, gimple_build_label (label_true));
2527 for (c = clauses; c ;)
2531 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_LASTPRIVATE)
2533 var = OMP_CLAUSE_DECL (c);
2534 new_var = lookup_decl (var, ctx);
2536 if (OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c))
2538 lower_omp (OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c), ctx);
2539 gimple_seq_add_seq (stmt_list,
2540 OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c));
2542 OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c) = NULL;
2544 x = build_outer_var_ref (var, ctx);
2545 if (is_reference (var))
2546 new_var = build_fold_indirect_ref (new_var);
2547 x = lang_hooks.decls.omp_clause_assign_op (c, x, new_var);
2548 gimplify_and_add (x, stmt_list);
2550 c = OMP_CLAUSE_CHAIN (c);
2551 if (c == NULL && !par_clauses)
2553 /* If this was a workshare construct, see if it had been combined
2554 with its parallel. In that case, continue looking for the
2555 clauses also on the parallel statement itself. */
2556 if (is_parallel_ctx (ctx))
2560 if (ctx == NULL || !is_parallel_ctx (ctx))
2563 c = find_omp_clause (gimple_omp_parallel_clauses (ctx->stmt),
2564 OMP_CLAUSE_LASTPRIVATE);
2570 gimple_seq_add_stmt (stmt_list, gimple_build_label (label));
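/* For illustration: for "#pragma omp for lastprivate (x)" the sequence
   built above amounts to

       if (V == LAST)                  <-- the PREDICATE, when non-NULL
         x.shared = x.private;         <-- omp_clause_assign_op

   so only the thread that executed the final iteration updates the
   original variable.  */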
2574 /* Generate code to implement the REDUCTION clauses. */
2577 lower_reduction_clauses (tree clauses, gimple_seq *stmt_seqp, omp_context *ctx)
2579 gimple_seq sub_seq = NULL;
2584 /* First see if there is exactly one reduction clause. Use OMP_ATOMIC
2585 update in that case, otherwise use a lock. */
2586 for (c = clauses; c && count < 2; c = OMP_CLAUSE_CHAIN (c))
2587 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_REDUCTION)
2589 if (OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
2591 /* Never use OMP_ATOMIC for array reductions. */
2601 for (c = clauses; c ; c = OMP_CLAUSE_CHAIN (c))
2603 tree var, ref, new_var;
2604 enum tree_code code;
2606 if (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_REDUCTION)
2609 var = OMP_CLAUSE_DECL (c);
2610 new_var = lookup_decl (var, ctx);
2611 if (is_reference (var))
2612 new_var = build_fold_indirect_ref (new_var);
2613 ref = build_outer_var_ref (var, ctx);
2614 code = OMP_CLAUSE_REDUCTION_CODE (c);
2616 /* reduction(-:var) sums up the partial results, so it acts
2617 identically to reduction(+:var). */
2618 if (code == MINUS_EXPR)
2623 tree addr = build_fold_addr_expr (ref);
2625 addr = save_expr (addr);
2626 ref = build1 (INDIRECT_REF, TREE_TYPE (TREE_TYPE (addr)), addr);
2627 x = fold_build2 (code, TREE_TYPE (ref), ref, new_var);
2628 x = build2 (OMP_ATOMIC, void_type_node, addr, x);
2629 gimplify_and_add (x, stmt_seqp);
2633 if (OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
2635 tree placeholder = OMP_CLAUSE_REDUCTION_PLACEHOLDER (c);
2637 if (is_reference (var))
2638 ref = build_fold_addr_expr (ref);
2639 SET_DECL_VALUE_EXPR (placeholder, ref);
2640 DECL_HAS_VALUE_EXPR_P (placeholder) = 1;
2641 lower_omp (OMP_CLAUSE_REDUCTION_GIMPLE_MERGE (c), ctx);
2642 gimple_seq_add_seq (&sub_seq, OMP_CLAUSE_REDUCTION_GIMPLE_MERGE (c));
2643 OMP_CLAUSE_REDUCTION_GIMPLE_MERGE (c) = NULL;
2644 OMP_CLAUSE_REDUCTION_PLACEHOLDER (c) = NULL;
2648 x = build2 (code, TREE_TYPE (ref), ref, new_var);
2649 ref = build_outer_var_ref (var, ctx);
2650 gimplify_assign (ref, x, &sub_seq);
2654 stmt = gimple_build_call (built_in_decls[BUILT_IN_GOMP_ATOMIC_START], 0);
2655 gimple_seq_add_stmt (stmt_seqp, stmt);
2657 gimple_seq_add_seq (stmt_seqp, sub_seq);
2659 stmt = gimple_build_call (built_in_decls[BUILT_IN_GOMP_ATOMIC_END], 0);
2660 gimple_seq_add_stmt (stmt_seqp, stmt);
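/* For illustration: a single "reduction (+:s)" clause is merged above
   with an atomic update, roughly

       #pragma omp atomic
       s = s + s.private;

   whereas several reduction clauses (or array reductions) are merged
   inside a GOMP_atomic_start ()/GOMP_atomic_end () critical section.  */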
2664 /* Generate code to implement the COPYPRIVATE clauses. */
2667 lower_copyprivate_clauses (tree clauses, gimple_seq *slist, gimple_seq *rlist,
2672 for (c = clauses; c ; c = OMP_CLAUSE_CHAIN (c))
2677 if (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_COPYPRIVATE)
2680 var = OMP_CLAUSE_DECL (c);
2681 by_ref = use_pointer_for_field (var, NULL);
2683 ref = build_sender_ref (var, ctx);
2684 x = lookup_decl_in_outer_ctx (var, ctx);
2685 x = by_ref ? build_fold_addr_expr (x) : x;
2686 gimplify_assign (ref, x, slist);
2688 ref = build_receiver_ref (var, by_ref, ctx);
2689 if (is_reference (var))
2691 ref = build_fold_indirect_ref (ref);
2692 var = build_fold_indirect_ref (var);
2694 x = lang_hooks.decls.omp_clause_assign_op (c, var, ref);
2695 gimplify_and_add (x, rlist);
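/* For illustration: for "#pragma omp single copyprivate (x)" the
   thread that ran the single body stores into the communication
   record and the others read back from it, roughly

       .omp_data_o.x = &x;             <-- sender side (SLIST)
       x = *.omp_data_i->x;            <-- receiver side (RLIST)

   with the indirection dropped when X need not be passed by
   reference.  */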
2700 /* Generate code to implement the clauses, FIRSTPRIVATE, COPYIN, LASTPRIVATE,
2701 and REDUCTION from the sender (aka parent) side. */
2704 lower_send_clauses (tree clauses, gimple_seq *ilist, gimple_seq *olist,
2709 for (c = clauses; c ; c = OMP_CLAUSE_CHAIN (c))
2711 tree val, ref, x, var;
2712 bool by_ref, do_in = false, do_out = false;
2714 switch (OMP_CLAUSE_CODE (c))
2716 case OMP_CLAUSE_PRIVATE:
2717 if (OMP_CLAUSE_PRIVATE_OUTER_REF (c))
2720 case OMP_CLAUSE_FIRSTPRIVATE:
2721 case OMP_CLAUSE_COPYIN:
2722 case OMP_CLAUSE_LASTPRIVATE:
2723 case OMP_CLAUSE_REDUCTION:
2729 val = OMP_CLAUSE_DECL (c);
2730 var = lookup_decl_in_outer_ctx (val, ctx);
2732 if (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_COPYIN
2733 && is_global_var (var))
2735 if (is_variable_sized (val))
2737 by_ref = use_pointer_for_field (val, NULL);
2739 switch (OMP_CLAUSE_CODE (c))
2741 case OMP_CLAUSE_PRIVATE:
2742 case OMP_CLAUSE_FIRSTPRIVATE:
2743 case OMP_CLAUSE_COPYIN:
2747 case OMP_CLAUSE_LASTPRIVATE:
2748 if (by_ref || is_reference (val))
2750 if (OMP_CLAUSE_LASTPRIVATE_FIRSTPRIVATE (c))
2757 if (lang_hooks.decls.omp_private_outer_ref (val))
2762 case OMP_CLAUSE_REDUCTION:
2764 do_out = !(by_ref || is_reference (val));
2773 ref = build_sender_ref (val, ctx);
2774 x = by_ref ? build_fold_addr_expr (var) : var;
2775 gimplify_assign (ref, x, ilist);
2776 if (is_task_ctx (ctx))
2777 DECL_ABSTRACT_ORIGIN (TREE_OPERAND (ref, 1)) = NULL;
2782 ref = build_sender_ref (val, ctx);
2783 gimplify_assign (var, ref, olist);
2788 /* Generate code to implement SHARED from the sender (aka parent)
2789 side. This is trickier, since GIMPLE_OMP_PARALLEL_CLAUSES doesn't
2790 list things that got automatically shared. */
2793 lower_send_shared_vars (gimple_seq *ilist, gimple_seq *olist, omp_context *ctx)
2795 tree var, ovar, nvar, f, x, record_type;
2797 if (ctx->record_type == NULL)
2800 record_type = ctx->srecord_type ? ctx->srecord_type : ctx->record_type;
2801 for (f = TYPE_FIELDS (record_type); f ; f = TREE_CHAIN (f))
2803 ovar = DECL_ABSTRACT_ORIGIN (f);
2804 nvar = maybe_lookup_decl (ovar, ctx);
2805 if (!nvar || !DECL_HAS_VALUE_EXPR_P (nvar))
2808 /* If CTX is a nested parallel directive, find the immediately
2809 enclosing parallel or workshare construct that contains a
2810 mapping for OVAR. */
2811 var = lookup_decl_in_outer_ctx (ovar, ctx);
2813 if (use_pointer_for_field (ovar, ctx))
2815 x = build_sender_ref (ovar, ctx);
2816 var = build_fold_addr_expr (var);
2817 gimplify_assign (x, var, ilist);
2821 x = build_sender_ref (ovar, ctx);
2822 gimplify_assign (x, var, ilist);
2824 if (!TREE_READONLY (var)
2825 /* We don't need to receive a new reference to a result
2826 or parm decl. In fact we may not store to it as we will
2827 invalidate any pending RSO and generate wrong gimple during inlining.  */
2829 && !((TREE_CODE (var) == RESULT_DECL
2830 || TREE_CODE (var) == PARM_DECL)
2831 && DECL_BY_REFERENCE (var)))
2833 x = build_sender_ref (ovar, ctx);
2834 gimplify_assign (var, x, olist);
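/* For illustration: for an implicitly shared scalar I, the sender
   side built above is roughly

       .omp_data_o.i = i;              <-- before the parallel (ILIST)
       i = .omp_data_o.i;              <-- after the parallel (OLIST)

   with &I stored instead when use_pointer_for_field decided the
   variable must live in memory.  */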
2841 /* A convenience function to build an empty GIMPLE_COND with just the condition.  */
2845 gimple_build_cond_empty (tree cond)
2847 enum tree_code pred_code;
2850 gimple_cond_get_ops_from_tree (cond, &pred_code, &lhs, &rhs);
2851 return gimple_build_cond (pred_code, lhs, rhs, NULL_TREE, NULL_TREE);
2855 /* Build the function calls to GOMP_parallel_start etc to actually
2856 generate the parallel operation. REGION is the parallel region
2857 being expanded. BB is the block where to insert the code. WS_ARGS
2858 will be set if this is a call to a combined parallel+workshare
2859 construct, it contains the list of additional arguments needed by
2860 the workshare construct. */
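/* For illustration, the sequence emitted for a plain parallel is
   roughly

       GOMP_parallel_start (child_fn, &.omp_data_o, num_threads);
       child_fn (&.omp_data_o);
       GOMP_parallel_end ();

   with one of the GOMP_parallel_loop_*_start or
   GOMP_parallel_sections_start entry points substituted when the
   region is a combined parallel+workshare.  */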
2863 expand_parallel_call (struct omp_region *region, basic_block bb,
2864 gimple entry_stmt, tree ws_args)
2866 tree t, t1, t2, val, cond, c, clauses;
2867 gimple_stmt_iterator gsi;
2871 clauses = gimple_omp_parallel_clauses (entry_stmt);
2873 /* Determine what flavor of GOMP_parallel_start we will be emitting.  */
2875 start_ix = BUILT_IN_GOMP_PARALLEL_START;
2876 if (is_combined_parallel (region))
2878 switch (region->inner->type)
2880 case GIMPLE_OMP_FOR:
2881 gcc_assert (region->inner->sched_kind != OMP_CLAUSE_SCHEDULE_AUTO);
2882 start_ix = BUILT_IN_GOMP_PARALLEL_LOOP_STATIC_START
2883 + (region->inner->sched_kind
2884 == OMP_CLAUSE_SCHEDULE_RUNTIME
2885 ? 3 : region->inner->sched_kind);
2887 case GIMPLE_OMP_SECTIONS:
2888 start_ix = BUILT_IN_GOMP_PARALLEL_SECTIONS_START;
2895 /* By default, the value of NUM_THREADS is zero (selected at run time)
2896 and there is no conditional. */
2898 val = build_int_cst (unsigned_type_node, 0);
2900 c = find_omp_clause (clauses, OMP_CLAUSE_IF);
2902 cond = OMP_CLAUSE_IF_EXPR (c);
2904 c = find_omp_clause (clauses, OMP_CLAUSE_NUM_THREADS);
2906 val = OMP_CLAUSE_NUM_THREADS_EXPR (c);
2908 /* Ensure 'val' is of the correct type. */
2909 val = fold_convert (unsigned_type_node, val);
2911 /* If we found the clause 'if (cond)', build either
2912 (cond != 0) or (cond ? val : 1u). */
2915 gimple_stmt_iterator gsi;
2917 cond = gimple_boolify (cond);
2919 if (integer_zerop (val))
2920 val = fold_build2 (EQ_EXPR, unsigned_type_node, cond,
2921 build_int_cst (TREE_TYPE (cond), 0));
2924 basic_block cond_bb, then_bb, else_bb;
2925 edge e, e_then, e_else;
2926 tree tmp_then, tmp_else, tmp_join, tmp_var;
2928 tmp_var = create_tmp_var (TREE_TYPE (val), NULL);
2929 if (gimple_in_ssa_p (cfun))
2931 tmp_then = make_ssa_name (tmp_var, NULL);
2932 tmp_else = make_ssa_name (tmp_var, NULL);
2933 tmp_join = make_ssa_name (tmp_var, NULL);
2942 e = split_block (bb, NULL);
2947 then_bb = create_empty_bb (cond_bb);
2948 else_bb = create_empty_bb (then_bb);
2949 set_immediate_dominator (CDI_DOMINATORS, then_bb, cond_bb);
2950 set_immediate_dominator (CDI_DOMINATORS, else_bb, cond_bb);
2952 stmt = gimple_build_cond_empty (cond);
2953 gsi = gsi_start_bb (cond_bb);
2954 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
2956 gsi = gsi_start_bb (then_bb);
2957 stmt = gimple_build_assign (tmp_then, val);
2958 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
2960 gsi = gsi_start_bb (else_bb);
2961 stmt = gimple_build_assign
2962 (tmp_else, build_int_cst (unsigned_type_node, 1));
2963 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
2965 make_edge (cond_bb, then_bb, EDGE_TRUE_VALUE);
2966 make_edge (cond_bb, else_bb, EDGE_FALSE_VALUE);
2967 e_then = make_edge (then_bb, bb, EDGE_FALLTHRU);
2968 e_else = make_edge (else_bb, bb, EDGE_FALLTHRU);
2970 if (gimple_in_ssa_p (cfun))
2972 gimple phi = create_phi_node (tmp_join, bb);
2973 SSA_NAME_DEF_STMT (tmp_join) = phi;
2974 add_phi_arg (phi, tmp_then, e_then);
2975 add_phi_arg (phi, tmp_else, e_else);
2981 gsi = gsi_start_bb (bb);
2982 val = force_gimple_operand_gsi (&gsi, val, true, NULL_TREE,
2983 false, GSI_CONTINUE_LINKING);
2986 gsi = gsi_last_bb (bb);
2987 t = gimple_omp_parallel_data_arg (entry_stmt);
2989 t1 = null_pointer_node;
2991 t1 = build_fold_addr_expr (t);
2992 t2 = build_fold_addr_expr (gimple_omp_parallel_child_fn (entry_stmt));
2996 tree args = tree_cons (NULL, t2,
2997 tree_cons (NULL, t1,
2998 tree_cons (NULL, val, ws_args)));
2999 t = build_function_call_expr (built_in_decls[start_ix], args);
3002 t = build_call_expr (built_in_decls[start_ix], 3, t2, t1, val);
3004 force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
3005 false, GSI_CONTINUE_LINKING);
3007 t = gimple_omp_parallel_data_arg (entry_stmt);
3009 t = null_pointer_node;
3011 t = build_fold_addr_expr (t);
3012 t = build_call_expr (gimple_omp_parallel_child_fn (entry_stmt), 1, t);
3013 force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
3014 false, GSI_CONTINUE_LINKING);
3016 t = build_call_expr (built_in_decls[BUILT_IN_GOMP_PARALLEL_END], 0);
3017 force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
3018 false, GSI_CONTINUE_LINKING);
3022 /* Build the function call to GOMP_task to actually
3023 generate the task operation. BB is the block where to insert the code. */
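/* For illustration, the call emitted below is roughly

       GOMP_task (child_fn, &.omp_data_o, cpyfn, arg_size, arg_align,
                  if_clause_cond, untied_flag);

   where CPYFN is NULL unless a task firstprivate copy function is
   needed.  */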
3026 expand_task_call (basic_block bb, gimple entry_stmt)
3028 tree t, t1, t2, t3, flags, cond, c, clauses;
3029 gimple_stmt_iterator gsi;
3031 clauses = gimple_omp_task_clauses (entry_stmt);
3033 c = find_omp_clause (clauses, OMP_CLAUSE_IF);
3035 cond = gimple_boolify (OMP_CLAUSE_IF_EXPR (c));
3037 cond = boolean_true_node;
3039 c = find_omp_clause (clauses, OMP_CLAUSE_UNTIED);
3040 flags = build_int_cst (unsigned_type_node, (c ? 1 : 0));
3042 gsi = gsi_last_bb (bb);
3043 t = gimple_omp_task_data_arg (entry_stmt);
3045 t2 = null_pointer_node;
3047 t2 = build_fold_addr_expr (t);
3048 t1 = build_fold_addr_expr (gimple_omp_task_child_fn (entry_stmt));
3049 t = gimple_omp_task_copy_fn (entry_stmt);
3051 t3 = null_pointer_node;
3053 t3 = build_fold_addr_expr (t);
3055 t = build_call_expr (built_in_decls[BUILT_IN_GOMP_TASK], 7, t1, t2, t3,
3056 gimple_omp_task_arg_size (entry_stmt),
3057 gimple_omp_task_arg_align (entry_stmt), cond, flags);
3059 force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
3060 false, GSI_CONTINUE_LINKING);
3064 /* If exceptions are enabled, wrap the statements in BODY in a MUST_NOT_THROW
3065 catch handler and return it. This prevents programs from violating the
3066 structured block semantics with throws. */
3069 maybe_catch_exception (gimple_seq body)
3073 if (!flag_exceptions)
3076 if (lang_protect_cleanup_actions)
3077 t = lang_protect_cleanup_actions ();
3079 t = gimple_build_call (built_in_decls[BUILT_IN_TRAP], 0);
3081 f = gimple_build_eh_filter (NULL, gimple_seq_alloc_with_stmt (t));
3082 gimple_eh_filter_set_must_not_throw (f, true);
3084 t = gimple_build_try (body, gimple_seq_alloc_with_stmt (f),
3087 return gimple_seq_alloc_with_stmt (t);
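/* For illustration, the wrapper built above corresponds conceptually
   to the C++-like pseudocode

       try { BODY; }
       catch (...) { std::terminate (); }   <-- or __builtin_trap ()

   since the EH filter is marked MUST_NOT_THROW: an exception escaping
   the structured block ends the program rather than unwinding.  */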
3090 /* Chain all the DECLs in LIST by their TREE_CHAIN fields. */
3093 list2chain (tree list)
3097 for (t = list; t; t = TREE_CHAIN (t))
3099 tree var = TREE_VALUE (t);
3101 TREE_CHAIN (var) = TREE_VALUE (TREE_CHAIN (t));
3103 TREE_CHAIN (var) = NULL_TREE;
3106 return list ? TREE_VALUE (list) : NULL_TREE;
3110 /* Remove barriers in REGION->EXIT's block. Note that this is only
3111 valid for GIMPLE_OMP_PARALLEL regions. Since the end of a parallel region
3112 is an implicit barrier, any workshare inside the GIMPLE_OMP_PARALLEL that
3113 left a barrier at the end of the GIMPLE_OMP_PARALLEL region can now be removed.  */
3117 remove_exit_barrier (struct omp_region *region)
3119 gimple_stmt_iterator gsi;
3120 basic_block exit_bb;
3125 exit_bb = region->exit;
3127 /* If the parallel region doesn't return, we don't have REGION->EXIT block at all.  */
3132 /* The last insn in the block will be the parallel's GIMPLE_OMP_RETURN. The
3133 workshare's GIMPLE_OMP_RETURN will be in a preceding block. The kinds of
3134 statements that can appear in between are extremely limited -- no
3135 memory operations at all.  Here we are even stricter: the only
3136 thing we allow to precede this GIMPLE_OMP_RETURN is a label.  */
3137 gsi = gsi_last_bb (exit_bb);
3138 gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_RETURN);
3140 if (!gsi_end_p (gsi) && gimple_code (gsi_stmt (gsi)) != GIMPLE_LABEL)
3143 FOR_EACH_EDGE (e, ei, exit_bb->preds)
3145 gsi = gsi_last_bb (e->src);
3146 if (gsi_end_p (gsi))
3148 stmt = gsi_stmt (gsi);
3149 if (gimple_code (stmt) == GIMPLE_OMP_RETURN)
3150 gimple_omp_return_set_nowait (stmt);
3155 remove_exit_barriers (struct omp_region *region)
3157 if (region->type == GIMPLE_OMP_PARALLEL)
3158 remove_exit_barrier (region);
3162 region = region->inner;
3163 remove_exit_barriers (region);
3164 while (region->next)
3166 region = region->next;
3167 remove_exit_barriers (region);
3172 /* Optimize omp_get_thread_num () and omp_get_num_threads ()
3173 calls. These can't be declared as const functions, but
3174 within one parallel body they are constant, so they can be
3175 transformed there into __builtin_omp_get_{thread_num,num_threads} ()
3176 which are declared const. Similarly for task body, except
3177 that in untied task omp_get_thread_num () can change at any task
3178 scheduling point. */
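/* For example, inside a parallel body (or a tied task body)

       n = omp_get_num_threads ();

   is redirected to __builtin_omp_get_num_threads ().  Because the
   builtin is declared const, later passes may CSE repeated calls
   within the region.  */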
3181 optimize_omp_library_calls (gimple entry_stmt)
3184 gimple_stmt_iterator gsi;
3186 = DECL_ASSEMBLER_NAME (built_in_decls [BUILT_IN_OMP_GET_THREAD_NUM]);
3188 = DECL_ASSEMBLER_NAME (built_in_decls [BUILT_IN_OMP_GET_NUM_THREADS]);
3189 bool untied_task = (gimple_code (entry_stmt) == GIMPLE_OMP_TASK
3190 && find_omp_clause (gimple_omp_task_clauses (entry_stmt),
3191 OMP_CLAUSE_UNTIED) != NULL);
3194 for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
3196 gimple call = gsi_stmt (gsi);
3199 if (is_gimple_call (call)
3200 && (decl = gimple_call_fndecl (call))
3201 && DECL_EXTERNAL (decl)
3202 && TREE_PUBLIC (decl)
3203 && DECL_INITIAL (decl) == NULL)
3207 if (DECL_NAME (decl) == thr_num_id)
3209 /* In #pragma omp task untied omp_get_thread_num () can change
3210 during the execution of the task region. */
3213 built_in = built_in_decls [BUILT_IN_OMP_GET_THREAD_NUM];
3215 else if (DECL_NAME (decl) == num_thr_id)
3216 built_in = built_in_decls [BUILT_IN_OMP_GET_NUM_THREADS];
3220 if (DECL_ASSEMBLER_NAME (decl) != DECL_ASSEMBLER_NAME (built_in)
3221 || gimple_call_num_args (call) != 0)
3224 if (flag_exceptions && !TREE_NOTHROW (decl))
3227 if (TREE_CODE (TREE_TYPE (decl)) != FUNCTION_TYPE
3228 || TYPE_MAIN_VARIANT (TREE_TYPE (TREE_TYPE (decl)))
3229 != TYPE_MAIN_VARIANT (TREE_TYPE (TREE_TYPE (built_in))))
3232 gimple_call_set_fndecl (call, built_in);
3237 /* Expand the OpenMP parallel or task directive starting at REGION. */
3240 expand_omp_taskreg (struct omp_region *region)
3242 basic_block entry_bb, exit_bb, new_bb;
3243 struct function *child_cfun;
3244 tree child_fn, block, t, ws_args, *tp;
3245 gimple_stmt_iterator gsi;
3246 gimple entry_stmt, stmt;
3249 entry_stmt = last_stmt (region->entry);
3250 child_fn = gimple_omp_taskreg_child_fn (entry_stmt);
3251 child_cfun = DECL_STRUCT_FUNCTION (child_fn);
3252 /* If this function has already been instrumented, make sure
3253 the child function isn't instrumented again. */
3254 child_cfun->after_tree_profile = cfun->after_tree_profile;
3256 entry_bb = region->entry;
3257 exit_bb = region->exit;
3259 if (is_combined_parallel (region))
3260 ws_args = region->ws_args;
3262 ws_args = NULL_TREE;
3264 if (child_cfun->cfg)
3266 /* Due to inlining, it may happen that we have already outlined
3267 the region, in which case all we need to do is make the
3268 sub-graph unreachable and emit the parallel call. */
3269 edge entry_succ_e, exit_succ_e;
3270 gimple_stmt_iterator gsi;
3272 entry_succ_e = single_succ_edge (entry_bb);
3274 gsi = gsi_last_bb (entry_bb);
3275 gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_PARALLEL
3276 || gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_TASK);
3277 gsi_remove (&gsi, true);
3282 exit_succ_e = single_succ_edge (exit_bb);
3283 make_edge (new_bb, exit_succ_e->dest, EDGE_FALLTHRU);
3285 remove_edge_and_dominated_blocks (entry_succ_e);
3289 /* If the parallel region needs data sent from the parent
3290 function, then the very first statement (except possible
3291 tree profile counter updates) of the parallel body
3292 is a copy assignment .OMP_DATA_I = &.OMP_DATA_O. Since
3293 &.OMP_DATA_O is passed as an argument to the child function,
3294 we need to replace it with the argument as seen by the child
3297 In most cases, this will end up being the identity assignment
3298 .OMP_DATA_I = .OMP_DATA_I. However, if the parallel body had
3299 a function call that has been inlined, the original PARM_DECL
3300 .OMP_DATA_I may have been converted into a different local
3301 variable, in which case we need to keep the assignment.  */
3302 if (gimple_omp_taskreg_data_arg (entry_stmt))
3304 basic_block entry_succ_bb = single_succ (entry_bb);
3305 gimple_stmt_iterator gsi;
3307 gimple parcopy_stmt = NULL;
3309 for (gsi = gsi_start_bb (entry_succ_bb); ; gsi_next (&gsi))
3313 gcc_assert (!gsi_end_p (gsi));
3314 stmt = gsi_stmt (gsi);
3315 if (gimple_code (stmt) != GIMPLE_ASSIGN)
3318 if (gimple_num_ops (stmt) == 2)
3320 tree arg = gimple_assign_rhs1 (stmt);
3322 /* We're ignoring the subcode because we're
3323 effectively doing a STRIP_NOPS. */
3325 if (TREE_CODE (arg) == ADDR_EXPR
3326 && TREE_OPERAND (arg, 0)
3327 == gimple_omp_taskreg_data_arg (entry_stmt))
3329 parcopy_stmt = stmt;
3335 gcc_assert (parcopy_stmt != NULL);
3336 arg = DECL_ARGUMENTS (child_fn);
3338 if (!gimple_in_ssa_p (cfun))
3340 if (gimple_assign_lhs (parcopy_stmt) == arg)
3341 gsi_remove (&gsi, true);
3344 /* ?? Is setting the subcode really necessary ?? */
3345 gimple_omp_set_subcode (parcopy_stmt, TREE_CODE (arg));
3346 gimple_assign_set_rhs1 (parcopy_stmt, arg);
3351 /* If we are in ssa form, we must load the value from the default
3352 definition of the argument. That should not be defined now,
3353 since the argument is not used uninitialized. */
3354 gcc_assert (gimple_default_def (cfun, arg) == NULL);
3355 narg = make_ssa_name (arg, gimple_build_nop ());
3356 set_default_def (arg, narg);
3357 /* ?? Is setting the subcode really necessary ?? */
3358 gimple_omp_set_subcode (parcopy_stmt, TREE_CODE (narg));
3359 gimple_assign_set_rhs1 (parcopy_stmt, narg);
3360 update_stmt (parcopy_stmt);
3364 /* Declare local variables needed in CHILD_CFUN. */
3365 block = DECL_INITIAL (child_fn);
3366 BLOCK_VARS (block) = list2chain (child_cfun->local_decls);
3367 DECL_SAVED_TREE (child_fn) = NULL;
3368 gimple_set_body (child_fn, bb_seq (single_succ (entry_bb)));
3369 TREE_USED (block) = 1;
3371 /* Reset DECL_CONTEXT on function arguments. */
3372 for (t = DECL_ARGUMENTS (child_fn); t; t = TREE_CHAIN (t))
3373 DECL_CONTEXT (t) = child_fn;
3375 /* Split ENTRY_BB at GIMPLE_OMP_PARALLEL or GIMPLE_OMP_TASK,
3376 so that it can be moved to the child function. */
3377 gsi = gsi_last_bb (entry_bb);
3378 stmt = gsi_stmt (gsi);
3379 gcc_assert (stmt && (gimple_code (stmt) == GIMPLE_OMP_PARALLEL
3380 || gimple_code (stmt) == GIMPLE_OMP_TASK));
3381 gsi_remove (&gsi, true);
3382 e = split_block (entry_bb, stmt);
3384 single_succ_edge (entry_bb)->flags = EDGE_FALLTHRU;
3386 /* Convert GIMPLE_OMP_RETURN into a RETURN_EXPR. */
3389 gsi = gsi_last_bb (exit_bb);
3390 gcc_assert (!gsi_end_p (gsi)
3391 && gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_RETURN);
3392 stmt = gimple_build_return (NULL);
3393 gsi_insert_after (&gsi, stmt, GSI_SAME_STMT);
3394 gsi_remove (&gsi, true);
3397 /* Move the parallel region into CHILD_CFUN. */
3399 if (gimple_in_ssa_p (cfun))
3401 push_cfun (child_cfun);
3402 init_tree_ssa (child_cfun);
3403 init_ssa_operands ();
3404 cfun->gimple_df->in_ssa_p = true;
3409 block = gimple_block (entry_stmt);
3411 new_bb = move_sese_region_to_fn (child_cfun, entry_bb, exit_bb, block);
3413 single_succ_edge (new_bb)->flags = EDGE_FALLTHRU;
3415 /* Remove non-local VAR_DECLs from child_cfun->local_decls list. */
3416 for (tp = &child_cfun->local_decls; *tp; )
3417 if (DECL_CONTEXT (TREE_VALUE (*tp)) != cfun->decl)
3418 tp = &TREE_CHAIN (*tp);
3420 *tp = TREE_CHAIN (*tp);
3422 /* Inform the callgraph about the new function. */
3423 DECL_STRUCT_FUNCTION (child_fn)->curr_properties
3424 = cfun->curr_properties;
3425 cgraph_add_new_function (child_fn, true);
3427 /* Fix the callgraph edges for child_cfun. Those for cfun will be
3428 fixed in a following pass. */
3429 push_cfun (child_cfun);
3431 optimize_omp_library_calls (entry_stmt);
3432 rebuild_cgraph_edges ();
3434 /* Some EH regions might become dead, see PR34608. If
3435 pass_cleanup_cfg isn't the first pass to happen with the
3436 new child, these dead EH edges might cause problems.
3437 Clean them up now. */
3438 if (flag_exceptions)
3441 tree save_current = current_function_decl;
3442 bool changed = false;
3444 current_function_decl = child_fn;
3446 changed |= gimple_purge_dead_eh_edges (bb);
3448 cleanup_tree_cfg ();
3449 current_function_decl = save_current;
3454 /* Emit a library call to launch the children threads. */
3455 if (gimple_code (entry_stmt) == GIMPLE_OMP_PARALLEL)
3456 expand_parallel_call (region, new_bb, entry_stmt, ws_args);
3458 expand_task_call (new_bb, entry_stmt);
3459 update_ssa (TODO_update_ssa_only_virtuals);
3463 /* A subroutine of expand_omp_for. Generate code for a parallel
3464 loop with any schedule. Given parameters:
3466 for (V = N1; V cond N2; V += STEP) BODY;
3468 where COND is "<" or ">", we generate pseudocode
3470 more = GOMP_loop_foo_start (N1, N2, STEP, CHUNK, &istart0, &iend0);
3471 if (more) goto L0; else goto L3;
3478 if (V cond iend) goto L1; else goto L2;
3480 if (GOMP_loop_foo_next (&istart0, &iend0)) goto L0; else goto L3;
3483 If this is a combined omp parallel loop, instead of the call to
3484 GOMP_loop_foo_start, we call GOMP_loop_foo_next.
3486 For collapsed loops, given parameters:
3488 for (V1 = N11; V1 cond1 N12; V1 += STEP1)
3489 for (V2 = N21; V2 cond2 N22; V2 += STEP2)
3490 for (V3 = N31; V3 cond3 N32; V3 += STEP3)
3493 we generate pseudocode
3499 count3 = (adj + N32 - N31) / STEP3;
3504 count2 = (adj + N22 - N21) / STEP2;
3509 count1 = (adj + N12 - N11) / STEP1;
3510 count = count1 * count2 * count3;
3511 more = GOMP_loop_foo_start (0, count, 1, CHUNK, &istart0, &iend0);
3512 if (more) goto L0; else goto L3;
3516 V3 = N31 + (T % count3) * STEP3;
3518 V2 = N21 + (T % count2) * STEP2;
3520 V1 = N11 + T * STEP1;
3525 if (V < iend) goto L10; else goto L2;
3528 if (V3 cond3 N32) goto L1; else goto L11;
3532 if (V2 cond2 N22) goto L1; else goto L12;
3538 if (GOMP_loop_foo_next (&istart0, &iend0)) goto L0; else goto L3;
3544 expand_omp_for_generic (struct omp_region *region,
3545 struct omp_for_data *fd,
3546 enum built_in_function start_fn,
3547 enum built_in_function next_fn)
3549 tree type, istart0, iend0, iend;
3550 tree t, vmain, vback, bias = NULL_TREE;
3551 basic_block entry_bb, cont_bb, exit_bb, l0_bb, l1_bb, collapse_bb;
3552 basic_block l2_bb = NULL, l3_bb = NULL;
3553 gimple_stmt_iterator gsi;
3555 bool in_combined_parallel = is_combined_parallel (region);
3556 bool broken_loop = region->cont == NULL;
3558 tree *counts = NULL;
3561 gcc_assert (!broken_loop || !in_combined_parallel);
3562 gcc_assert (fd->iter_type == long_integer_type_node
3563 || !in_combined_parallel);
3565 type = TREE_TYPE (fd->loop.v);
3566 istart0 = create_tmp_var (fd->iter_type, ".istart0");
3567 iend0 = create_tmp_var (fd->iter_type, ".iend0");
3568 TREE_ADDRESSABLE (istart0) = 1;
3569 TREE_ADDRESSABLE (iend0) = 1;
3570 if (gimple_in_ssa_p (cfun))
3572 add_referenced_var (istart0);
3573 add_referenced_var (iend0);
3576 /* See if we need to bias by LLONG_MIN. */
3577 if (fd->iter_type == long_long_unsigned_type_node
3578 && TREE_CODE (type) == INTEGER_TYPE
3579 && !TYPE_UNSIGNED (type))
3583 if (fd->loop.cond_code == LT_EXPR)
3586 n2 = fold_build2 (PLUS_EXPR, type, fd->loop.n2, fd->loop.step);
3590 n1 = fold_build2 (MINUS_EXPR, type, fd->loop.n2, fd->loop.step);
3593 if (TREE_CODE (n1) != INTEGER_CST
3594 || TREE_CODE (n2) != INTEGER_CST
3595 || ((tree_int_cst_sgn (n1) < 0) ^ (tree_int_cst_sgn (n2) < 0)))
3596 bias = fold_convert (fd->iter_type, TYPE_MIN_VALUE (type));
3599 entry_bb = region->entry;
3600 cont_bb = region->cont;
3602 gcc_assert (EDGE_COUNT (entry_bb->succs) == 2);
3603 gcc_assert (broken_loop
3604 || BRANCH_EDGE (entry_bb)->dest == FALLTHRU_EDGE (cont_bb)->dest);
3605 l0_bb = split_edge (FALLTHRU_EDGE (entry_bb));
3606 l1_bb = single_succ (l0_bb);
3609 l2_bb = create_empty_bb (cont_bb);
3610 gcc_assert (BRANCH_EDGE (cont_bb)->dest == l1_bb);
3611 gcc_assert (EDGE_COUNT (cont_bb->succs) == 2);
3615 l3_bb = BRANCH_EDGE (entry_bb)->dest;
3616 exit_bb = region->exit;
3618 gsi = gsi_last_bb (entry_bb);
3620 gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_FOR);
3621 if (fd->collapse > 1)
3623 /* Collapsed loops need work for expansion in SSA form.  */
3624 gcc_assert (!gimple_in_ssa_p (cfun));
3625 counts = (tree *) alloca (fd->collapse * sizeof (tree));
3626 for (i = 0; i < fd->collapse; i++)
3628 tree itype = TREE_TYPE (fd->loops[i].v);
3630 if (POINTER_TYPE_P (itype))
3631 itype = lang_hooks.types.type_for_size (TYPE_PRECISION (itype), 0);
3632 t = build_int_cst (itype, (fd->loops[i].cond_code == LT_EXPR
3634 t = fold_build2 (PLUS_EXPR, itype,
3635 fold_convert (itype, fd->loops[i].step), t);
3636 t = fold_build2 (PLUS_EXPR, itype, t,
3637 fold_convert (itype, fd->loops[i].n2));
3638 t = fold_build2 (MINUS_EXPR, itype, t,
3639 fold_convert (itype, fd->loops[i].n1));
3640 if (TYPE_UNSIGNED (itype) && fd->loops[i].cond_code == GT_EXPR)
3641 t = fold_build2 (TRUNC_DIV_EXPR, itype,
3642 fold_build1 (NEGATE_EXPR, itype, t),
3643 fold_build1 (NEGATE_EXPR, itype,
3644 fold_convert (itype,
3645 fd->loops[i].step)));
3647 t = fold_build2 (TRUNC_DIV_EXPR, itype, t,
3648 fold_convert (itype, fd->loops[i].step));
3649 t = fold_convert (type, t);
3650 if (TREE_CODE (t) == INTEGER_CST)
3654 counts[i] = create_tmp_var (type, ".count");
3655 t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE,
3656 true, GSI_SAME_STMT);
3657 stmt = gimple_build_assign (counts[i], t);
3658 gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);
3660 if (SSA_VAR_P (fd->loop.n2))
3666 t = fold_build2 (MULT_EXPR, type, fd->loop.n2, counts[i]);
3667 t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE,
3668 true, GSI_SAME_STMT);
3670 stmt = gimple_build_assign (fd->loop.n2, t);
3671 gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);
3675 if (in_combined_parallel)
3677 /* In a combined parallel loop, emit a call to
3678 GOMP_loop_foo_next. */
3679 t = build_call_expr (built_in_decls[next_fn], 2,
3680 build_fold_addr_expr (istart0),
3681 build_fold_addr_expr (iend0));
3685 tree t0, t1, t2, t3, t4;
3686 /* If this is not a combined parallel loop, emit a call to
3687 GOMP_loop_foo_start in ENTRY_BB. */
3688 t4 = build_fold_addr_expr (iend0);
3689 t3 = build_fold_addr_expr (istart0);
3690 t2 = fold_convert (fd->iter_type, fd->loop.step);
3691 if (POINTER_TYPE_P (type)
3692 && TYPE_PRECISION (type) != TYPE_PRECISION (fd->iter_type))
3694 /* Avoid casting pointers to integer of a different size. */
3696 = lang_hooks.types.type_for_size (TYPE_PRECISION (type), 0);
3697 t1 = fold_convert (fd->iter_type, fold_convert (itype, fd->loop.n2));
3698 t0 = fold_convert (fd->iter_type, fold_convert (itype, fd->loop.n1));
3702 t1 = fold_convert (fd->iter_type, fd->loop.n2);
3703 t0 = fold_convert (fd->iter_type, fd->loop.n1);
3707 t1 = fold_build2 (PLUS_EXPR, fd->iter_type, t1, bias);
3708 t0 = fold_build2 (PLUS_EXPR, fd->iter_type, t0, bias);
3710 if (fd->iter_type == long_integer_type_node)
3714 t = fold_convert (fd->iter_type, fd->chunk_size);
3715 t = build_call_expr (built_in_decls[start_fn], 6,
3716 t0, t1, t2, t, t3, t4);
3719 t = build_call_expr (built_in_decls[start_fn], 5,
3720 t0, t1, t2, t3, t4);
3727 /* The GOMP_loop_ull_*start functions have an additional boolean
3728 argument, true for < loops and false for > loops.
3729 In Fortran, the C bool type can be different from
3730 boolean_type_node. */
3731 c_bool_type = TREE_TYPE (TREE_TYPE (built_in_decls[start_fn]));
3732 t5 = build_int_cst (c_bool_type,
3733 fd->loop.cond_code == LT_EXPR ? 1 : 0);
3736 t = fold_convert (fd->iter_type, fd->chunk_size);
3737 t = build_call_expr (built_in_decls[start_fn], 7,
3738 t5, t0, t1, t2, t, t3, t4);
3741 t = build_call_expr (built_in_decls[start_fn], 6,
3742 t5, t0, t1, t2, t3, t4);
3745 if (TREE_TYPE (t) != boolean_type_node)
3746 t = fold_build2 (NE_EXPR, boolean_type_node,
3747 t, build_int_cst (TREE_TYPE (t), 0));
3748 t = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
3749 true, GSI_SAME_STMT);
3750 gsi_insert_after (&gsi, gimple_build_cond_empty (t), GSI_SAME_STMT);
3752 /* Remove the GIMPLE_OMP_FOR statement. */
3753 gsi_remove (&gsi, true);
3755 /* Iteration setup for sequential loop goes in L0_BB. */
3756 gsi = gsi_start_bb (l0_bb);
3758 t = fold_convert (type, fold_build2 (MINUS_EXPR, fd->iter_type,
3761 t = fold_convert (type, istart0);
3762 t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE,
3763 false, GSI_CONTINUE_LINKING);
3764 stmt = gimple_build_assign (fd->loop.v, t);
3765 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
3768 t = fold_convert (type, fold_build2 (MINUS_EXPR, fd->iter_type,
3771 t = fold_convert (type, iend0);
3772 iend = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
3773 false, GSI_CONTINUE_LINKING);
3774 if (fd->collapse > 1)
3776 tree tem = create_tmp_var (type, ".tem");
3778 stmt = gimple_build_assign (tem, fd->loop.v);
3779 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
3780 for (i = fd->collapse - 1; i >= 0; i--)
3782 tree vtype = TREE_TYPE (fd->loops[i].v), itype;
3784 if (POINTER_TYPE_P (vtype))
3785 itype = lang_hooks.types.type_for_size (TYPE_PRECISION (vtype), 0);
3786 t = fold_build2 (TRUNC_MOD_EXPR, type, tem, counts[i]);
3787 t = fold_convert (itype, t);
3788 t = fold_build2 (MULT_EXPR, itype, t, fd->loops[i].step);
3789 if (POINTER_TYPE_P (vtype))
3790 t = fold_build2 (POINTER_PLUS_EXPR, vtype,
3791 fd->loops[i].n1, fold_convert (sizetype, t));
3793 t = fold_build2 (PLUS_EXPR, itype, fd->loops[i].n1, t);
3794 t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE,
3795 false, GSI_CONTINUE_LINKING);
3796 stmt = gimple_build_assign (fd->loops[i].v, t);
3797 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
3800 t = fold_build2 (TRUNC_DIV_EXPR, type, tem, counts[i]);
3801 t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE,
3802 false, GSI_CONTINUE_LINKING);
3803 stmt = gimple_build_assign (tem, t);
3804 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
3811 /* Code to control the increment and predicate for the sequential
3812 loop goes in the CONT_BB. */
3813 gsi = gsi_last_bb (cont_bb);
3814 stmt = gsi_stmt (gsi);
3815 gcc_assert (gimple_code (stmt) == GIMPLE_OMP_CONTINUE);
3816 vmain = gimple_omp_continue_control_use (stmt);
3817 vback = gimple_omp_continue_control_def (stmt);
3819 if (POINTER_TYPE_P (type))
3820 t = fold_build2 (POINTER_PLUS_EXPR, type, vmain,
3821 fold_convert (sizetype, fd->loop.step));
3823 t = fold_build2 (PLUS_EXPR, type, vmain, fd->loop.step);
3824 t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE,
3825 true, GSI_SAME_STMT);
3826 stmt = gimple_build_assign (vback, t);
3827 gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);
3829 t = build2 (fd->loop.cond_code, boolean_type_node, vback, iend);
3830 stmt = gimple_build_cond_empty (t);
3831 gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);
3833 /* Remove GIMPLE_OMP_CONTINUE. */
3834 gsi_remove (&gsi, true);
3836 if (fd->collapse > 1)
3838 basic_block last_bb, bb;
3841 for (i = fd->collapse - 1; i >= 0; i--)
3843 tree vtype = TREE_TYPE (fd->loops[i].v);
3845 bb = create_empty_bb (last_bb);
3846 gsi = gsi_start_bb (bb);
3848 if (i < fd->collapse - 1)
3850 e = make_edge (last_bb, bb, EDGE_FALSE_VALUE);
3851 e->probability = REG_BR_PROB_BASE / 8;
3853 t = fd->loops[i + 1].n1;
3854 t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE,
3855 false, GSI_CONTINUE_LINKING);
3856 stmt = gimple_build_assign (fd->loops[i + 1].v, t);
3857 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
3862 set_immediate_dominator (CDI_DOMINATORS, bb, last_bb);
3864 if (POINTER_TYPE_P (vtype))
3865 t = fold_build2 (POINTER_PLUS_EXPR, vtype,
3867 fold_convert (sizetype, fd->loops[i].step));
3869 t = fold_build2 (PLUS_EXPR, vtype, fd->loops[i].v,
3871 t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE,
3872 false, GSI_CONTINUE_LINKING);
3873 stmt = gimple_build_assign (fd->loops[i].v, t);
3874 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
3878 t = fd->loops[i].n2;
3879 t = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
3880 false, GSI_CONTINUE_LINKING);
3881 t = fold_build2 (fd->loops[i].cond_code, boolean_type_node,
3883 stmt = gimple_build_cond_empty (t);
3884 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
3885 e = make_edge (bb, l1_bb, EDGE_TRUE_VALUE);
3886 e->probability = REG_BR_PROB_BASE * 7 / 8;
3889 make_edge (bb, l1_bb, EDGE_FALLTHRU);
3894 /* Emit code to get the next parallel iteration in L2_BB. */
3895 gsi = gsi_start_bb (l2_bb);
3897 t = build_call_expr (built_in_decls[next_fn], 2,
3898 build_fold_addr_expr (istart0),
3899 build_fold_addr_expr (iend0));
3900 t = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
3901 false, GSI_CONTINUE_LINKING);
3902 if (TREE_TYPE (t) != boolean_type_node)
3903 t = fold_build2 (NE_EXPR, boolean_type_node,
3904 t, build_int_cst (TREE_TYPE (t), 0));
3905 stmt = gimple_build_cond_empty (t);
3906 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
3909 /* Add the loop cleanup function. */
3910 gsi = gsi_last_bb (exit_bb);
3911 if (gimple_omp_return_nowait_p (gsi_stmt (gsi)))
3912 t = built_in_decls[BUILT_IN_GOMP_LOOP_END_NOWAIT];
3914 t = built_in_decls[BUILT_IN_GOMP_LOOP_END];
3915 stmt = gimple_build_call (t, 0);
3916 gsi_insert_after (&gsi, stmt, GSI_SAME_STMT);
3917 gsi_remove (&gsi, true);
3919 /* Connect the new blocks. */
3920 find_edge (entry_bb, l0_bb)->flags = EDGE_TRUE_VALUE;
3921 find_edge (entry_bb, l3_bb)->flags = EDGE_FALSE_VALUE;
3927 e = find_edge (cont_bb, l3_bb);
3928 ne = make_edge (l2_bb, l3_bb, EDGE_FALSE_VALUE);
3930 phis = phi_nodes (l3_bb);
3931 for (gsi = gsi_start (phis); !gsi_end_p (gsi); gsi_next (&gsi))
3933 gimple phi = gsi_stmt (gsi);
3934 SET_USE (PHI_ARG_DEF_PTR_FROM_EDGE (phi, ne),
3935 PHI_ARG_DEF_FROM_EDGE (phi, e));
3939 make_edge (cont_bb, l2_bb, EDGE_FALSE_VALUE);
3940 if (fd->collapse > 1)
3942 e = find_edge (cont_bb, l1_bb);
3944 e = make_edge (cont_bb, collapse_bb, EDGE_TRUE_VALUE);
3948 e = find_edge (cont_bb, l1_bb);
3949 e->flags = EDGE_TRUE_VALUE;
3951 e->probability = REG_BR_PROB_BASE * 7 / 8;
3952 find_edge (cont_bb, l2_bb)->probability = REG_BR_PROB_BASE / 8;
3953 make_edge (l2_bb, l0_bb, EDGE_TRUE_VALUE);
3955 set_immediate_dominator (CDI_DOMINATORS, l2_bb,
3956 recompute_dominator (CDI_DOMINATORS, l2_bb));
3957 set_immediate_dominator (CDI_DOMINATORS, l3_bb,
3958 recompute_dominator (CDI_DOMINATORS, l3_bb));
3959 set_immediate_dominator (CDI_DOMINATORS, l0_bb,
3960 recompute_dominator (CDI_DOMINATORS, l0_bb));
3961 set_immediate_dominator (CDI_DOMINATORS, l1_bb,
3962 recompute_dominator (CDI_DOMINATORS, l1_bb));
3967 /* A subroutine of expand_omp_for. Generate code for a parallel
3968 loop with static schedule and no specified chunk size. Given
3971 for (V = N1; V cond N2; V += STEP) BODY;
3973 where COND is "<" or ">", we generate pseudocode
3979 if ((__typeof (V)) -1 > 0 && cond is >)
3980 n = -(adj + N2 - N1) / -STEP;
3982 n = (adj + N2 - N1) / STEP;
3984 q += (q * nthreads != n);
3986 e0 = min(s0 + q, n);
3988 if (s0 >= e0) goto L2; else goto L0;
3994 if (V cond e) goto L1;
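/* A worked example of the partitioning above: n = 103 iterations on
   nthreads = 4 gives q = 103 / 4 = 25, bumped to q = 26 because
   25 * 4 != 103.  The four threads then run [0,26), [26,52), [52,78)
   and [78,103), the last range clipped by e0 = min (s0 + q, n).  */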
3999 expand_omp_for_static_nochunk (struct omp_region *region,
4000 struct omp_for_data *fd)
4002 tree n, q, s0, e0, e, t, nthreads, threadid;
4003 tree type, itype, vmain, vback;
4004 basic_block entry_bb, exit_bb, seq_start_bb, body_bb, cont_bb;
4006 gimple_stmt_iterator gsi;
4009 itype = type = TREE_TYPE (fd->loop.v);
4010 if (POINTER_TYPE_P (type))
4011 itype = lang_hooks.types.type_for_size (TYPE_PRECISION (type), 0);
4013 entry_bb = region->entry;
4014 cont_bb = region->cont;
4015 gcc_assert (EDGE_COUNT (entry_bb->succs) == 2);
4016 gcc_assert (BRANCH_EDGE (entry_bb)->dest == FALLTHRU_EDGE (cont_bb)->dest);
4017 seq_start_bb = split_edge (FALLTHRU_EDGE (entry_bb));
4018 body_bb = single_succ (seq_start_bb);
4019 gcc_assert (BRANCH_EDGE (cont_bb)->dest == body_bb);
4020 gcc_assert (EDGE_COUNT (cont_bb->succs) == 2);
4021 fin_bb = FALLTHRU_EDGE (cont_bb)->dest;
4022 exit_bb = region->exit;
4024 /* Iteration space partitioning goes in ENTRY_BB. */
4025 gsi = gsi_last_bb (entry_bb);
4026 gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_FOR);
4028 t = build_call_expr (built_in_decls[BUILT_IN_OMP_GET_NUM_THREADS], 0);
4029 t = fold_convert (itype, t);
4030 nthreads = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
4031 true, GSI_SAME_STMT);
4033 t = build_call_expr (built_in_decls[BUILT_IN_OMP_GET_THREAD_NUM], 0);
4034 t = fold_convert (itype, t);
4035 threadid = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
4036 true, GSI_SAME_STMT);
4039 = force_gimple_operand_gsi (&gsi, fold_convert (type, fd->loop.n1),
4040 true, NULL_TREE, true, GSI_SAME_STMT);
4042 = force_gimple_operand_gsi (&gsi, fold_convert (itype, fd->loop.n2),
4043 true, NULL_TREE, true, GSI_SAME_STMT);
4045 = force_gimple_operand_gsi (&gsi, fold_convert (itype, fd->loop.step),
4046 true, NULL_TREE, true, GSI_SAME_STMT);
4048 t = build_int_cst (itype, (fd->loop.cond_code == LT_EXPR ? -1 : 1));
4049 t = fold_build2 (PLUS_EXPR, itype, fd->loop.step, t);
4050 t = fold_build2 (PLUS_EXPR, itype, t, fd->loop.n2);
4051 t = fold_build2 (MINUS_EXPR, itype, t, fold_convert (itype, fd->loop.n1));
4052 if (TYPE_UNSIGNED (itype) && fd->loop.cond_code == GT_EXPR)
4053 t = fold_build2 (TRUNC_DIV_EXPR, itype,
4054 fold_build1 (NEGATE_EXPR, itype, t),
4055 fold_build1 (NEGATE_EXPR, itype, fd->loop.step));
4057 t = fold_build2 (TRUNC_DIV_EXPR, itype, t, fd->loop.step);
4058 t = fold_convert (itype, t);
4059 n = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE, true, GSI_SAME_STMT);
4061 t = fold_build2 (TRUNC_DIV_EXPR, itype, n, nthreads);
4062 q = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE, true, GSI_SAME_STMT);
4064 t = fold_build2 (MULT_EXPR, itype, q, nthreads);
4065 t = fold_build2 (NE_EXPR, itype, t, n);
4066 t = fold_build2 (PLUS_EXPR, itype, q, t);
4067 q = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE, true, GSI_SAME_STMT);
4069 t = build2 (MULT_EXPR, itype, q, threadid);
4070 s0 = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE, true, GSI_SAME_STMT);
4072 t = fold_build2 (PLUS_EXPR, itype, s0, q);
4073 t = fold_build2 (MIN_EXPR, itype, t, n);
4074 e0 = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE, true, GSI_SAME_STMT);
4076 t = build2 (GE_EXPR, boolean_type_node, s0, e0);
4077 gsi_insert_before (&gsi, gimple_build_cond_empty (t), GSI_SAME_STMT);
4079 /* Remove the GIMPLE_OMP_FOR statement. */
4080 gsi_remove (&gsi, true);
4082 /* Setup code for sequential iteration goes in SEQ_START_BB. */
4083 gsi = gsi_start_bb (seq_start_bb);
4085 t = fold_convert (itype, s0);
4086 t = fold_build2 (MULT_EXPR, itype, t, fd->loop.step);
4087 if (POINTER_TYPE_P (type))
4088 t = fold_build2 (POINTER_PLUS_EXPR, type, fd->loop.n1,
4089 fold_convert (sizetype, t));
4091 t = fold_build2 (PLUS_EXPR, type, t, fd->loop.n1);
4092 t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE,
4093 false, GSI_CONTINUE_LINKING);
4094 stmt = gimple_build_assign (fd->loop.v, t);
4095 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
4097 t = fold_convert (itype, e0);
4098 t = fold_build2 (MULT_EXPR, itype, t, fd->loop.step);
4099 if (POINTER_TYPE_P (type))
4100 t = fold_build2 (POINTER_PLUS_EXPR, type, fd->loop.n1,
4101 fold_convert (sizetype, t));
4103 t = fold_build2 (PLUS_EXPR, type, t, fd->loop.n1);
4104 e = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
4105 false, GSI_CONTINUE_LINKING);
4107 /* The code controlling the sequential loop replaces the
4108 GIMPLE_OMP_CONTINUE. */
4109 gsi = gsi_last_bb (cont_bb);
4110 stmt = gsi_stmt (gsi);
4111 gcc_assert (gimple_code (stmt) == GIMPLE_OMP_CONTINUE);
4112 vmain = gimple_omp_continue_control_use (stmt);
4113 vback = gimple_omp_continue_control_def (stmt);
4115 if (POINTER_TYPE_P (type))
4116 t = fold_build2 (POINTER_PLUS_EXPR, type, vmain,
4117 fold_convert (sizetype, fd->loop.step));
4119 t = fold_build2 (PLUS_EXPR, type, vmain, fd->loop.step);
4120 t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE,
4121 true, GSI_SAME_STMT);
4122 stmt = gimple_build_assign (vback, t);
4123 gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);
4125 t = build2 (fd->loop.cond_code, boolean_type_node, vback, e);
4126 gsi_insert_before (&gsi, gimple_build_cond_empty (t), GSI_SAME_STMT);
4128 /* Remove the GIMPLE_OMP_CONTINUE statement. */
4129 gsi_remove (&gsi, true);
4131 /* Replace the GIMPLE_OMP_RETURN with a barrier, or nothing. */
4132 gsi = gsi_last_bb (exit_bb);
4133 if (!gimple_omp_return_nowait_p (gsi_stmt (gsi)))
4134 force_gimple_operand_gsi (&gsi, build_omp_barrier (), false, NULL_TREE,
4135 false, GSI_SAME_STMT);
4136 gsi_remove (&gsi, true);
4138 /* Connect all the blocks. */
4139 find_edge (entry_bb, seq_start_bb)->flags = EDGE_FALSE_VALUE;
4140 find_edge (entry_bb, fin_bb)->flags = EDGE_TRUE_VALUE;
4142 find_edge (cont_bb, body_bb)->flags = EDGE_TRUE_VALUE;
4143 find_edge (cont_bb, fin_bb)->flags = EDGE_FALSE_VALUE;
4145 set_immediate_dominator (CDI_DOMINATORS, seq_start_bb, entry_bb);
4146 set_immediate_dominator (CDI_DOMINATORS, body_bb,
4147 recompute_dominator (CDI_DOMINATORS, body_bb));
4148 set_immediate_dominator (CDI_DOMINATORS, fin_bb,
4149 recompute_dominator (CDI_DOMINATORS, fin_bb));
4153 /* A subroutine of expand_omp_for. Generate code for a parallel
4154 loop with static schedule and a specified chunk size. Given
4157 for (V = N1; V cond N2; V += STEP) BODY;
4159 where COND is "<" or ">", we generate pseudocode
4165 if ((__typeof (V)) -1 > 0 && cond is >)
4166 n = -(adj + N2 - N1) / -STEP;
4168 n = (adj + N2 - N1) / STEP;
4170 V = threadid * CHUNK * STEP + N1; -- this extra definition of V is
4171 here so that V is defined
4172 if the loop is not entered
4174 s0 = (trip * nthreads + threadid) * CHUNK;
4175 e0 = min(s0 + CHUNK, n);
4176 if (s0 < n) goto L1; else goto L4;
4183 if (V cond e) goto L2; else goto L3;
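/* A worked example of the chunked schedule above: n = 10, nthreads = 2,
   CHUNK = 3.  With s0 = (trip * nthreads + threadid) * CHUNK and
   e0 = min (s0 + CHUNK, n):

       trip 0:  thread 0 runs [0,3),  thread 1 runs [3,6)
       trip 1:  thread 0 runs [6,9),  thread 1 runs [9,10)

   and on trip 2, s0 >= n for both threads, so the loop ends.  */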
4191 expand_omp_for_static_chunk (struct omp_region *region, struct omp_for_data *fd)
4193 tree n, s0, e0, e, t;
4194 tree trip_var, trip_init, trip_main, trip_back, nthreads, threadid;
4195 tree type, itype, v_main, v_back, v_extra;
4196 basic_block entry_bb, exit_bb, body_bb, seq_start_bb, iter_part_bb;
4197 basic_block trip_update_bb, cont_bb, fin_bb;
4198 gimple_stmt_iterator si;
4202 itype = type = TREE_TYPE (fd->loop.v);
4203 if (POINTER_TYPE_P (type))
4204 itype = lang_hooks.types.type_for_size (TYPE_PRECISION (type), 0);
4206 entry_bb = region->entry;
4207 se = split_block (entry_bb, last_stmt (entry_bb));
4209 iter_part_bb = se->dest;
4210 cont_bb = region->cont;
4211 gcc_assert (EDGE_COUNT (iter_part_bb->succs) == 2);
4212 gcc_assert (BRANCH_EDGE (iter_part_bb)->dest
4213 == FALLTHRU_EDGE (cont_bb)->dest);
4214 seq_start_bb = split_edge (FALLTHRU_EDGE (iter_part_bb));
4215 body_bb = single_succ (seq_start_bb);
4216 gcc_assert (BRANCH_EDGE (cont_bb)->dest == body_bb);
4217 gcc_assert (EDGE_COUNT (cont_bb->succs) == 2);
4218 fin_bb = FALLTHRU_EDGE (cont_bb)->dest;
4219 trip_update_bb = split_edge (FALLTHRU_EDGE (cont_bb));
4220 exit_bb = region->exit;
4222 /* Trip and adjustment setup goes in ENTRY_BB. */
4223 si = gsi_last_bb (entry_bb);
4224 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_FOR);
4226 t = build_call_expr (built_in_decls[BUILT_IN_OMP_GET_NUM_THREADS], 0);
4227 t = fold_convert (itype, t);
4228 nthreads = force_gimple_operand_gsi (&si, t, true, NULL_TREE,
4229 true, GSI_SAME_STMT);
4231 t = build_call_expr (built_in_decls[BUILT_IN_OMP_GET_THREAD_NUM], 0);
4232 t = fold_convert (itype, t);
4233 threadid = force_gimple_operand_gsi (&si, t, true, NULL_TREE,
4234 true, GSI_SAME_STMT);
4237 = force_gimple_operand_gsi (&si, fold_convert (type, fd->loop.n1),
4238 true, NULL_TREE, true, GSI_SAME_STMT);
4240 = force_gimple_operand_gsi (&si, fold_convert (itype, fd->loop.n2),
4241 true, NULL_TREE, true, GSI_SAME_STMT);
4243 = force_gimple_operand_gsi (&si, fold_convert (itype, fd->loop.step),
4244 true, NULL_TREE, true, GSI_SAME_STMT);
4246 = force_gimple_operand_gsi (&si, fold_convert (itype, fd->chunk_size),
4247 true, NULL_TREE, true, GSI_SAME_STMT);
4249 t = build_int_cst (itype, (fd->loop.cond_code == LT_EXPR ? -1 : 1));
4250 t = fold_build2 (PLUS_EXPR, itype, fd->loop.step, t);
4251 t = fold_build2 (PLUS_EXPR, itype, t, fd->loop.n2);
4252 t = fold_build2 (MINUS_EXPR, itype, t, fold_convert (itype, fd->loop.n1));
4253 if (TYPE_UNSIGNED (itype) && fd->loop.cond_code == GT_EXPR)
4254 t = fold_build2 (TRUNC_DIV_EXPR, itype,
4255 fold_build1 (NEGATE_EXPR, itype, t),
4256 fold_build1 (NEGATE_EXPR, itype, fd->loop.step));
4258 t = fold_build2 (TRUNC_DIV_EXPR, itype, t, fd->loop.step);
4259 t = fold_convert (itype, t);
4260 n = force_gimple_operand_gsi (&si, t, true, NULL_TREE,
4261 true, GSI_SAME_STMT);
4263 trip_var = create_tmp_var (itype, ".trip");
4264 if (gimple_in_ssa_p (cfun))
4266 add_referenced_var (trip_var);
4267 trip_init = make_ssa_name (trip_var, NULL);
4268 trip_main = make_ssa_name (trip_var, NULL);
4269 trip_back = make_ssa_name (trip_var, NULL);
4273 trip_init = trip_var;
4274 trip_main = trip_var;
4275 trip_back = trip_var;
4278 stmt = gimple_build_assign (trip_init, build_int_cst (itype, 0));
4279 gsi_insert_before (&si, stmt, GSI_SAME_STMT);
4281 t = fold_build2 (MULT_EXPR, itype, threadid, fd->chunk_size);
4282 t = fold_build2 (MULT_EXPR, itype, t, fd->loop.step);
4283 if (POINTER_TYPE_P (type))
4284 t = fold_build2 (POINTER_PLUS_EXPR, type, fd->loop.n1,
4285 fold_convert (sizetype, t));
4287 t = fold_build2 (PLUS_EXPR, type, t, fd->loop.n1);
4288 v_extra = force_gimple_operand_gsi (&si, t, true, NULL_TREE,
4289 true, GSI_SAME_STMT);
4291 /* Remove the GIMPLE_OMP_FOR. */
4292 gsi_remove (&si, true);
4294 /* Iteration space partitioning goes in ITER_PART_BB. */
4295 si = gsi_last_bb (iter_part_bb);
4297 t = fold_build2 (MULT_EXPR, itype, trip_main, nthreads);
4298 t = fold_build2 (PLUS_EXPR, itype, t, threadid);
4299 t = fold_build2 (MULT_EXPR, itype, t, fd->chunk_size);
4300 s0 = force_gimple_operand_gsi (&si, t, true, NULL_TREE,
4301 false, GSI_CONTINUE_LINKING);
4303 t = fold_build2 (PLUS_EXPR, itype, s0, fd->chunk_size);
4304 t = fold_build2 (MIN_EXPR, itype, t, n);
4305 e0 = force_gimple_operand_gsi (&si, t, true, NULL_TREE,
4306 false, GSI_CONTINUE_LINKING);
4308 t = build2 (LT_EXPR, boolean_type_node, s0, n);
4309 gsi_insert_after (&si, gimple_build_cond_empty (t), GSI_CONTINUE_LINKING);
4311 /* Setup code for sequential iteration goes in SEQ_START_BB. */
4312 si = gsi_start_bb (seq_start_bb);
4314 t = fold_convert (itype, s0);
4315 t = fold_build2 (MULT_EXPR, itype, t, fd->loop.step);
  if (POINTER_TYPE_P (type))
    t = fold_build2 (POINTER_PLUS_EXPR, type, fd->loop.n1,
		     fold_convert (sizetype, t));
  else
    t = fold_build2 (PLUS_EXPR, type, t, fd->loop.n1);
4321 t = force_gimple_operand_gsi (&si, t, false, NULL_TREE,
4322 false, GSI_CONTINUE_LINKING);
4323 stmt = gimple_build_assign (fd->loop.v, t);
4324 gsi_insert_after (&si, stmt, GSI_CONTINUE_LINKING);
4326 t = fold_convert (itype, e0);
4327 t = fold_build2 (MULT_EXPR, itype, t, fd->loop.step);
  if (POINTER_TYPE_P (type))
    t = fold_build2 (POINTER_PLUS_EXPR, type, fd->loop.n1,
		     fold_convert (sizetype, t));
  else
    t = fold_build2 (PLUS_EXPR, type, t, fd->loop.n1);
4333 e = force_gimple_operand_gsi (&si, t, true, NULL_TREE,
4334 false, GSI_CONTINUE_LINKING);
4336 /* The code controlling the sequential loop goes in CONT_BB,
4337 replacing the GIMPLE_OMP_CONTINUE. */
4338 si = gsi_last_bb (cont_bb);
4339 stmt = gsi_stmt (si);
4340 gcc_assert (gimple_code (stmt) == GIMPLE_OMP_CONTINUE);
4341 v_main = gimple_omp_continue_control_use (stmt);
4342 v_back = gimple_omp_continue_control_def (stmt);
  if (POINTER_TYPE_P (type))
    t = fold_build2 (POINTER_PLUS_EXPR, type, v_main,
		     fold_convert (sizetype, fd->loop.step));
  else
    t = fold_build2 (PLUS_EXPR, type, v_main, fd->loop.step);
4349 stmt = gimple_build_assign (v_back, t);
4350 gsi_insert_before (&si, stmt, GSI_SAME_STMT);
4352 t = build2 (fd->loop.cond_code, boolean_type_node, v_back, e);
4353 gsi_insert_before (&si, gimple_build_cond_empty (t), GSI_SAME_STMT);
4355 /* Remove GIMPLE_OMP_CONTINUE. */
4356 gsi_remove (&si, true);
4358 /* Trip update code goes into TRIP_UPDATE_BB. */
4359 si = gsi_start_bb (trip_update_bb);
4361 t = build_int_cst (itype, 1);
4362 t = build2 (PLUS_EXPR, itype, trip_main, t);
4363 stmt = gimple_build_assign (trip_back, t);
4364 gsi_insert_after (&si, stmt, GSI_CONTINUE_LINKING);
4366 /* Replace the GIMPLE_OMP_RETURN with a barrier, or nothing. */
4367 si = gsi_last_bb (exit_bb);
4368 if (!gimple_omp_return_nowait_p (gsi_stmt (si)))
4369 force_gimple_operand_gsi (&si, build_omp_barrier (), false, NULL_TREE,
4370 false, GSI_SAME_STMT);
4371 gsi_remove (&si, true);
4373 /* Connect the new blocks. */
4374 find_edge (iter_part_bb, seq_start_bb)->flags = EDGE_TRUE_VALUE;
4375 find_edge (iter_part_bb, fin_bb)->flags = EDGE_FALSE_VALUE;
4377 find_edge (cont_bb, body_bb)->flags = EDGE_TRUE_VALUE;
4378 find_edge (cont_bb, trip_update_bb)->flags = EDGE_FALSE_VALUE;
4380 redirect_edge_and_branch (single_succ_edge (trip_update_bb), iter_part_bb);
  if (gimple_in_ssa_p (cfun))
    {
      gimple_stmt_iterator psi;
      gimple phi;
      edge re, ene;
      edge_var_map_vector head;
      edge_var_map *vm;
      size_t i;
4391 /* When we redirect the edge from trip_update_bb to iter_part_bb, we
4392 remove arguments of the phi nodes in fin_bb. We need to create
4393 appropriate phi nodes in iter_part_bb instead. */
4394 se = single_pred_edge (fin_bb);
4395 re = single_succ_edge (trip_update_bb);
4396 head = redirect_edge_var_map_vector (re);
4397 ene = single_succ_edge (entry_bb);
4399 psi = gsi_start_phis (fin_bb);
4400 for (i = 0; !gsi_end_p (psi) && VEC_iterate (edge_var_map, head, i, vm);
4401 gsi_next (&psi), ++i)
	{
	  gimple nphi;

	  phi = gsi_stmt (psi);
4406 t = gimple_phi_result (phi);
4407 gcc_assert (t == redirect_edge_var_map_result (vm));
4408 nphi = create_phi_node (t, iter_part_bb);
4409 SSA_NAME_DEF_STMT (t) = nphi;
4411 t = PHI_ARG_DEF_FROM_EDGE (phi, se);
	  /* A special case -- fd->loop.v is not yet computed in
	     iter_part_bb; we need to use v_extra instead.  */
	  if (t == fd->loop.v)
	    t = v_extra;
	  add_phi_arg (nphi, t, ene);
4417 add_phi_arg (nphi, redirect_edge_var_map_def (vm), re);
	}
      gcc_assert (!gsi_end_p (psi) && i == VEC_length (edge_var_map, head));
4420 redirect_edge_var_map_clear (re);
      /* Remove the now-redundant phi nodes in fin_bb.  */
      while (1)
	{
	  psi = gsi_start_phis (fin_bb);
	  if (gsi_end_p (psi))
	    break;
	  remove_phi_node (&psi, false);
	}
4429 /* Make phi node for trip. */
4430 phi = create_phi_node (trip_main, iter_part_bb);
4431 SSA_NAME_DEF_STMT (trip_main) = phi;
4432 add_phi_arg (phi, trip_back, single_succ_edge (trip_update_bb));
4433 add_phi_arg (phi, trip_init, single_succ_edge (entry_bb));
    }

  set_immediate_dominator (CDI_DOMINATORS, trip_update_bb, cont_bb);
4437 set_immediate_dominator (CDI_DOMINATORS, iter_part_bb,
4438 recompute_dominator (CDI_DOMINATORS, iter_part_bb));
4439 set_immediate_dominator (CDI_DOMINATORS, fin_bb,
4440 recompute_dominator (CDI_DOMINATORS, fin_bb));
4441 set_immediate_dominator (CDI_DOMINATORS, seq_start_bb,
4442 recompute_dominator (CDI_DOMINATORS, seq_start_bb));
4443 set_immediate_dominator (CDI_DOMINATORS, body_bb,
4444 recompute_dominator (CDI_DOMINATORS, body_bb));
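
/* For illustration only: the blocks wired up above implement, per
   thread, roughly the following pseudocode for schedule(static, CHUNK).
   This is a sketch, not compiler output; 'nthreads', 'threadid', 'n',
   etc. name the temporaries computed in ENTRY_BB above:

       trip = 0;
     iter_part:
       s0 = (trip * nthreads + threadid) * CHUNK;
       e0 = min (s0 + CHUNK, n);
       if (s0 >= n) goto fin;
     seq_start:
       V = n1 + s0 * STEP;
       e = n1 + e0 * STEP;
     body:
       BODY;
     cont:
       V += STEP;
       if (V cond e) goto body;
     trip_update:
       trip += 1;
       goto iter_part;
     fin:
       ;
*/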
4448 /* Expand the OpenMP loop defined by REGION. */
static void
expand_omp_for (struct omp_region *region)
{
4453 struct omp_for_data fd;
4454 struct omp_for_data_loop *loops;
  loops
    = (struct omp_for_data_loop *)
      alloca (gimple_omp_for_collapse (last_stmt (region->entry))
	      * sizeof (struct omp_for_data_loop));
4460 extract_omp_for_data (last_stmt (region->entry), &fd, loops);
4461 region->sched_kind = fd.sched_kind;
4463 gcc_assert (EDGE_COUNT (region->entry->succs) == 2);
4464 BRANCH_EDGE (region->entry)->flags &= ~EDGE_ABNORMAL;
4465 FALLTHRU_EDGE (region->entry)->flags &= ~EDGE_ABNORMAL;
  if (region->cont)
    {
      gcc_assert (EDGE_COUNT (region->cont->succs) == 2);
      BRANCH_EDGE (region->cont)->flags &= ~EDGE_ABNORMAL;
      FALLTHRU_EDGE (region->cont)->flags &= ~EDGE_ABNORMAL;
    }
  if (fd.sched_kind == OMP_CLAUSE_SCHEDULE_STATIC
      && !fd.have_ordered
      && fd.collapse == 1
      && region->cont != NULL)
    {
      if (fd.chunk_size == NULL)
	expand_omp_for_static_nochunk (region, &fd);
      else
	expand_omp_for_static_chunk (region, &fd);
    }
  else
    {
      int fn_index, start_ix, next_ix;
4487 gcc_assert (fd.sched_kind != OMP_CLAUSE_SCHEDULE_AUTO);
4488 fn_index = (fd.sched_kind == OMP_CLAUSE_SCHEDULE_RUNTIME)
4489 ? 3 : fd.sched_kind;
4490 fn_index += fd.have_ordered * 4;
4491 start_ix = BUILT_IN_GOMP_LOOP_STATIC_START + fn_index;
4492 next_ix = BUILT_IN_GOMP_LOOP_STATIC_NEXT + fn_index;
      if (fd.iter_type == long_long_unsigned_type_node)
	{
	  start_ix += BUILT_IN_GOMP_LOOP_ULL_STATIC_START
		      - BUILT_IN_GOMP_LOOP_STATIC_START;
	  next_ix += BUILT_IN_GOMP_LOOP_ULL_STATIC_NEXT
		     - BUILT_IN_GOMP_LOOP_STATIC_NEXT;
	}
      expand_omp_for_generic (region, &fd, start_ix, next_ix);
    }

  update_ssa (TODO_update_ssa_only_virtuals);
}
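
/* A worked example of the index arithmetic above (illustrative; it
   assumes the builtin enumeration groups the unordered entry points
   static/dynamic/guided/runtime followed by their ordered variants,
   which the arithmetic requires): for 'schedule(dynamic)' in a loop
   with an 'ordered' clause, fn_index = 1 + 4 = 5, so start_ix resolves
   to BUILT_IN_GOMP_LOOP_ORDERED_DYNAMIC_START and the loop is driven
   by GOMP_loop_ordered_dynamic_start/next from libgomp.  */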
/* Expand code for an OpenMP sections directive.  In pseudo code, we generate

	v = GOMP_sections_start (n);
    L0:
	switch (v)
	  {
	  case 0:
	    goto L2;
	  case 1:
	    section 1;
	    goto L1;
	  case 2:
	    ...
	  case n:
	    ...
	  default:
	    abort ();
	  }
    L1:
	v = GOMP_sections_next ();
	goto L0;
    L2:
	reduction;

   If this is a combined parallel sections, replace the call to
   GOMP_sections_start with a call to GOMP_sections_next.  */

static void
4535 expand_omp_sections (struct omp_region *region)
4537 tree t, u, vin = NULL, vmain, vnext, l1, l2;
4538 VEC (tree,heap) *label_vec;
4540 basic_block entry_bb, l0_bb, l1_bb, l2_bb, default_bb;
4541 gimple_stmt_iterator si, switch_si;
4542 gimple sections_stmt, stmt, cont;
4545 struct omp_region *inner;
4547 bool exit_reachable = region->cont != NULL;
4549 gcc_assert (exit_reachable == (region->exit != NULL));
4550 entry_bb = region->entry;
4551 l0_bb = single_succ (entry_bb);
4552 l1_bb = region->cont;
4553 l2_bb = region->exit;
  if (exit_reachable)
    {
      if (single_pred (l2_bb) == l0_bb)
	l2 = gimple_block_label (l2_bb);
      else
	{
	  /* This can happen if there are reductions.  */
	  len = EDGE_COUNT (l0_bb->succs);
	  gcc_assert (len > 0);
	  e = EDGE_SUCC (l0_bb, len - 1);
	  si = gsi_last_bb (e->dest);
	  l2 = NULL_TREE;
	  if (gsi_end_p (si)
	      || gimple_code (gsi_stmt (si)) != GIMPLE_OMP_SECTION)
	    l2 = gimple_block_label (e->dest);
	  else
	    FOR_EACH_EDGE (e, ei, l0_bb->succs)
	      {
		si = gsi_last_bb (e->dest);
		if (gsi_end_p (si)
		    || gimple_code (gsi_stmt (si)) != GIMPLE_OMP_SECTION)
		  {
		    l2 = gimple_block_label (e->dest);
		    break;
		  }
	      }
	}
      default_bb = create_empty_bb (l1_bb->prev_bb);
      l1 = gimple_block_label (l1_bb);
    }
  else
    {
      default_bb = create_empty_bb (l0_bb);
      l1 = NULL_TREE;
      l2 = gimple_block_label (default_bb);
    }
  /* We will build a switch() with enough cases for all the
     GIMPLE_OMP_SECTION regions, a '0' case taken when there is no more
     work, and a default case to abort if something goes wrong.  */
4594 len = EDGE_COUNT (l0_bb->succs);
  /* Use VEC_quick_push on label_vec throughout, since we know the size
     in advance.  */
  label_vec = VEC_alloc (tree, heap, len);
4600 /* The call to GOMP_sections_start goes in ENTRY_BB, replacing the
4601 GIMPLE_OMP_SECTIONS statement. */
4602 si = gsi_last_bb (entry_bb);
4603 sections_stmt = gsi_stmt (si);
4604 gcc_assert (gimple_code (sections_stmt) == GIMPLE_OMP_SECTIONS);
4605 vin = gimple_omp_sections_control (sections_stmt);
  if (!is_combined_parallel (region))
    {
      /* If we are not inside a combined parallel+sections region,
	 call GOMP_sections_start.  */
      t = build_int_cst (unsigned_type_node,
			 exit_reachable ? len - 1 : len);
      u = built_in_decls[BUILT_IN_GOMP_SECTIONS_START];
      stmt = gimple_build_call (u, 1, t);
    }
  else
    {
      /* Otherwise, call GOMP_sections_next.  */
      u = built_in_decls[BUILT_IN_GOMP_SECTIONS_NEXT];
      stmt = gimple_build_call (u, 0);
    }
4621 gimple_call_set_lhs (stmt, vin);
4622 gsi_insert_after (&si, stmt, GSI_SAME_STMT);
4623 gsi_remove (&si, true);
4625 /* The switch() statement replacing GIMPLE_OMP_SECTIONS_SWITCH goes in
4627 switch_si = gsi_last_bb (l0_bb);
4628 gcc_assert (gimple_code (gsi_stmt (switch_si)) == GIMPLE_OMP_SECTIONS_SWITCH);
  if (exit_reachable)
    {
      cont = last_stmt (l1_bb);
      gcc_assert (gimple_code (cont) == GIMPLE_OMP_CONTINUE);
      vmain = gimple_omp_continue_control_use (cont);
      vnext = gimple_omp_continue_control_def (cont);
    }
  else
    {
      vmain = vin;
      vnext = NULL_TREE;
    }
  i = 0;
  if (exit_reachable)
    {
      t = build3 (CASE_LABEL_EXPR, void_type_node,
		  build_int_cst (unsigned_type_node, 0), NULL, l2);
      VEC_quick_push (tree, label_vec, t);
      i++;
    }
4651 /* Convert each GIMPLE_OMP_SECTION into a CASE_LABEL_EXPR. */
  for (inner = region->inner, casei = 1;
       inner;
       inner = inner->next, i++, casei++)
    {
      basic_block s_entry_bb, s_exit_bb;
      /* Skip optional reduction region.  */
      if (inner->type == GIMPLE_OMP_ATOMIC_LOAD)
	{
	  --i;
	  --casei;
	  continue;
	}
4666 s_entry_bb = inner->entry;
4667 s_exit_bb = inner->exit;
4669 t = gimple_block_label (s_entry_bb);
4670 u = build_int_cst (unsigned_type_node, casei);
4671 u = build3 (CASE_LABEL_EXPR, void_type_node, u, NULL, t);
4672 VEC_quick_push (tree, label_vec, u);
4674 si = gsi_last_bb (s_entry_bb);
4675 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_SECTION);
4676 gcc_assert (i < len || gimple_omp_section_last_p (gsi_stmt (si)));
4677 gsi_remove (&si, true);
4678 single_succ_edge (s_entry_bb)->flags = EDGE_FALLTHRU;
      if (s_exit_bb == NULL)
	continue;
4683 si = gsi_last_bb (s_exit_bb);
4684 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_RETURN);
4685 gsi_remove (&si, true);
      single_succ_edge (s_exit_bb)->flags = EDGE_FALLTHRU;
    }
4690 /* Error handling code goes in DEFAULT_BB. */
4691 t = gimple_block_label (default_bb);
4692 u = build3 (CASE_LABEL_EXPR, void_type_node, NULL, NULL, t);
4693 make_edge (l0_bb, default_bb, 0);
4695 stmt = gimple_build_switch_vec (vmain, u, label_vec);
4696 gsi_insert_after (&switch_si, stmt, GSI_SAME_STMT);
4697 gsi_remove (&switch_si, true);
4698 VEC_free (tree, heap, label_vec);
4700 si = gsi_start_bb (default_bb);
4701 stmt = gimple_build_call (built_in_decls[BUILT_IN_TRAP], 0);
4702 gsi_insert_after (&si, stmt, GSI_CONTINUE_LINKING);
  if (exit_reachable)
    {
      /* Code to get the next section goes in L1_BB.  */
4707 si = gsi_last_bb (l1_bb);
4708 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_CONTINUE);
4710 stmt = gimple_build_call (built_in_decls[BUILT_IN_GOMP_SECTIONS_NEXT], 0);
4711 gimple_call_set_lhs (stmt, vnext);
4712 gsi_insert_after (&si, stmt, GSI_SAME_STMT);
4713 gsi_remove (&si, true);
4715 single_succ_edge (l1_bb)->flags = EDGE_FALLTHRU;
4717 /* Cleanup function replaces GIMPLE_OMP_RETURN in EXIT_BB. */
4718 si = gsi_last_bb (l2_bb);
      if (gimple_omp_return_nowait_p (gsi_stmt (si)))
	t = built_in_decls[BUILT_IN_GOMP_SECTIONS_END_NOWAIT];
      else
	t = built_in_decls[BUILT_IN_GOMP_SECTIONS_END];
4723 stmt = gimple_build_call (t, 0);
4724 gsi_insert_after (&si, stmt, GSI_SAME_STMT);
      gsi_remove (&si, true);
    }
4728 set_immediate_dominator (CDI_DOMINATORS, default_bb, l0_bb);
/* Expand code for an OpenMP single directive.  We've already expanded
   much of the code; here we simply place the GOMP_barrier call.  */

static void
4736 expand_omp_single (struct omp_region *region)
4738 basic_block entry_bb, exit_bb;
4739 gimple_stmt_iterator si;
4740 bool need_barrier = false;
4742 entry_bb = region->entry;
4743 exit_bb = region->exit;
4745 si = gsi_last_bb (entry_bb);
4746 /* The terminal barrier at the end of a GOMP_single_copy sequence cannot
4747 be removed. We need to ensure that the thread that entered the single
4748 does not exit before the data is copied out by the other threads. */
4749 if (find_omp_clause (gimple_omp_single_clauses (gsi_stmt (si)),
4750 OMP_CLAUSE_COPYPRIVATE))
4751 need_barrier = true;
4752 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_SINGLE);
4753 gsi_remove (&si, true);
4754 single_succ_edge (entry_bb)->flags = EDGE_FALLTHRU;
4756 si = gsi_last_bb (exit_bb);
4757 if (!gimple_omp_return_nowait_p (gsi_stmt (si)) || need_barrier)
4758 force_gimple_operand_gsi (&si, build_omp_barrier (), false, NULL_TREE,
4759 false, GSI_SAME_STMT);
4760 gsi_remove (&si, true);
4761 single_succ_edge (exit_bb)->flags = EDGE_FALLTHRU;
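
/* For illustration: given '#pragma omp single copyprivate (x)', the
   barrier placed above is kept even under 'nowait', so the region
   behaves roughly like

       if (GOMP_single_start ())	<- inserted during lowering
	 BODY;
       GOMP_barrier ();			<- inserted here

   which guarantees no thread runs ahead before the copied-out data is
   visible.  (A sketch of the combined effect, not literal output.)  */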
4765 /* Generic expansion for OpenMP synchronization directives: master,
4766 ordered and critical. All we need to do here is remove the entry
4767 and exit markers for REGION. */
4770 expand_omp_synch (struct omp_region *region)
4772 basic_block entry_bb, exit_bb;
4773 gimple_stmt_iterator si;
4775 entry_bb = region->entry;
4776 exit_bb = region->exit;
4778 si = gsi_last_bb (entry_bb);
4779 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_SINGLE
4780 || gimple_code (gsi_stmt (si)) == GIMPLE_OMP_MASTER
4781 || gimple_code (gsi_stmt (si)) == GIMPLE_OMP_ORDERED
4782 || gimple_code (gsi_stmt (si)) == GIMPLE_OMP_CRITICAL);
4783 gsi_remove (&si, true);
4784 single_succ_edge (entry_bb)->flags = EDGE_FALLTHRU;
  if (exit_bb)
    {
      si = gsi_last_bb (exit_bb);
4789 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_RETURN);
4790 gsi_remove (&si, true);
      single_succ_edge (exit_bb)->flags = EDGE_FALLTHRU;
    }
4795 /* A subroutine of expand_omp_atomic. Attempt to implement the atomic
4796 operation as a __sync_fetch_and_op builtin. INDEX is log2 of the
4797 size of the data type, and thus usable to find the index of the builtin
4798 decl. Returns false if the expression is not of the proper form. */
4801 expand_omp_atomic_fetch_op (basic_block load_bb,
4802 tree addr, tree loaded_val,
4803 tree stored_val, int index)
4805 enum built_in_function base;
4806 tree decl, itype, call;
4807 enum insn_code *optab;
4809 basic_block store_bb = single_succ (load_bb);
4810 gimple_stmt_iterator gsi;
  /* We expect to find the following sequences:

   load_bb:
       GIMPLE_OMP_ATOMIC_LOAD (tmp, mem)

   store_bb:
       val = tmp OP something; (or: something OP tmp)
       GIMPLE_OMP_ATOMIC_STORE (val)

   ??? FIXME: Allow a more flexible sequence.
   Perhaps use data flow to pick the statements.  */
4827 gsi = gsi_after_labels (store_bb);
4828 stmt = gsi_stmt (gsi);
  if (!is_gimple_assign (stmt))
    return false;
  gsi_next (&gsi);
  if (gimple_code (gsi_stmt (gsi)) != GIMPLE_OMP_ATOMIC_STORE)
    return false;

  if (!operand_equal_p (gimple_assign_lhs (stmt), stored_val, 0))
    return false;
4838 /* Check for one of the supported fetch-op operations. */
  switch (gimple_assign_rhs_code (stmt))
    {
    case PLUS_EXPR:
    case POINTER_PLUS_EXPR:
      base = BUILT_IN_FETCH_AND_ADD_N;
      optab = sync_add_optab;
      break;
    case MINUS_EXPR:
      base = BUILT_IN_FETCH_AND_SUB_N;
      optab = sync_add_optab;
      break;
    case BIT_AND_EXPR:
      base = BUILT_IN_FETCH_AND_AND_N;
      optab = sync_and_optab;
      break;
    case BIT_IOR_EXPR:
      base = BUILT_IN_FETCH_AND_OR_N;
      optab = sync_ior_optab;
      break;
    case BIT_XOR_EXPR:
      base = BUILT_IN_FETCH_AND_XOR_N;
      optab = sync_xor_optab;
      break;
    default:
      return false;
    }
4865 /* Make sure the expression is of the proper form. */
  if (operand_equal_p (gimple_assign_rhs1 (stmt), loaded_val, 0))
    rhs = gimple_assign_rhs2 (stmt);
  else if (commutative_tree_code (gimple_assign_rhs_code (stmt))
	   && operand_equal_p (gimple_assign_rhs2 (stmt), loaded_val, 0))
    rhs = gimple_assign_rhs1 (stmt);
  else
    return false;
4874 decl = built_in_decls[base + index + 1];
4875 itype = TREE_TYPE (TREE_TYPE (decl));
  if (optab[TYPE_MODE (itype)] == CODE_FOR_nothing)
    return false;
4880 gsi = gsi_last_bb (load_bb);
4881 gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_ATOMIC_LOAD);
4882 call = build_call_expr (decl, 2, addr, fold_convert (itype, rhs));
4883 call = fold_convert (void_type_node, call);
4884 force_gimple_operand_gsi (&gsi, call, true, NULL_TREE, true, GSI_SAME_STMT);
4885 gsi_remove (&gsi, true);
4887 gsi = gsi_last_bb (store_bb);
4888 gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_ATOMIC_STORE);
4889 gsi_remove (&gsi, true);
4890 gsi = gsi_last_bb (store_bb);
4891 gsi_remove (&gsi, true);
4893 if (gimple_in_ssa_p (cfun))
    update_ssa (TODO_update_ssa_no_phi);

  return true;
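
/* For example (illustrative, assuming a target with 4-byte sync
   support): under '#pragma omp atomic', the statement 'x += 3' on an
   'int' has index = exact_log2 (4) = 2, so the load/store pair above
   is replaced by a single call equivalent to

       __sync_fetch_and_add (&x, 3);

   and no compare-and-swap loop is needed.  */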
/* A subroutine of expand_omp_atomic.  Implement the atomic operation as:

      oldval = *addr;
      repeat:
	newval = rhs;	// with oldval replacing *addr in rhs
	oldval = __sync_val_compare_and_swap (addr, oldval, newval);
	if (oldval != newval)
	  goto repeat;

   INDEX is log2 of the size of the data type, and thus usable to find the
   index of the builtin decl.  */

static bool
4912 expand_omp_atomic_pipeline (basic_block load_bb, basic_block store_bb,
4913 tree addr, tree loaded_val, tree stored_val,
4916 tree loadedi, storedi, initial, new_storedi, old_vali;
4917 tree type, itype, cmpxchg, iaddr;
4918 gimple_stmt_iterator si;
4919 basic_block loop_header = single_succ (load_bb);
4923 cmpxchg = built_in_decls[BUILT_IN_VAL_COMPARE_AND_SWAP_N + index + 1];
4924 type = TYPE_MAIN_VARIANT (TREE_TYPE (TREE_TYPE (addr)));
4925 itype = TREE_TYPE (TREE_TYPE (cmpxchg));
  if (sync_compare_and_swap[TYPE_MODE (itype)] == CODE_FOR_nothing)
    return false;
4930 /* Load the initial value, replacing the GIMPLE_OMP_ATOMIC_LOAD. */
4931 si = gsi_last_bb (load_bb);
4932 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_ATOMIC_LOAD);
4934 /* For floating-point values, we'll need to view-convert them to integers
4935 so that we can perform the atomic compare and swap. Simplify the
4936 following code by always setting up the "i"ntegral variables. */
4937 if (!INTEGRAL_TYPE_P (type) && !POINTER_TYPE_P (type))
    {
      tree iaddr_val;

      iaddr = create_tmp_var (build_pointer_type (itype), NULL);
      iaddr_val
	= force_gimple_operand_gsi (&si,
4944 fold_convert (TREE_TYPE (iaddr), addr),
4945 false, NULL_TREE, true, GSI_SAME_STMT);
4946 stmt = gimple_build_assign (iaddr, iaddr_val);
4947 gsi_insert_before (&si, stmt, GSI_SAME_STMT);
4948 DECL_NO_TBAA_P (iaddr) = 1;
4949 DECL_POINTER_ALIAS_SET (iaddr) = 0;
4950 loadedi = create_tmp_var (itype, NULL);
4951 if (gimple_in_ssa_p (cfun))
	{
	  add_referenced_var (iaddr);
4954 add_referenced_var (loadedi);
4955 loadedi = make_ssa_name (loadedi, NULL);
	}
    }
  else
    {
      iaddr = addr;
      loadedi = loaded_val;
    }
4964 initial = force_gimple_operand_gsi (&si, build_fold_indirect_ref (iaddr),
4965 true, NULL_TREE, true, GSI_SAME_STMT);
4967 /* Move the value to the LOADEDI temporary. */
4968 if (gimple_in_ssa_p (cfun))
    {
      gcc_assert (gimple_seq_empty_p (phi_nodes (loop_header)));
4971 phi = create_phi_node (loadedi, loop_header);
4972 SSA_NAME_DEF_STMT (loadedi) = phi;
      SET_USE (PHI_ARG_DEF_PTR_FROM_EDGE (phi, single_succ_edge (load_bb)),
	       initial);
    }
  else
    gsi_insert_before (&si,
		       gimple_build_assign (loadedi, initial),
		       GSI_SAME_STMT);
4980 if (loadedi != loaded_val)
    {
      gimple_stmt_iterator gsi2;
      tree x;
4985 x = build1 (VIEW_CONVERT_EXPR, type, loadedi);
4986 gsi2 = gsi_start_bb (loop_header);
      if (gimple_in_ssa_p (cfun))
	{
4990 x = force_gimple_operand_gsi (&gsi2, x, true, NULL_TREE,
4991 true, GSI_SAME_STMT);
4992 stmt = gimple_build_assign (loaded_val, x);
	  gsi_insert_before (&gsi2, stmt, GSI_SAME_STMT);
	}
      else
	{
	  x = build2 (MODIFY_EXPR, TREE_TYPE (loaded_val), loaded_val, x);
	  force_gimple_operand_gsi (&gsi2, x, true, NULL_TREE,
				    true, GSI_SAME_STMT);
	}
    }
5002 gsi_remove (&si, true);
5004 si = gsi_last_bb (store_bb);
5005 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_ATOMIC_STORE);
  if (iaddr == addr)
    storedi = stored_val;
  else
    storedi
      = force_gimple_operand_gsi (&si,
				  build1 (VIEW_CONVERT_EXPR, itype,
					  stored_val), true, NULL_TREE, true,
				  GSI_SAME_STMT);
5016 /* Build the compare&swap statement. */
5017 new_storedi = build_call_expr (cmpxchg, 3, iaddr, loadedi, storedi);
5018 new_storedi = force_gimple_operand_gsi (&si,
5019 fold_convert (itype, new_storedi),
					  true, NULL_TREE,
					  true, GSI_SAME_STMT);
  if (gimple_in_ssa_p (cfun))
    old_vali = loadedi;
  else
    {
      old_vali = create_tmp_var (itype, NULL);
5028 if (gimple_in_ssa_p (cfun))
5029 add_referenced_var (old_vali);
5030 stmt = gimple_build_assign (old_vali, loadedi);
5031 gsi_insert_before (&si, stmt, GSI_SAME_STMT);
5033 stmt = gimple_build_assign (loadedi, new_storedi);
      gsi_insert_before (&si, stmt, GSI_SAME_STMT);
    }
5037 /* Note that we always perform the comparison as an integer, even for
5038 floating point. This allows the atomic operation to properly
5039 succeed even with NaNs and -0.0. */
5040 stmt = gimple_build_cond_empty
5041 (build2 (NE_EXPR, boolean_type_node,
5042 new_storedi, old_vali));
5043 gsi_insert_before (&si, stmt, GSI_SAME_STMT);
5046 e = single_succ_edge (store_bb);
5047 e->flags &= ~EDGE_FALLTHRU;
5048 e->flags |= EDGE_FALSE_VALUE;
5050 e = make_edge (store_bb, loop_header, EDGE_TRUE_VALUE);
5052 /* Copy the new value to loadedi (we already did that before the condition
5053 if we are not in SSA). */
5054 if (gimple_in_ssa_p (cfun))
    {
      phi = gimple_seq_first_stmt (phi_nodes (loop_header));
      SET_USE (PHI_ARG_DEF_PTR_FROM_EDGE (phi, e), new_storedi);
    }
5060 /* Remove GIMPLE_OMP_ATOMIC_STORE. */
5061 gsi_remove (&si, true);
5063 if (gimple_in_ssa_p (cfun))
    update_ssa (TODO_update_ssa_no_phi);

  return true;
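
/* A user-level sketch of the loop built above, for a 'float' on a
   target with a 4-byte compare-and-swap ('#pragma omp atomic
   f += 2.0f').  The union stands in for the VIEW_CONVERT_EXPRs, and
   the names are invented for exposition:

       union { float f; int i; } u;
       int old, cur = *(int *) &f;
       do
	 {
	   old = cur;
	   u.i = old;
	   u.f += 2.0f;
	   cur = __sync_val_compare_and_swap ((int *) &f, old, u.i);
	 }
       while (cur != old);	// integer compare: safe for NaN and -0.0
*/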
/* A subroutine of expand_omp_atomic.  Implement the atomic operation as:

      GOMP_atomic_start ();
      *addr = rhs;
      GOMP_atomic_end ();

   The result is not globally atomic, but works so long as all parallel
   references are within #pragma omp atomic directives.  According to
   responses received from omp@openmp.org, this appears to be within
   spec; which makes sense, since that's how several other compilers
   handle this situation as well.
   LOADED_VAL and ADDR are the operands of GIMPLE_OMP_ATOMIC_LOAD we're
   expanding.  STORED_VAL is the operand of the matching
   GIMPLE_OMP_ATOMIC_STORE.

   We replace
   GIMPLE_OMP_ATOMIC_LOAD (loaded_val, addr) with
   loaded_val = *addr;

   and replace
   GIMPLE_OMP_ATOMIC_STORE (stored_val) with
   *addr = stored_val;  */

static void
5094 expand_omp_atomic_mutex (basic_block load_bb, basic_block store_bb,
5095 tree addr, tree loaded_val, tree stored_val)
5097 gimple_stmt_iterator si;
5101 si = gsi_last_bb (load_bb);
5102 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_ATOMIC_LOAD);
5104 t = built_in_decls[BUILT_IN_GOMP_ATOMIC_START];
5105 t = build_function_call_expr (t, 0);
5106 force_gimple_operand_gsi (&si, t, true, NULL_TREE, true, GSI_SAME_STMT);
5108 stmt = gimple_build_assign (loaded_val, build_fold_indirect_ref (addr));
5109 gsi_insert_before (&si, stmt, GSI_SAME_STMT);
5110 gsi_remove (&si, true);
5112 si = gsi_last_bb (store_bb);
5113 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_ATOMIC_STORE);
  stmt = gimple_build_assign (build_fold_indirect_ref (unshare_expr (addr)),
			      stored_val);
  gsi_insert_before (&si, stmt, GSI_SAME_STMT);
5119 t = built_in_decls[BUILT_IN_GOMP_ATOMIC_END];
5120 t = build_function_call_expr (t, 0);
5121 force_gimple_operand_gsi (&si, t, true, NULL_TREE, true, GSI_SAME_STMT);
5122 gsi_remove (&si, true);
5124 if (gimple_in_ssa_p (cfun))
5125 update_ssa (TODO_update_ssa_no_phi);
5129 /* Expand an GIMPLE_OMP_ATOMIC statement. We try to expand
5130 using expand_omp_atomic_fetch_op. If it failed, we try to
5131 call expand_omp_atomic_pipeline, and if it fails too, the
5132 ultimate fallback is wrapping the operation in a mutex
5133 (expand_omp_atomic_mutex). REGION is the atomic region built
5134 by build_omp_regions_1(). */
5137 expand_omp_atomic (struct omp_region *region)
5139 basic_block load_bb = region->entry, store_bb = region->exit;
5140 gimple load = last_stmt (load_bb), store = last_stmt (store_bb);
5141 tree loaded_val = gimple_omp_atomic_load_lhs (load);
5142 tree addr = gimple_omp_atomic_load_rhs (load);
5143 tree stored_val = gimple_omp_atomic_store_val (store);
5144 tree type = TYPE_MAIN_VARIANT (TREE_TYPE (TREE_TYPE (addr)));
5145 HOST_WIDE_INT index;
5147 /* Make sure the type is one of the supported sizes. */
5148 index = tree_low_cst (TYPE_SIZE_UNIT (type), 1);
5149 index = exact_log2 (index);
  if (index >= 0 && index <= 4)
    {
      unsigned int align = TYPE_ALIGN_UNIT (type);
5154 /* __sync builtins require strict data alignment. */
      if (exact_log2 (align) >= index)
	{
	  /* When possible, use specialized atomic update functions.  */
5158 if ((INTEGRAL_TYPE_P (type) || POINTER_TYPE_P (type))
5159 && store_bb == single_succ (load_bb))
	    {
	      if (expand_omp_atomic_fetch_op (load_bb, addr,
					      loaded_val, stored_val, index))
		return;
	    }
5166 /* If we don't have specialized __sync builtins, try and implement
5167 as a compare and swap loop. */
	  if (expand_omp_atomic_pipeline (load_bb, store_bb, addr,
					  loaded_val, stored_val, index))
	    return;
	}
    }
5174 /* The ultimate fallback is wrapping the operation in a mutex. */
5175 expand_omp_atomic_mutex (load_bb, store_bb, addr, loaded_val, stored_val);
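
/* A worked example of the gating above (illustration only): for a
   'double', TYPE_SIZE_UNIT is 8, so index = exact_log2 (8) = 3; the
   fast paths are tried only when the type's alignment is also at least
   8 bytes, because the __sync builtins require naturally aligned
   operands.  An under-aligned or oddly sized type falls all the way
   through to the GOMP_atomic_start/end mutex.  */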
5179 /* Expand the parallel region tree rooted at REGION. Expansion
5180 proceeds in depth-first order. Innermost regions are expanded
5181 first. This way, parallel regions that require a new function to
5182 be created (e.g., GIMPLE_OMP_PARALLEL) can be expanded without having any
5183 internal dependencies in their body. */
5186 expand_omp (struct omp_region *region)
{
  while (region)
    {
      location_t saved_location;
      /* First, determine whether this is a combined parallel+workshare
	 region.  */
5194 if (region->type == GIMPLE_OMP_PARALLEL)
5195 determine_parallel_type (region);
      if (region->inner)
	expand_omp (region->inner);
5200 saved_location = input_location;
5201 if (gimple_has_location (last_stmt (region->entry)))
5202 input_location = gimple_location (last_stmt (region->entry));
      switch (region->type)
	{
	case GIMPLE_OMP_PARALLEL:
	case GIMPLE_OMP_TASK:
	  expand_omp_taskreg (region);
	  break;

	case GIMPLE_OMP_FOR:
	  expand_omp_for (region);
	  break;

	case GIMPLE_OMP_SECTIONS:
	  expand_omp_sections (region);
	  break;

	case GIMPLE_OMP_SECTION:
	  /* Individual omp sections are handled together with their
	     parent GIMPLE_OMP_SECTIONS region.  */
	  break;

	case GIMPLE_OMP_SINGLE:
	  expand_omp_single (region);
	  break;

	case GIMPLE_OMP_MASTER:
	case GIMPLE_OMP_ORDERED:
	case GIMPLE_OMP_CRITICAL:
	  expand_omp_synch (region);
	  break;

	case GIMPLE_OMP_ATOMIC_LOAD:
	  expand_omp_atomic (region);
	  break;

	default:
	  gcc_unreachable ();
	}
5242 input_location = saved_location;
      region = region->next;
    }
}
5248 /* Helper for build_omp_regions. Scan the dominator tree starting at
5249 block BB. PARENT is the region that contains BB. If SINGLE_TREE is
5250 true, the function ends once a single tree is built (otherwise, whole
5251 forest of OMP constructs may be built). */
5254 build_omp_regions_1 (basic_block bb, struct omp_region *parent,
5257 gimple_stmt_iterator gsi;
5261 gsi = gsi_last_bb (bb);
5262 if (!gsi_end_p (gsi) && is_gimple_omp (gsi_stmt (gsi)))
    {
      struct omp_region *region;
      enum gimple_code code;
5267 stmt = gsi_stmt (gsi);
5268 code = gimple_code (stmt);
5269 if (code == GIMPLE_OMP_RETURN)
	{
	  /* STMT is the return point out of region PARENT.  Mark it
5272 as the exit point and make PARENT the immediately
5273 enclosing region. */
	  gcc_assert (parent);
	  parent->exit = bb;
	  parent = parent->outer;
	}
5279 else if (code == GIMPLE_OMP_ATOMIC_STORE)
	{
	  /* GIMPLE_OMP_ATOMIC_STORE is analogous to
	     GIMPLE_OMP_RETURN, but matches with
	     GIMPLE_OMP_ATOMIC_LOAD.  */
5284 gcc_assert (parent);
5285 gcc_assert (parent->type == GIMPLE_OMP_ATOMIC_LOAD);
	  parent->exit = bb;
	  parent = parent->outer;
	}
5291 else if (code == GIMPLE_OMP_CONTINUE)
	{
	  gcc_assert (parent);
	  parent->cont = bb;
	}
5296 else if (code == GIMPLE_OMP_SECTIONS_SWITCH)
	{
	  /* GIMPLE_OMP_SECTIONS_SWITCH is part of
	     GIMPLE_OMP_SECTIONS, and we do nothing for it.  */
	}
      else
	{
	  /* Otherwise, this directive becomes the parent for a new
	     region.  */
	  region = new_omp_region (bb, code, parent);
	  parent = region;
	}
    }
  if (single_tree && !parent)
    return;
  for (son = first_dom_son (CDI_DOMINATORS, bb);
       son;
       son = next_dom_son (CDI_DOMINATORS, son))
5317 build_omp_regions_1 (son, parent, single_tree);
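
/* For illustration, a nest such as

       #pragma omp parallel		<- outer region
	 {
	   #pragma omp for		<- inner region, child of parallel
	     ...
	   #pragma omp single		<- sibling of the for region
	     ...
	 }

   yields a region tree rooted at the parallel region, with the for and
   single regions chained as its inner children; each GIMPLE_OMP_RETURN
   encountered pops back out to the enclosing region.  (A sketch of the
   data structure built above.)  */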
/* Builds the tree of OMP regions rooted at ROOT, storing it to
   root_omp_region.  */

static void
5324 build_omp_regions_root (basic_block root)
5326 gcc_assert (root_omp_region == NULL);
5327 build_omp_regions_1 (root, NULL, true);
5328 gcc_assert (root_omp_region != NULL);
5331 /* Expands omp construct (and its subconstructs) starting in HEAD. */
5334 omp_expand_local (basic_block head)
5336 build_omp_regions_root (head);
5337 if (dump_file && (dump_flags & TDF_DETAILS))
    {
      fprintf (dump_file, "\nOMP region tree\n\n");
5340 dump_omp_region (dump_file, root_omp_region, 0);
      fprintf (dump_file, "\n");
    }
5344 remove_exit_barriers (root_omp_region);
5345 expand_omp (root_omp_region);
5347 free_omp_regions ();
5350 /* Scan the CFG and build a tree of OMP regions. Return the root of
5351 the OMP region tree. */
5354 build_omp_regions (void)
5356 gcc_assert (root_omp_region == NULL);
5357 calculate_dominance_info (CDI_DOMINATORS);
5358 build_omp_regions_1 (ENTRY_BLOCK_PTR, NULL, false);
5361 /* Main entry point for expanding OMP-GIMPLE into runtime calls. */
5364 execute_expand_omp (void)
5366 build_omp_regions ();
  if (!root_omp_region)
    return 0;

  if (dump_file)
    {
      fprintf (dump_file, "\nOMP region tree\n\n");
5374 dump_omp_region (dump_file, root_omp_region, 0);
      fprintf (dump_file, "\n");
    }
5378 remove_exit_barriers (root_omp_region);
5380 expand_omp (root_omp_region);
5382 cleanup_tree_cfg ();
  free_omp_regions ();

  return 0;
5389 /* OMP expansion -- the default pass, run before creation of SSA form. */
5392 gate_expand_omp (void)
5394 return (flag_openmp != 0 && errorcount == 0);
5397 struct gimple_opt_pass pass_expand_omp =
5401 "ompexp", /* name */
5402 gate_expand_omp, /* gate */
5403 execute_expand_omp, /* execute */
5406 0, /* static_pass_number */
5408 PROP_gimple_any, /* properties_required */
5409 PROP_gimple_lomp, /* properties_provided */
5410 0, /* properties_destroyed */
5411 0, /* todo_flags_start */
5412 TODO_dump_func /* todo_flags_finish */
5416 /* Routines to lower OpenMP directives into OMP-GIMPLE. */
5418 /* Lower the OpenMP sections directive in the current statement in GSI_P.
5419 CTX is the enclosing OMP context for the current statement. */
5422 lower_omp_sections (gimple_stmt_iterator *gsi_p, omp_context *ctx)
5424 tree block, control;
5425 gimple_stmt_iterator tgsi;
5427 gimple stmt, new_stmt, bind, t;
5428 gimple_seq ilist, dlist, olist, new_body, body;
5429 struct gimplify_ctx gctx;
5431 stmt = gsi_stmt (*gsi_p);
5433 push_gimplify_context (&gctx);
5437 lower_rec_input_clauses (gimple_omp_sections_clauses (stmt),
5438 &ilist, &dlist, ctx);
5440 tgsi = gsi_start (gimple_omp_body (stmt));
  for (len = 0; !gsi_end_p (tgsi); len++, gsi_next (&tgsi))
    continue;
5444 tgsi = gsi_start (gimple_omp_body (stmt));
5446 for (i = 0; i < len; i++, gsi_next (&tgsi))
5451 sec_start = gsi_stmt (tgsi);
5452 sctx = maybe_lookup_ctx (sec_start);
5455 gimple_seq_add_stmt (&body, sec_start);
5457 lower_omp (gimple_omp_body (sec_start), sctx);
5458 gimple_seq_add_seq (&body, gimple_omp_body (sec_start));
5459 gimple_omp_set_body (sec_start, NULL);
      if (i == len - 1)
	{
	  gimple_seq l = NULL;
5464 lower_lastprivate_clauses (gimple_omp_sections_clauses (stmt), NULL,
5466 gimple_seq_add_seq (&body, l);
	  gimple_omp_section_set_last (sec_start);
	}
5470 gimple_seq_add_stmt (&body, gimple_build_omp_return (false));
5473 block = make_node (BLOCK);
5474 bind = gimple_build_bind (NULL, body, block);
5477 lower_reduction_clauses (gimple_omp_sections_clauses (stmt), &olist, ctx);
5479 block = make_node (BLOCK);
5480 new_stmt = gimple_build_bind (NULL, NULL, block);
5482 pop_gimplify_context (new_stmt);
5483 gimple_bind_append_vars (new_stmt, ctx->block_vars);
5484 BLOCK_VARS (block) = gimple_bind_vars (bind);
5485 if (BLOCK_VARS (block))
5486 TREE_USED (block) = 1;
5489 gimple_seq_add_seq (&new_body, ilist);
5490 gimple_seq_add_stmt (&new_body, stmt);
5491 gimple_seq_add_stmt (&new_body, gimple_build_omp_sections_switch ());
5492 gimple_seq_add_stmt (&new_body, bind);
5494 control = create_tmp_var (unsigned_type_node, ".section");
5495 t = gimple_build_omp_continue (control, control);
5496 gimple_omp_sections_set_control (stmt, control);
5497 gimple_seq_add_stmt (&new_body, t);
5499 gimple_seq_add_seq (&new_body, olist);
5500 gimple_seq_add_seq (&new_body, dlist);
5502 new_body = maybe_catch_exception (new_body);
5504 t = gimple_build_omp_return
5505 (!!find_omp_clause (gimple_omp_sections_clauses (stmt),
5506 OMP_CLAUSE_NOWAIT));
5507 gimple_seq_add_stmt (&new_body, t);
5509 gimple_bind_set_body (new_stmt, new_body);
5510 gimple_omp_set_body (stmt, NULL);
5512 gsi_replace (gsi_p, new_stmt, true);
/* A subroutine of lower_omp_single.  Expand the simple form of
   a GIMPLE_OMP_SINGLE, without a copyprivate clause:

	if (GOMP_single_start ())
	  BODY;
	[ GOMP_barrier (); ]	-> unless 'nowait' is present.

   FIXME.  It may be better to delay expanding the logic of this until
   pass_expand_omp.  The expanded logic may make the job of a
   synchronization analysis pass more difficult.  */

static void
5528 lower_omp_single_simple (gimple single_stmt, gimple_seq *pre_p)
5530 tree tlabel = create_artificial_label ();
5531 tree flabel = create_artificial_label ();
5535 decl = built_in_decls[BUILT_IN_GOMP_SINGLE_START];
5536 lhs = create_tmp_var (TREE_TYPE (TREE_TYPE (decl)), NULL);
5537 call = gimple_build_call (decl, 0);
5538 gimple_call_set_lhs (call, lhs);
5539 gimple_seq_add_stmt (pre_p, call);
5541 cond = gimple_build_cond (EQ_EXPR, lhs,
5542 fold_convert (TREE_TYPE (lhs), boolean_true_node),
			    tlabel, flabel);
  gimple_seq_add_stmt (pre_p, cond);
5545 gimple_seq_add_stmt (pre_p, gimple_build_label (tlabel));
5546 gimple_seq_add_seq (pre_p, gimple_omp_body (single_stmt));
5547 gimple_seq_add_stmt (pre_p, gimple_build_label (flabel));
/* A subroutine of lower_omp_single.  Expand the simple form of
   a GIMPLE_OMP_SINGLE, with a copyprivate clause:

	#pragma omp single copyprivate (a, b, c)

   Create a new structure to hold copies of 'a', 'b' and 'c' and emit:

	{
	  if ((copyout_p = GOMP_single_copy_start ()) == NULL)
	    {
	      BODY;
	      copyout.a = a;
	      copyout.b = b;
	      copyout.c = c;
	      GOMP_single_copy_end (&copyout);
	    }
	  else
	    {
	      a = copyout_p->a;
	      b = copyout_p->b;
	      c = copyout_p->c;
	    }
	  GOMP_barrier ();
	}

   FIXME.  It may be better to delay expanding the logic of this until
   pass_expand_omp.  The expanded logic may make the job of a
   synchronization analysis pass more difficult.  */

static void
5581 lower_omp_single_copy (gimple single_stmt, gimple_seq *pre_p, omp_context *ctx)
5583 tree ptr_type, t, l0, l1, l2;
5584 gimple_seq copyin_seq;
5586 ctx->sender_decl = create_tmp_var (ctx->record_type, ".omp_copy_o");
5588 ptr_type = build_pointer_type (ctx->record_type);
5589 ctx->receiver_decl = create_tmp_var (ptr_type, ".omp_copy_i");
5591 l0 = create_artificial_label ();
5592 l1 = create_artificial_label ();
5593 l2 = create_artificial_label ();
5595 t = build_call_expr (built_in_decls[BUILT_IN_GOMP_SINGLE_COPY_START], 0);
5596 t = fold_convert (ptr_type, t);
5597 gimplify_assign (ctx->receiver_decl, t, pre_p);
5599 t = build2 (EQ_EXPR, boolean_type_node, ctx->receiver_decl,
5600 build_int_cst (ptr_type, 0));
5601 t = build3 (COND_EXPR, void_type_node, t,
5602 build_and_jump (&l0), build_and_jump (&l1));
5603 gimplify_and_add (t, pre_p);
5605 gimple_seq_add_stmt (pre_p, gimple_build_label (l0));
5607 gimple_seq_add_seq (pre_p, gimple_omp_body (single_stmt));
  copyin_seq = NULL;
  lower_copyprivate_clauses (gimple_omp_single_clauses (single_stmt), pre_p,
			     &copyin_seq, ctx);

  t = build_fold_addr_expr (ctx->sender_decl);
5614 t = build_call_expr (built_in_decls[BUILT_IN_GOMP_SINGLE_COPY_END], 1, t);
5615 gimplify_and_add (t, pre_p);
5617 t = build_and_jump (&l2);
5618 gimplify_and_add (t, pre_p);
5620 gimple_seq_add_stmt (pre_p, gimple_build_label (l1));
5622 gimple_seq_add_seq (pre_p, copyin_seq);
5624 gimple_seq_add_stmt (pre_p, gimple_build_label (l2));
5628 /* Expand code for an OpenMP single directive. */
5631 lower_omp_single (gimple_stmt_iterator *gsi_p, omp_context *ctx)
5634 gimple t, bind, single_stmt = gsi_stmt (*gsi_p);
5635 gimple_seq bind_body, dlist;
5636 struct gimplify_ctx gctx;
5638 push_gimplify_context (&gctx);
5641 lower_rec_input_clauses (gimple_omp_single_clauses (single_stmt),
5642 &bind_body, &dlist, ctx);
5643 lower_omp (gimple_omp_body (single_stmt), ctx);
5645 gimple_seq_add_stmt (&bind_body, single_stmt);
  if (ctx->record_type)
    lower_omp_single_copy (single_stmt, &bind_body, ctx);
  else
    lower_omp_single_simple (single_stmt, &bind_body);
5652 gimple_omp_set_body (single_stmt, NULL);
5654 gimple_seq_add_seq (&bind_body, dlist);
5656 bind_body = maybe_catch_exception (bind_body);
5658 t = gimple_build_omp_return
5659 (!!find_omp_clause (gimple_omp_single_clauses (single_stmt),
5660 OMP_CLAUSE_NOWAIT));
5661 gimple_seq_add_stmt (&bind_body, t);
5663 block = make_node (BLOCK);
5664 bind = gimple_build_bind (NULL, bind_body, block);
5666 pop_gimplify_context (bind);
5668 gimple_bind_append_vars (bind, ctx->block_vars);
5669 BLOCK_VARS (block) = ctx->block_vars;
5670 gsi_replace (gsi_p, bind, true);
5671 if (BLOCK_VARS (block))
5672 TREE_USED (block) = 1;
5676 /* Expand code for an OpenMP master directive. */
5679 lower_omp_master (gimple_stmt_iterator *gsi_p, omp_context *ctx)
5681 tree block, lab = NULL, x;
5682 gimple stmt = gsi_stmt (*gsi_p), bind;
5684 struct gimplify_ctx gctx;
5686 push_gimplify_context (&gctx);
5688 block = make_node (BLOCK);
5689 bind = gimple_build_bind (NULL, gimple_seq_alloc_with_stmt (stmt),
			    block);

  x = build_call_expr (built_in_decls[BUILT_IN_OMP_GET_THREAD_NUM], 0);
5693 x = build2 (EQ_EXPR, boolean_type_node, x, integer_zero_node);
5694 x = build3 (COND_EXPR, void_type_node, x, NULL, build_and_jump (&lab));
  tseq = NULL;
  gimplify_and_add (x, &tseq);
5697 gimple_bind_add_seq (bind, tseq);
5699 lower_omp (gimple_omp_body (stmt), ctx);
5700 gimple_omp_set_body (stmt, maybe_catch_exception (gimple_omp_body (stmt)));
5701 gimple_bind_add_seq (bind, gimple_omp_body (stmt));
5702 gimple_omp_set_body (stmt, NULL);
5704 gimple_bind_add_stmt (bind, gimple_build_label (lab));
5706 gimple_bind_add_stmt (bind, gimple_build_omp_return (true));
5708 pop_gimplify_context (bind);
5710 gimple_bind_append_vars (bind, ctx->block_vars);
5711 BLOCK_VARS (block) = ctx->block_vars;
5712 gsi_replace (gsi_p, bind, true);
5716 /* Expand code for an OpenMP ordered directive. */
5719 lower_omp_ordered (gimple_stmt_iterator *gsi_p, omp_context *ctx)
5722 gimple stmt = gsi_stmt (*gsi_p), bind, x;
5723 struct gimplify_ctx gctx;
5725 push_gimplify_context (&gctx);
5727 block = make_node (BLOCK);
5728 bind = gimple_build_bind (NULL, gimple_seq_alloc_with_stmt (stmt),
			    block);

  x = gimple_build_call (built_in_decls[BUILT_IN_GOMP_ORDERED_START], 0);
5732 gimple_bind_add_stmt (bind, x);
5734 lower_omp (gimple_omp_body (stmt), ctx);
5735 gimple_omp_set_body (stmt, maybe_catch_exception (gimple_omp_body (stmt)));
5736 gimple_bind_add_seq (bind, gimple_omp_body (stmt));
5737 gimple_omp_set_body (stmt, NULL);
5739 x = gimple_build_call (built_in_decls[BUILT_IN_GOMP_ORDERED_END], 0);
5740 gimple_bind_add_stmt (bind, x);
5742 gimple_bind_add_stmt (bind, gimple_build_omp_return (true));
5744 pop_gimplify_context (bind);
5746 gimple_bind_append_vars (bind, ctx->block_vars);
5747 BLOCK_VARS (block) = gimple_bind_vars (bind);
5748 gsi_replace (gsi_p, bind, true);
/* Gimplify a GIMPLE_OMP_CRITICAL statement.  This is a relatively simple
   substitution of a couple of function calls.  But in the NAMED case,
   it requires that languages coordinate a symbol name.  It is therefore
   best put here in common code.  */
5757 static GTY((param1_is (tree), param2_is (tree)))
5758 splay_tree critical_name_mutexes;
5761 lower_omp_critical (gimple_stmt_iterator *gsi_p, omp_context *ctx)
5764 tree name, lock, unlock;
5765 gimple stmt = gsi_stmt (*gsi_p), bind;
5767 struct gimplify_ctx gctx;
5769 name = gimple_omp_critical_name (stmt);
  if (name)
    {
      splay_tree_node n;
      tree decl;

      if (!critical_name_mutexes)
5776 critical_name_mutexes
5777 = splay_tree_new_ggc (splay_tree_compare_pointers);
5779 n = splay_tree_lookup (critical_name_mutexes, (splay_tree_key) name);
      if (n == NULL)
	{
	  char *new_str;

	  decl = create_tmp_var_raw (ptr_type_node, NULL);
5786 new_str = ACONCAT ((".gomp_critical_user_",
5787 IDENTIFIER_POINTER (name), NULL));
5788 DECL_NAME (decl) = get_identifier (new_str);
5789 TREE_PUBLIC (decl) = 1;
5790 TREE_STATIC (decl) = 1;
5791 DECL_COMMON (decl) = 1;
5792 DECL_ARTIFICIAL (decl) = 1;
5793 DECL_IGNORED_P (decl) = 1;
5794 varpool_finalize_decl (decl);
5796 splay_tree_insert (critical_name_mutexes, (splay_tree_key) name,
5797 (splay_tree_value) decl);
	}
      else
	decl = (tree) n->value;
5802 lock = built_in_decls[BUILT_IN_GOMP_CRITICAL_NAME_START];
5803 lock = build_call_expr (lock, 1, build_fold_addr_expr (decl));
5805 unlock = built_in_decls[BUILT_IN_GOMP_CRITICAL_NAME_END];
5806 unlock = build_call_expr (unlock, 1, build_fold_addr_expr (decl));
    }
  else
    {
      lock = built_in_decls[BUILT_IN_GOMP_CRITICAL_START];
5811 lock = build_call_expr (lock, 0);
5813 unlock = built_in_decls[BUILT_IN_GOMP_CRITICAL_END];
      unlock = build_call_expr (unlock, 0);
    }
5817 push_gimplify_context (&gctx);
5819 block = make_node (BLOCK);
5820 bind = gimple_build_bind (NULL, gimple_seq_alloc_with_stmt (stmt), block);
5822 tbody = gimple_bind_body (bind);
5823 gimplify_and_add (lock, &tbody);
5824 gimple_bind_set_body (bind, tbody);
5826 lower_omp (gimple_omp_body (stmt), ctx);
5827 gimple_omp_set_body (stmt, maybe_catch_exception (gimple_omp_body (stmt)));
5828 gimple_bind_add_seq (bind, gimple_omp_body (stmt));
5829 gimple_omp_set_body (stmt, NULL);
5831 tbody = gimple_bind_body (bind);
5832 gimplify_and_add (unlock, &tbody);
5833 gimple_bind_set_body (bind, tbody);
5835 gimple_bind_add_stmt (bind, gimple_build_omp_return (true));
5837 pop_gimplify_context (bind);
5838 gimple_bind_append_vars (bind, ctx->block_vars);
5839 BLOCK_VARS (block) = gimple_bind_vars (bind);
5840 gsi_replace (gsi_p, bind, true);
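
/* For illustration: '#pragma omp critical (foo)' is lowered to roughly

       GOMP_critical_name_start (&.gomp_critical_user_foo);
       BODY;
       GOMP_critical_name_end (&.gomp_critical_user_foo);

   where .gomp_critical_user_foo is the common symbol created above, so
   every translation unit naming the same critical region shares one
   lock; the unnamed form calls GOMP_critical_start/end instead.
   (A sketch, not literal GIMPLE.)  */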
5844 /* A subroutine of lower_omp_for. Generate code to emit the predicate
5845 for a lastprivate clause. Given a loop control predicate of (V
5846 cond N2), we gate the clause on (!(V cond N2)). The lowered form
   is appended to *DLIST, iterator initialization is appended to
   *BODY_P.  */

static void
5851 lower_omp_for_lastprivate (struct omp_for_data *fd, gimple_seq *body_p,
5852 gimple_seq *dlist, struct omp_context *ctx)
5854 tree clauses, cond, vinit;
5855 enum tree_code cond_code;
5858 cond_code = fd->loop.cond_code;
5859 cond_code = cond_code == LT_EXPR ? GE_EXPR : LE_EXPR;
5861 /* When possible, use a strict equality expression. This can let VRP
5862 type optimizations deduce the value and remove a copy. */
5863 if (host_integerp (fd->loop.step, 0))
5865 HOST_WIDE_INT step = TREE_INT_CST_LOW (fd->loop.step);
5866 if (step == 1 || step == -1)
5867 cond_code = EQ_EXPR;
5870 cond = build2 (cond_code, boolean_type_node, fd->loop.v, fd->loop.n2);
5872 clauses = gimple_omp_for_clauses (fd->for_stmt);
  stmts = NULL;
  lower_lastprivate_clauses (clauses, cond, &stmts, ctx);
5875 if (!gimple_seq_empty_p (stmts))
    {
      gimple_seq_add_seq (&stmts, *dlist);
      *dlist = stmts;
5880 /* Optimize: v = 0; is usually cheaper than v = some_other_constant. */
5881 vinit = fd->loop.n1;
5882 if (cond_code == EQ_EXPR
5883 && host_integerp (fd->loop.n2, 0)
5884 && ! integer_zerop (fd->loop.n2))
5885 vinit = build_int_cst (TREE_TYPE (fd->loop.v), 0);
5887 /* Initialize the iterator variable, so that threads that don't execute
5888 any iterations don't execute the lastprivate clauses by accident. */
      gimplify_assign (fd->loop.v, vinit, body_p);
    }
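
/* A worked example of the gating above (illustrative): for
   'for (i = 0; i < 100; i++)' with unit step, cond_code LT first flips
   to GE and then strengthens to EQ, so the lastprivate copy-out runs
   under 'if (i == 100)'.  Presetting i to N1 (here 0) ensures a thread
   that received no iterations fails that test and skips the
   copy-out.  */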
5894 /* Lower code for an OpenMP loop directive. */
5897 lower_omp_for (gimple_stmt_iterator *gsi_p, omp_context *ctx)
5900 struct omp_for_data fd;
5901 gimple stmt = gsi_stmt (*gsi_p), new_stmt;
5902 gimple_seq omp_for_body, body, dlist, ilist;
5904 struct gimplify_ctx gctx;
5906 push_gimplify_context (&gctx);
5908 lower_omp (gimple_omp_for_pre_body (stmt), ctx);
5909 lower_omp (gimple_omp_body (stmt), ctx);
5911 block = make_node (BLOCK);
5912 new_stmt = gimple_build_bind (NULL, NULL, block);
  /* Move declarations of temporaries in the loop body before we make
     the loop body part of the outer bind.  */
5916 omp_for_body = gimple_omp_body (stmt);
5917 if (!gimple_seq_empty_p (omp_for_body)
5918 && gimple_code (gimple_seq_first_stmt (omp_for_body)) == GIMPLE_BIND)
5920 tree vars = gimple_bind_vars (gimple_seq_first_stmt (omp_for_body));
5921 gimple_bind_append_vars (new_stmt, vars);
5924 /* The pre-body and input clauses go before the lowered GIMPLE_OMP_FOR. */
5928 lower_rec_input_clauses (gimple_omp_for_clauses (stmt), &body, &dlist, ctx);
5929 gimple_seq_add_seq (&body, gimple_omp_for_pre_body (stmt));
5931 /* Lower the header expressions. At this point, we can assume that
5932 the header is of the form:
5934 #pragma omp for (V = VAL1; V {<|>|<=|>=} VAL2; V = V [+-] VAL3)
5936 We just need to make sure that VAL1, VAL2 and VAL3 are lowered
5937 using the .omp_data_s mapping, if needed. */
5938 for (i = 0; i < gimple_omp_for_collapse (stmt); i++)
    {
      rhs_p = gimple_omp_for_initial_ptr (stmt, i);
5941 if (!is_gimple_min_invariant (*rhs_p))
5942 *rhs_p = get_formal_tmp_var (*rhs_p, &body);
5944 rhs_p = gimple_omp_for_final_ptr (stmt, i);
5945 if (!is_gimple_min_invariant (*rhs_p))
5946 *rhs_p = get_formal_tmp_var (*rhs_p, &body);
5948 rhs_p = &TREE_OPERAND (gimple_omp_for_incr (stmt, i), 1);
5949 if (!is_gimple_min_invariant (*rhs_p))
	*rhs_p = get_formal_tmp_var (*rhs_p, &body);
    }
5953 /* Once lowered, extract the bounds and clauses. */
5954 extract_omp_for_data (stmt, &fd, NULL);
5956 lower_omp_for_lastprivate (&fd, &body, &dlist, ctx);
5958 gimple_seq_add_stmt (&body, stmt);
5959 gimple_seq_add_seq (&body, gimple_omp_body (stmt));
  gimple_seq_add_stmt (&body, gimple_build_omp_continue (fd.loop.v,
							 fd.loop.v));
5964 /* After the loop, add exit clauses. */
5965 lower_reduction_clauses (gimple_omp_for_clauses (stmt), &body, ctx);
5966 gimple_seq_add_seq (&body, dlist);
5968 body = maybe_catch_exception (body);
5970 /* Region exit marker goes at the end of the loop body. */
5971 gimple_seq_add_stmt (&body, gimple_build_omp_return (fd.have_nowait));
5973 pop_gimplify_context (new_stmt);
5975 gimple_bind_append_vars (new_stmt, ctx->block_vars);
5976 BLOCK_VARS (block) = gimple_bind_vars (new_stmt);
5977 if (BLOCK_VARS (block))
5978 TREE_USED (block) = 1;
5980 gimple_bind_set_body (new_stmt, body);
5981 gimple_omp_set_body (stmt, NULL);
5982 gimple_omp_for_set_pre_body (stmt, NULL);
5983 gsi_replace (gsi_p, new_stmt, true);
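
/* For illustration, after this lowering a worksharing loop has roughly
   the shape (a sketch of the sequence assembled above):

       <input-clause setup: private/firstprivate copies>
       <pre-body>
       GIMPLE_OMP_FOR (V = N1; V cond N2; V += STEP)
	 <loop body>
       GIMPLE_OMP_CONTINUE (V, V)
       <reduction combination code>
       <lastprivate/destructor code>
       GIMPLE_OMP_RETURN (nowait?)

   which pass_expand_omp later turns into explicit control flow and
   libgomp calls.  */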
/* Callback for walk_stmts.  Check if the current statement only contains
   GIMPLE_OMP_FOR or GIMPLE_OMP_SECTIONS.  */
5990 check_combined_parallel (gimple_stmt_iterator *gsi_p,
5991 bool *handled_ops_p,
5992 struct walk_stmt_info *wi)
5994 int *info = (int *) wi->info;
5995 gimple stmt = gsi_stmt (*gsi_p);
5997 *handled_ops_p = true;
  switch (gimple_code (stmt))
    {
    case GIMPLE_OMP_FOR:
    case GIMPLE_OMP_SECTIONS:
      *info = *info == 0 ? 1 : -1;
      break;
    default:
      *info = -1;
      break;
    }
  return NULL_TREE;
6013 struct omp_taskcopy_context
6015 /* This field must be at the beginning, as we do "inheritance": Some
6016 callback functions for tree-inline.c (e.g., omp_copy_decl)
6017 receive a copy_body_data pointer that is up-casted to an
6018 omp_context pointer. */
  copy_body_data cb;
  omp_context *ctx;
};

static tree
task_copyfn_copy_decl (tree var, copy_body_data *cb)
6026 struct omp_taskcopy_context *tcctx = (struct omp_taskcopy_context *) cb;
6028 if (splay_tree_lookup (tcctx->ctx->sfield_map, (splay_tree_key) var))
    return create_tmp_var (TREE_TYPE (var), NULL);
  else
    return var;
6035 task_copyfn_remap_type (struct omp_taskcopy_context *tcctx, tree orig_type)
6037 tree name, new_fields = NULL, type, f;
6039 type = lang_hooks.types.make_type (RECORD_TYPE);
6040 name = DECL_NAME (TYPE_NAME (orig_type));
6041 name = build_decl (TYPE_DECL, name, type);
6042 TYPE_NAME (type) = name;
6044 for (f = TYPE_FIELDS (orig_type); f ; f = TREE_CHAIN (f))
    {
      tree new_f = copy_node (f);
6047 DECL_CONTEXT (new_f) = type;
6048 TREE_TYPE (new_f) = remap_type (TREE_TYPE (f), &tcctx->cb);
6049 TREE_CHAIN (new_f) = new_fields;
6050 walk_tree (&DECL_SIZE (new_f), copy_tree_body_r, &tcctx->cb, NULL);
6051 walk_tree (&DECL_SIZE_UNIT (new_f), copy_tree_body_r, &tcctx->cb, NULL);
6052 walk_tree (&DECL_FIELD_OFFSET (new_f), copy_tree_body_r,
		 &tcctx->cb, NULL);
      new_fields = new_f;
      *pointer_map_insert (tcctx->cb.decl_map, f) = new_f;
    }
  TYPE_FIELDS (type) = nreverse (new_fields);
  return type;
6062 /* Create task copyfn. */
6065 create_task_copyfn (gimple task_stmt, omp_context *ctx)
6067 struct function *child_cfun;
6068 tree child_fn, t, c, src, dst, f, sf, arg, sarg, decl;
6069 tree record_type, srecord_type, bind, list;
6070 bool record_needs_remap = false, srecord_needs_remap = false;
6072 struct omp_taskcopy_context tcctx;
6073 struct gimplify_ctx gctx;
6075 child_fn = gimple_omp_task_copy_fn (task_stmt);
6076 child_cfun = DECL_STRUCT_FUNCTION (child_fn);
6077 gcc_assert (child_cfun->cfg == NULL);
6078 child_cfun->dont_save_pending_sizes_p = 1;
6079 DECL_SAVED_TREE (child_fn) = alloc_stmt_list ();
6081 /* Reset DECL_CONTEXT on function arguments. */
6082 for (t = DECL_ARGUMENTS (child_fn); t; t = TREE_CHAIN (t))
6083 DECL_CONTEXT (t) = child_fn;
6085 /* Populate the function. */
6086 push_gimplify_context (&gctx);
6087 current_function_decl = child_fn;
6089 bind = build3 (BIND_EXPR, void_type_node, NULL, NULL, NULL);
6090 TREE_SIDE_EFFECTS (bind) = 1;
6092 DECL_SAVED_TREE (child_fn) = bind;
6093 DECL_SOURCE_LOCATION (child_fn) = gimple_location (task_stmt);
6095 /* Remap src and dst argument types if needed. */
6096 record_type = ctx->record_type;
6097 srecord_type = ctx->srecord_type;
6098 for (f = TYPE_FIELDS (record_type); f ; f = TREE_CHAIN (f))
6099 if (variably_modified_type_p (TREE_TYPE (f), ctx->cb.src_fn))
6101 record_needs_remap = true;
6104 for (f = TYPE_FIELDS (srecord_type); f ; f = TREE_CHAIN (f))
6105 if (variably_modified_type_p (TREE_TYPE (f), ctx->cb.src_fn))
6107 srecord_needs_remap = true;
6111 if (record_needs_remap || srecord_needs_remap)
    {
      memset (&tcctx, '\0', sizeof (tcctx));
6114 tcctx.cb.src_fn = ctx->cb.src_fn;
6115 tcctx.cb.dst_fn = child_fn;
6116 tcctx.cb.src_node = cgraph_node (tcctx.cb.src_fn);
6117 tcctx.cb.dst_node = tcctx.cb.src_node;
6118 tcctx.cb.src_cfun = ctx->cb.src_cfun;
6119 tcctx.cb.copy_decl = task_copyfn_copy_decl;
6120 tcctx.cb.eh_region = -1;
6121 tcctx.cb.transform_call_graph_edges = CB_CGE_MOVE;
6122 tcctx.cb.decl_map = pointer_map_create ();
      tcctx.ctx = ctx;

      if (record_needs_remap)
6126 record_type = task_copyfn_remap_type (&tcctx, record_type);
6127 if (srecord_needs_remap)
6128 srecord_type = task_copyfn_remap_type (&tcctx, srecord_type);
    }
  else
    tcctx.cb.decl_map = NULL;
6133 push_cfun (child_cfun);
6135 arg = DECL_ARGUMENTS (child_fn);
6136 TREE_TYPE (arg) = build_pointer_type (record_type);
6137 sarg = TREE_CHAIN (arg);
6138 TREE_TYPE (sarg) = build_pointer_type (srecord_type);
6140 /* First pass: initialize temporaries used in record_type and srecord_type
6141 sizes and field offsets. */
6142 if (tcctx.cb.decl_map)
6143 for (c = gimple_omp_task_clauses (task_stmt); c; c = OMP_CLAUSE_CHAIN (c))
6144 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_FIRSTPRIVATE)
	{
	  tree *p;

	  decl = OMP_CLAUSE_DECL (c);
6149 p = (tree *) pointer_map_contains (tcctx.cb.decl_map, decl);
	  if (p == NULL)
	    continue;
	  n = splay_tree_lookup (ctx->sfield_map, (splay_tree_key) decl);
6153 sf = (tree) n->value;
6154 sf = *(tree *) pointer_map_contains (tcctx.cb.decl_map, sf);
6155 src = build_fold_indirect_ref (sarg);
6156 src = build3 (COMPONENT_REF, TREE_TYPE (sf), src, sf, NULL);
6157 t = build2 (MODIFY_EXPR, TREE_TYPE (*p), *p, src);
	  append_to_statement_list (t, &list);
	}
6161 /* Second pass: copy shared var pointers and copy construct non-VLA
6162 firstprivate vars. */
6163 for (c = gimple_omp_task_clauses (task_stmt); c; c = OMP_CLAUSE_CHAIN (c))
    switch (OMP_CLAUSE_CODE (c))
      {
6166 case OMP_CLAUSE_SHARED:
6167 decl = OMP_CLAUSE_DECL (c);
6168 n = splay_tree_lookup (ctx->field_map, (splay_tree_key) decl);
6171 f = (tree) n->value;
6172 if (tcctx.cb.decl_map)
6173 f = *(tree *) pointer_map_contains (tcctx.cb.decl_map, f);
6174 n = splay_tree_lookup (ctx->sfield_map, (splay_tree_key) decl);
6175 sf = (tree) n->value;
6176 if (tcctx.cb.decl_map)
6177 sf = *(tree *) pointer_map_contains (tcctx.cb.decl_map, sf);
6178 src = build_fold_indirect_ref (sarg);
6179 src = build3 (COMPONENT_REF, TREE_TYPE (sf), src, sf, NULL);
6180 dst = build_fold_indirect_ref (arg);
6181 dst = build3 (COMPONENT_REF, TREE_TYPE (f), dst, f, NULL);
6182 t = build2 (MODIFY_EXPR, TREE_TYPE (dst), dst, src);
6183 append_to_statement_list (t, &list);
	break;
      case OMP_CLAUSE_FIRSTPRIVATE:
6186 decl = OMP_CLAUSE_DECL (c);
6187 if (is_variable_sized (decl))
	  break;
	n = splay_tree_lookup (ctx->field_map, (splay_tree_key) decl);
	if (n == NULL)
	  break;
	f = (tree) n->value;
6193 if (tcctx.cb.decl_map)
6194 f = *(tree *) pointer_map_contains (tcctx.cb.decl_map, f);
6195 n = splay_tree_lookup (ctx->sfield_map, (splay_tree_key) decl);
6198 sf = (tree) n->value;
6199 if (tcctx.cb.decl_map)
6200 sf = *(tree *) pointer_map_contains (tcctx.cb.decl_map, sf);
6201 src = build_fold_indirect_ref (sarg);
6202 src = build3 (COMPONENT_REF, TREE_TYPE (sf), src, sf, NULL);
6203 if (use_pointer_for_field (decl, NULL) || is_reference (decl))
6204 src = build_fold_indirect_ref (src);
6208 dst = build_fold_indirect_ref (arg);
6209 dst = build3 (COMPONENT_REF, TREE_TYPE (f), dst, f, NULL);
6210 t = lang_hooks.decls.omp_clause_copy_ctor (c, dst, src);
6211 append_to_statement_list (t, &list);
	break;
      case OMP_CLAUSE_PRIVATE:
6214 if (! OMP_CLAUSE_PRIVATE_OUTER_REF (c))
	  break;
	decl = OMP_CLAUSE_DECL (c);
6217 n = splay_tree_lookup (ctx->field_map, (splay_tree_key) decl);
6218 f = (tree) n->value;
6219 if (tcctx.cb.decl_map)
6220 f = *(tree *) pointer_map_contains (tcctx.cb.decl_map, f);
6221 n = splay_tree_lookup (ctx->sfield_map, (splay_tree_key) decl);
6224 sf = (tree) n->value;
6225 if (tcctx.cb.decl_map)
6226 sf = *(tree *) pointer_map_contains (tcctx.cb.decl_map, sf);
6227 src = build_fold_indirect_ref (sarg);
6228 src = build3 (COMPONENT_REF, TREE_TYPE (sf), src, sf, NULL);
6229 if (use_pointer_for_field (decl, NULL))
6230 src = build_fold_indirect_ref (src);
6234 dst = build_fold_indirect_ref (arg);
6235 dst = build3 (COMPONENT_REF, TREE_TYPE (f), dst, f, NULL);
6236 t = build2 (MODIFY_EXPR, TREE_TYPE (dst), dst, src);
	append_to_statement_list (t, &list);
	break;
      default:
	break;
      }
6243 /* Last pass: handle VLA firstprivates. */
6244 if (tcctx.cb.decl_map)
6245 for (c = gimple_omp_task_clauses (task_stmt); c; c = OMP_CLAUSE_CHAIN (c))
6246 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_FIRSTPRIVATE)
      {
	tree ind, ptr, df;

	decl = OMP_CLAUSE_DECL (c);
6251 if (!is_variable_sized (decl))
	  continue;
	n = splay_tree_lookup (ctx->field_map, (splay_tree_key) decl);
	if (n == NULL)
	  continue;
	f = (tree) n->value;
6257 f = *(tree *) pointer_map_contains (tcctx.cb.decl_map, f);
6258 gcc_assert (DECL_HAS_VALUE_EXPR_P (decl));
6259 ind = DECL_VALUE_EXPR (decl);
6260 gcc_assert (TREE_CODE (ind) == INDIRECT_REF);
6261 gcc_assert (DECL_P (TREE_OPERAND (ind, 0)));
6262 n = splay_tree_lookup (ctx->sfield_map,
6263 (splay_tree_key) TREE_OPERAND (ind, 0));
6264 sf = (tree) n->value;
6265 sf = *(tree *) pointer_map_contains (tcctx.cb.decl_map, sf);
6266 src = build_fold_indirect_ref (sarg);
6267 src = build3 (COMPONENT_REF, TREE_TYPE (sf), src, sf, NULL);
6268 src = build_fold_indirect_ref (src);
6269 dst = build_fold_indirect_ref (arg);
6270 dst = build3 (COMPONENT_REF, TREE_TYPE (f), dst, f, NULL);
6271 t = lang_hooks.decls.omp_clause_copy_ctor (c, dst, src);
6272 append_to_statement_list (t, &list);
6273 n = splay_tree_lookup (ctx->field_map,
6274 (splay_tree_key) TREE_OPERAND (ind, 0));
6275 df = (tree) n->value;
6276 df = *(tree *) pointer_map_contains (tcctx.cb.decl_map, df);
6277 ptr = build_fold_indirect_ref (arg);
6278 ptr = build3 (COMPONENT_REF, TREE_TYPE (df), ptr, df, NULL);
6279 t = build2 (MODIFY_EXPR, TREE_TYPE (ptr), ptr,
6280 build_fold_addr_expr (dst));
	append_to_statement_list (t, &list);
      }
6284 t = build1 (RETURN_EXPR, void_type_node, NULL);
6285 append_to_statement_list (t, &list);
6287 if (tcctx.cb.decl_map)
6288 pointer_map_destroy (tcctx.cb.decl_map);
6289 pop_gimplify_context (NULL);
6290 BIND_EXPR_BODY (bind) = list;
6292 current_function_decl = ctx->cb.src_fn;
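
/* For illustration: for '#pragma omp task firstprivate (x) shared (s)',
   the copy function built above behaves roughly like the following
   (names invented for exposition; the real function works from the
   record/srecord field lists):

       void task_copyfn (struct omp_data_o *dst, struct omp_data_i *src)
       {
	 dst->s = src->s;	// shared: copy the pointer
	 dst->x = src->x;	// firstprivate: copy-construct the value
       }

   GOMP_task invokes it to initialize the child task's data block from
   the data captured where the task construct was encountered.  */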
6295 /* Lower the OpenMP parallel or task directive in the current statement
6296 in GSI_P. CTX holds context information for the directive. */
6299 lower_omp_taskreg (gimple_stmt_iterator *gsi_p, omp_context *ctx)
6303 gimple stmt = gsi_stmt (*gsi_p);
6304 gimple par_bind, bind;
6305 gimple_seq par_body, olist, ilist, par_olist, par_ilist, new_body;
6306 struct gimplify_ctx gctx;
  clauses = gimple_omp_taskreg_clauses (stmt);
  par_bind = gimple_seq_first_stmt (gimple_omp_body (stmt));
  par_body = gimple_bind_body (par_bind);
  child_fn = ctx->cb.dst_fn;
  if (gimple_code (stmt) == GIMPLE_OMP_PARALLEL
      && !gimple_omp_parallel_combined_p (stmt))
    {
      struct walk_stmt_info wi;
      int ws_num = 0;

      memset (&wi, 0, sizeof (wi));
      wi.info = &ws_num;
      wi.val_only = true;
      walk_gimple_seq (par_body, check_combined_parallel, NULL, &wi);
      if (ws_num == 1)
        gimple_omp_parallel_set_combined_p (stmt, true);
    }
  if (ctx->srecord_type)
    create_task_copyfn (stmt, ctx);

  push_gimplify_context (&gctx);

  par_olist = NULL;
  par_ilist = NULL;
  lower_rec_input_clauses (clauses, &par_ilist, &par_olist, ctx);
  lower_omp (par_body, ctx);
  if (gimple_code (stmt) == GIMPLE_OMP_PARALLEL)
    lower_reduction_clauses (clauses, &par_olist, ctx);
  /* Declare all the variables created by mapping and the variables
     declared in the scope of the parallel body.  */
  record_vars_into (ctx->block_vars, child_fn);
  record_vars_into (gimple_bind_vars (par_bind), child_fn);

  if (ctx->record_type)
    {
      ctx->sender_decl
        = create_tmp_var (ctx->srecord_type ? ctx->srecord_type
                          : ctx->record_type, ".omp_data_o");
      gimple_omp_taskreg_set_data_arg (stmt, ctx->sender_decl);
    }

  olist = NULL;
  ilist = NULL;
  lower_send_clauses (clauses, &ilist, &olist, ctx);
  lower_send_shared_vars (&ilist, &olist, ctx);

  /* Once all the expansions are done, sequence all the different
     fragments inside gimple_omp_body.  */
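  /* A sketch of the body laid out below (the receiver assignment is
     emitted only when there is a record type):

       .omp_data_i = &.omp_data_o;
       <par_ilist>	receive and initialize clause copies
       <par_body>
       <par_olist>	copy-back and reduction code
       OMP_RETURN  */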
  new_body = NULL;

  if (ctx->record_type)
    {
      t = build_fold_addr_expr (ctx->sender_decl);
      /* fixup_child_record_type might have changed receiver_decl's type.  */
      t = fold_convert (TREE_TYPE (ctx->receiver_decl), t);
      gimple_seq_add_stmt (&new_body,
                           gimple_build_assign (ctx->receiver_decl, t));
    }

  gimple_seq_add_seq (&new_body, par_ilist);
  gimple_seq_add_seq (&new_body, par_body);
  gimple_seq_add_seq (&new_body, par_olist);
  new_body = maybe_catch_exception (new_body);
  gimple_seq_add_stmt (&new_body, gimple_build_omp_return (false));
  gimple_omp_set_body (stmt, new_body);

  bind = gimple_build_bind (NULL, NULL, gimple_bind_block (par_bind));
  gimple_bind_add_stmt (bind, stmt);
  if (ilist || olist)
    {
      gimple_seq_add_stmt (&ilist, bind);
      gimple_seq_add_seq (&ilist, olist);
      bind = gimple_build_bind (NULL, ilist, NULL);
    }

  gsi_replace (gsi_p, bind, true);

  pop_gimplify_context (NULL);
}
/* Callback for lower_omp_1.  Return non-NULL if *tp needs to be
   regimplified.  If DATA is non-NULL, lower_omp_1 is outside
   of OpenMP context, but with task_shared_vars set.  */
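/* For instance (an illustrative case): when lowering gives a
   variable-sized DECL A a DECL_VALUE_EXPR of the form *a.ptr, any
   statement that still mentions A directly must be regimplified so
   that the value expression is expanded into the operand.  */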
static tree
lower_omp_regimplify_p (tree *tp, int *walk_subtrees,
                        void *data)
{
  tree t = *tp;

  /* Any variable with DECL_VALUE_EXPR needs to be regimplified.  */
  if (TREE_CODE (t) == VAR_DECL && data == NULL && DECL_HAS_VALUE_EXPR_P (t))
    return t;

  if (task_shared_vars
      && DECL_P (t)
      && bitmap_bit_p (task_shared_vars, DECL_UID (t)))
    return t;

  /* If a global variable has been privatized, TREE_CONSTANT on
     ADDR_EXPR might be wrong.  */
  if (data == NULL && TREE_CODE (t) == ADDR_EXPR)
    recompute_tree_invariant_for_addr_expr (t);

  *walk_subtrees = !TYPE_P (t) && !DECL_P (t);
  return NULL_TREE;
}
/* Lower the statement pointed to by GSI_P within context CTX,
   dispatching on its gimple code.  */

static void
lower_omp_1 (gimple_stmt_iterator *gsi_p, omp_context *ctx)
{
  gimple stmt = gsi_stmt (*gsi_p);
  struct walk_stmt_info wi;

  if (gimple_has_location (stmt))
    input_location = gimple_location (stmt);

  if (task_shared_vars)
    memset (&wi, '\0', sizeof (wi));

  /* If we have issued syntax errors, avoid doing any heavy lifting.
     Just replace the OpenMP directives with a NOP to avoid
     confusing RTL expansion.  */
  if (errorcount && is_gimple_omp (stmt))
    {
      gsi_replace (gsi_p, gimple_build_nop (), true);
      return;
    }

  switch (gimple_code (stmt))
    {
    case GIMPLE_COND:
      if ((ctx || task_shared_vars)
          && (walk_tree (gimple_cond_lhs_ptr (stmt), lower_omp_regimplify_p,
                         ctx ? NULL : &wi, NULL)
              || walk_tree (gimple_cond_rhs_ptr (stmt), lower_omp_regimplify_p,
                            ctx ? NULL : &wi, NULL)))
        gimple_regimplify_operands (stmt, gsi_p);
      break;
    case GIMPLE_CATCH:
      lower_omp (gimple_catch_handler (stmt), ctx);
      break;
    case GIMPLE_EH_FILTER:
      lower_omp (gimple_eh_filter_failure (stmt), ctx);
      break;
    case GIMPLE_TRY:
      lower_omp (gimple_try_eval (stmt), ctx);
      lower_omp (gimple_try_cleanup (stmt), ctx);
      break;
    case GIMPLE_BIND:
      lower_omp (gimple_bind_body (stmt), ctx);
      break;
    case GIMPLE_OMP_PARALLEL:
    case GIMPLE_OMP_TASK:
      ctx = maybe_lookup_ctx (stmt);
      lower_omp_taskreg (gsi_p, ctx);
      break;
    case GIMPLE_OMP_FOR:
      ctx = maybe_lookup_ctx (stmt);
      gcc_assert (ctx);
      lower_omp_for (gsi_p, ctx);
      break;
    case GIMPLE_OMP_SECTIONS:
      ctx = maybe_lookup_ctx (stmt);
      gcc_assert (ctx);
      lower_omp_sections (gsi_p, ctx);
      break;
    case GIMPLE_OMP_SINGLE:
      ctx = maybe_lookup_ctx (stmt);
      gcc_assert (ctx);
      lower_omp_single (gsi_p, ctx);
      break;
    case GIMPLE_OMP_MASTER:
      ctx = maybe_lookup_ctx (stmt);
      gcc_assert (ctx);
      lower_omp_master (gsi_p, ctx);
      break;
    case GIMPLE_OMP_ORDERED:
      ctx = maybe_lookup_ctx (stmt);
      gcc_assert (ctx);
      lower_omp_ordered (gsi_p, ctx);
      break;
    case GIMPLE_OMP_CRITICAL:
      ctx = maybe_lookup_ctx (stmt);
      gcc_assert (ctx);
      lower_omp_critical (gsi_p, ctx);
      break;
    case GIMPLE_OMP_ATOMIC_LOAD:
      if ((ctx || task_shared_vars)
          && walk_tree (gimple_omp_atomic_load_rhs_ptr (stmt),
                        lower_omp_regimplify_p, ctx ? NULL : &wi, NULL))
        gimple_regimplify_operands (stmt, gsi_p);
      break;
    default:
      if ((ctx || task_shared_vars)
          && walk_gimple_op (stmt, lower_omp_regimplify_p,
                             ctx ? NULL : &wi))
        gimple_regimplify_operands (stmt, gsi_p);
      break;
    }
}
/* Lower all statements in the sequence BODY within context CTX.  */

static void
lower_omp (gimple_seq body, omp_context *ctx)
{
  location_t saved_location = input_location;
  gimple_stmt_iterator gsi;

  for (gsi = gsi_start (body); !gsi_end_p (gsi); gsi_next (&gsi))
    lower_omp_1 (&gsi, ctx);
  input_location = saved_location;
}
/* Main entry point.  */

static unsigned int
execute_lower_omp (void)
{
  gimple_seq body;

  all_contexts = splay_tree_new (splay_tree_compare_pointers, 0,
                                 delete_omp_context);

  body = gimple_body (current_function_decl);
  scan_omp (body, NULL);
  gcc_assert (taskreg_nesting_level == 0);

  if (all_contexts->root)
    {
      struct gimplify_ctx gctx;

      if (task_shared_vars)
        push_gimplify_context (&gctx);
      lower_omp (body, NULL);
      if (task_shared_vars)
        pop_gimplify_context (NULL);
    }

  if (all_contexts)
    {
      splay_tree_delete (all_contexts);
      all_contexts = NULL;
    }
  BITMAP_FREE (task_shared_vars);
  return 0;
}
static bool
gate_lower_omp (void)
{
  return flag_openmp != 0;
}

struct gimple_opt_pass pass_lower_omp =
{
 {
  GIMPLE_PASS,
  "omplower",                           /* name */
  gate_lower_omp,                       /* gate */
  execute_lower_omp,                    /* execute */
  NULL,                                 /* sub */
  NULL,                                 /* next */
  0,                                    /* static_pass_number */
  0,                                    /* tv_id */
  PROP_gimple_any,                      /* properties_required */
  PROP_gimple_lomp,                     /* properties_provided */
  0,                                    /* properties_destroyed */
  0,                                    /* todo_flags_start */
  TODO_dump_func                        /* todo_flags_finish */
 }
};
/* The following is a utility to diagnose OpenMP structured block violations.
   It is not part of the "omplower" pass, as that's invoked too late.  It
   should be invoked by the respective front ends after gimplification.  */
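/* For example (an illustrative test case), given

     goto l;
     #pragma omp parallel
       { l:; }

   pass 1 below records that label L is defined inside the parallel
   construct, and pass 2 then reports the goto as an invalid entry
   into an OpenMP structured block.  */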
static splay_tree all_labels;

/* Check for mismatched contexts and generate an error if needed.  Return
   true if an error is detected.  */
static bool
diagnose_sb_0 (gimple_stmt_iterator *gsi_p,
               gimple branch_ctx, gimple label_ctx)
{
  if (label_ctx == branch_ctx)
    return false;

  /*
     Previously we kept track of the label's entire context in diagnose_sb_[12]
     so we could traverse it and issue a correct "exit" or "enter" error
     message upon a structured block violation.

     We built the context by building a list with tree_cons'ing, but there is
     no easy counterpart in gimple tuples.  It seems like far too much work
     for issuing exit/enter error messages.  If someone really misses the
     distinct error message... patches welcome.
   */

#if 0
  /* Try to avoid confusing the user by producing an error message
     with correct "exit" or "enter" verbiage.  We prefer "exit"
     unless we can show that LABEL_CTX is nested within BRANCH_CTX.  */
  if (branch_ctx == NULL)
    exit_p = false;
  else
    {
      while (label_ctx)
        {
          if (TREE_VALUE (label_ctx) == branch_ctx)
            {
              exit_p = false;
              break;
            }
          label_ctx = TREE_CHAIN (label_ctx);
        }
    }

  if (exit_p)
    error ("invalid exit from OpenMP structured block");
  else
    error ("invalid entry to OpenMP structured block");
#endif

  /* If it's obvious we have an invalid entry, be specific about the error.  */
  if (branch_ctx == NULL)
    error ("invalid entry to OpenMP structured block");
  else
    /* Otherwise, be vague and lazy, but efficient.  */
    error ("invalid branch to/from an OpenMP structured block");

  gsi_replace (gsi_p, gimple_build_nop (), false);
  return true;
}
/* Pass 1: Create a minimal tree of OpenMP structured blocks, and record
   where each label is found.  */
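/* For instance (an illustrative fragment), in

     #pragma omp parallel	<- context P
       {
         l1:;			<- L1 recorded with context P
         #pragma omp single	<- context S
           { l2:; }		<- L2 recorded with context S
       }

   each label is keyed in ALL_LABELS to its innermost enclosing OMP
   statement, which is all pass 2 needs in order to compare the
   context of a branch against the context of its target label.  */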
static tree
diagnose_sb_1 (gimple_stmt_iterator *gsi_p, bool *handled_ops_p,
               struct walk_stmt_info *wi)
{
  gimple context = (gimple) wi->info;
  gimple inner_context;
  gimple stmt = gsi_stmt (*gsi_p);

  *handled_ops_p = true;

  switch (gimple_code (stmt))
    {
    WALK_SUBSTMTS;

    case GIMPLE_OMP_PARALLEL:
    case GIMPLE_OMP_TASK:
    case GIMPLE_OMP_SECTIONS:
    case GIMPLE_OMP_SINGLE:
    case GIMPLE_OMP_SECTION:
    case GIMPLE_OMP_MASTER:
    case GIMPLE_OMP_ORDERED:
    case GIMPLE_OMP_CRITICAL:
      /* The minimal context here is just the current OMP construct.  */
      inner_context = stmt;
      wi->info = inner_context;
      walk_gimple_seq (gimple_omp_body (stmt), diagnose_sb_1, NULL, wi);
      wi->info = context;
      break;

    case GIMPLE_OMP_FOR:
      inner_context = stmt;
      wi->info = inner_context;
      /* gimple_omp_for_{index,initial,final} are all DECLs; no need to
         walk them.  */
      walk_gimple_seq (gimple_omp_for_pre_body (stmt),
                       diagnose_sb_1, NULL, wi);
      walk_gimple_seq (gimple_omp_body (stmt), diagnose_sb_1, NULL, wi);
      wi->info = context;
      break;

    case GIMPLE_LABEL:
      splay_tree_insert (all_labels, (splay_tree_key) gimple_label_label (stmt),
                         (splay_tree_value) context);
      break;

    default:
      break;
    }

  return NULL_TREE;
}
/* Pass 2: Check each branch and see if its context differs from that of
   the destination label's context.  */
static tree
diagnose_sb_2 (gimple_stmt_iterator *gsi_p, bool *handled_ops_p,
               struct walk_stmt_info *wi)
{
  gimple context = (gimple) wi->info;
  splay_tree_node n;
  gimple stmt = gsi_stmt (*gsi_p);

  *handled_ops_p = true;

  switch (gimple_code (stmt))
    {
    WALK_SUBSTMTS;

    case GIMPLE_OMP_PARALLEL:
    case GIMPLE_OMP_TASK:
    case GIMPLE_OMP_SECTIONS:
    case GIMPLE_OMP_SINGLE:
    case GIMPLE_OMP_SECTION:
    case GIMPLE_OMP_MASTER:
    case GIMPLE_OMP_ORDERED:
    case GIMPLE_OMP_CRITICAL:
      wi->info = stmt;
      walk_gimple_seq (gimple_omp_body (stmt), diagnose_sb_2, NULL, wi);
      wi->info = context;
      break;

    case GIMPLE_OMP_FOR:
      wi->info = stmt;
      /* gimple_omp_for_{index,initial,final} are all DECLs; no need to
         walk them.  */
      walk_gimple_seq (gimple_omp_for_pre_body (stmt),
                       diagnose_sb_2, NULL, wi);
      walk_gimple_seq (gimple_omp_body (stmt), diagnose_sb_2, NULL, wi);
      wi->info = context;
      break;

    case GIMPLE_GOTO:
      {
        tree lab = gimple_goto_dest (stmt);
        if (TREE_CODE (lab) != LABEL_DECL)
          break;

        n = splay_tree_lookup (all_labels, (splay_tree_key) lab);
        diagnose_sb_0 (gsi_p, context, n ? (gimple) n->value : NULL);
      }
      break;

    case GIMPLE_SWITCH:
      {
        unsigned int i;
        for (i = 0; i < gimple_switch_num_labels (stmt); ++i)
          {
            tree lab = CASE_LABEL (gimple_switch_label (stmt, i));
            n = splay_tree_lookup (all_labels, (splay_tree_key) lab);
            if (n && diagnose_sb_0 (gsi_p, context, (gimple) n->value))
              break;
          }
      }
      break;

    case GIMPLE_RETURN:
      diagnose_sb_0 (gsi_p, context, NULL);
      break;

    default:
      break;
    }

  return NULL_TREE;
}
/* Diagnose invalid branches into or out of OpenMP structured blocks
   in the body of FNDECL.  */

void
diagnose_omp_structured_block_errors (tree fndecl)
{
  tree save_current = current_function_decl;
  struct walk_stmt_info wi;
  struct function *old_cfun = cfun;
  gimple_seq body = gimple_body (fndecl);

  current_function_decl = fndecl;
  set_cfun (DECL_STRUCT_FUNCTION (fndecl));

  all_labels = splay_tree_new (splay_tree_compare_pointers, 0, 0);

  memset (&wi, 0, sizeof (wi));
  walk_gimple_seq (body, diagnose_sb_1, NULL, &wi);

  memset (&wi, 0, sizeof (wi));
  wi.want_locations = true;
  walk_gimple_seq (body, diagnose_sb_2, NULL, &wi);

  splay_tree_delete (all_labels);
  all_labels = NULL;

  set_cfun (old_cfun);
  current_function_decl = save_current;
}

#include "gt-omp-low.h"