/* Lowering pass for OpenMP directives.  Converts OpenMP directives
   into explicit calls to the runtime library (libgomp) and data
   marshalling to implement data sharing and copying clauses.
   Contributed by Diego Novillo <dnovillo@redhat.com>

   Copyright (C) 2005, 2006, 2007, 2008, 2009, 2010
   Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "tree.h"
#include "gimple.h"
#include "tree-iterator.h"
#include "tree-inline.h"
#include "langhooks.h"
#include "diagnostic-core.h"
#include "tree-flow.h"
#include "timevar.h"
#include "flags.h"
#include "function.h"
#include "expr.h"
#include "tree-pass.h"
#include "ggc.h"
#include "except.h"
#include "splay-tree.h"
#include "optabs.h"
#include "cfgloop.h"

/* Lowering of OpenMP parallel and workshare constructs proceeds in two
   phases.  The first phase scans the function looking for OMP statements
   and then for variables that must be replaced to satisfy data sharing
   clauses.  The second phase expands code for the constructs, as well as
   re-gimplifying things when variables have been replaced with complex
   expressions.

   Final code generation is done by pass_expand_omp.  The flowgraph is
   scanned for parallel regions which are then moved to a new
   function, to be invoked by the thread library.  */

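/* For illustration (this sketch is not generated verbatim by this file):
   a parallel region such as

	#pragma omp parallel shared (a)
	  body;

   ends up, after lowering and expansion, roughly in the shape

	.omp_data_o.a = a;
	__builtin_GOMP_parallel_start (foo._omp_fn.0, &.omp_data_o, 0);
	foo._omp_fn.0 (&.omp_data_o);
	__builtin_GOMP_parallel_end ();

   with the body outlined into the artificial child function
   foo._omp_fn.0 and shared state communicated through the local
   .omp_data_s structure.  The exact runtime entry points and argument
   lists are libgomp details; this only shows the overall division of
   labor between this pass and pass_expand_omp.  */
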
/* Context structure.  Used to store information about each parallel
   directive in the code.  */

typedef struct omp_context
{
  /* This field must be at the beginning, as we do "inheritance":  Some
     callback functions for tree-inline.c (e.g., omp_copy_decl)
     receive a copy_body_data pointer that is up-casted to an
     omp_context pointer.  */
  copy_body_data cb;

  /* The tree of contexts corresponding to the encountered constructs.  */
  struct omp_context *outer;
  gimple stmt;

  /* Map variables to fields in a structure that allows communication
     between sending and receiving threads.  */
  splay_tree field_map;
  tree record_type;
  tree sender_decl;
  tree receiver_decl;

  /* These are used just by task contexts, if task firstprivate fn is
     needed.  srecord_type is used to communicate from the thread
     that encountered the task construct to task firstprivate fn,
     record_type is allocated by GOMP_task, initialized by task firstprivate
     fn and passed to the task body fn.  */
  splay_tree sfield_map;
  tree srecord_type;

  /* A chain of variables to add to the top-level block surrounding the
     construct.  In the case of a parallel, this is in the child function.  */
  tree block_vars;

  /* What to do with variables with implicitly determined sharing
     attributes.  */
  enum omp_clause_default_kind default_kind;

  /* Nesting depth of this context.  Used to beautify error messages re
     invalid gotos.  The outermost ctx is depth 1, with depth 0 being
     reserved for the main body of the function.  */
  int depth;

  /* True if this parallel directive is nested within another.  */
  bool is_nested;
} omp_context;

struct omp_for_data_loop
{
  tree v, n1, n2, step;
  enum tree_code cond_code;
};

/* A structure describing the main elements of a parallel loop.  */

struct omp_for_data
{
  struct omp_for_data_loop loop;
  tree chunk_size;
  gimple for_stmt;
  tree pre, iter_type;
  int collapse;
  bool have_nowait, have_ordered;
  enum omp_clause_schedule_kind sched_kind;
  struct omp_for_data_loop *loops;
};

static splay_tree all_contexts;
static int taskreg_nesting_level;
struct omp_region *root_omp_region;
static bitmap task_shared_vars;

static void scan_omp (gimple_seq, omp_context *);
static tree scan_omp_1_op (tree *, int *, void *);

#define WALK_SUBSTMTS  \
    case GIMPLE_BIND: \
    case GIMPLE_TRY: \
    case GIMPLE_CATCH: \
    case GIMPLE_EH_FILTER: \
      /* The sub-statements for these should be walked.  */ \
      *handled_ops_p = false; \
      break;

/* Convenience function for calling scan_omp_1_op on tree operands.  */

static inline tree
scan_omp_op (tree *tp, omp_context *ctx)
{
  struct walk_stmt_info wi;

  memset (&wi, 0, sizeof (wi));
  wi.info = ctx;
  wi.want_locations = true;

  return walk_tree (tp, scan_omp_1_op, &wi, NULL);
}

static void lower_omp (gimple_seq, omp_context *);
static tree lookup_decl_in_outer_ctx (tree, omp_context *);
static tree maybe_lookup_decl_in_outer_ctx (tree, omp_context *);

/* Find an OpenMP clause of type KIND within CLAUSES.  */

tree
find_omp_clause (tree clauses, enum omp_clause_code kind)
{
  for (; clauses ; clauses = OMP_CLAUSE_CHAIN (clauses))
    if (OMP_CLAUSE_CODE (clauses) == kind)
      return clauses;

  return NULL_TREE;
}

/* Return true if CTX is for an omp parallel.  */

static inline bool
is_parallel_ctx (omp_context *ctx)
{
  return gimple_code (ctx->stmt) == GIMPLE_OMP_PARALLEL;
}

/* Return true if CTX is for an omp task.  */

static inline bool
is_task_ctx (omp_context *ctx)
{
  return gimple_code (ctx->stmt) == GIMPLE_OMP_TASK;
}

/* Return true if CTX is for an omp parallel or omp task.  */

static inline bool
is_taskreg_ctx (omp_context *ctx)
{
  return gimple_code (ctx->stmt) == GIMPLE_OMP_PARALLEL
	 || gimple_code (ctx->stmt) == GIMPLE_OMP_TASK;
}

/* Return true if REGION is a combined parallel+workshare region.  */

static inline bool
is_combined_parallel (struct omp_region *region)
{
  return region->is_combined_parallel;
}

/* Extract the header elements of parallel loop FOR_STMT and store
   them into *FD.  */

static void
extract_omp_for_data (gimple for_stmt, struct omp_for_data *fd,
		      struct omp_for_data_loop *loops)
{
  tree t, var, *collapse_iter, *collapse_count;
  tree count = NULL_TREE, iter_type = long_integer_type_node;
  struct omp_for_data_loop *loop;
  int i;
  struct omp_for_data_loop dummy_loop;
  location_t loc = gimple_location (for_stmt);

  fd->for_stmt = for_stmt;
  fd->pre = NULL;
  fd->collapse = gimple_omp_for_collapse (for_stmt);
  if (fd->collapse > 1)
    fd->loops = loops;
  else
    fd->loops = &fd->loop;

  fd->have_nowait = fd->have_ordered = false;
  fd->sched_kind = OMP_CLAUSE_SCHEDULE_STATIC;
  fd->chunk_size = NULL_TREE;
  collapse_iter = NULL;
  collapse_count = NULL;
  for (t = gimple_omp_for_clauses (for_stmt); t ; t = OMP_CLAUSE_CHAIN (t))
    switch (OMP_CLAUSE_CODE (t))
      {
      case OMP_CLAUSE_NOWAIT:
	fd->have_nowait = true;
	break;
      case OMP_CLAUSE_ORDERED:
	fd->have_ordered = true;
	break;
      case OMP_CLAUSE_SCHEDULE:
	fd->sched_kind = OMP_CLAUSE_SCHEDULE_KIND (t);
	fd->chunk_size = OMP_CLAUSE_SCHEDULE_CHUNK_EXPR (t);
	break;
      case OMP_CLAUSE_COLLAPSE:
	if (fd->collapse > 1)
	  {
	    collapse_iter = &OMP_CLAUSE_COLLAPSE_ITERVAR (t);
	    collapse_count = &OMP_CLAUSE_COLLAPSE_COUNT (t);
	  }
	break;
      default:
	break;
      }

  /* FIXME: for now map schedule(auto) to schedule(static).
     There should be analysis to determine whether all iterations
     are approximately the same amount of work (then schedule(static)
     is best) or if it varies (then schedule(dynamic,N) is better).  */
  if (fd->sched_kind == OMP_CLAUSE_SCHEDULE_AUTO)
    {
      fd->sched_kind = OMP_CLAUSE_SCHEDULE_STATIC;
      gcc_assert (fd->chunk_size == NULL);
    }
  gcc_assert (fd->collapse == 1 || collapse_iter != NULL);
  if (fd->sched_kind == OMP_CLAUSE_SCHEDULE_RUNTIME)
    gcc_assert (fd->chunk_size == NULL);
  else if (fd->chunk_size == NULL)
    {
      /* We only need to compute a default chunk size for ordered
	 static loops and dynamic loops.  */
      if (fd->sched_kind != OMP_CLAUSE_SCHEDULE_STATIC
	  || fd->have_ordered
	  || fd->collapse > 1)
	fd->chunk_size = (fd->sched_kind == OMP_CLAUSE_SCHEDULE_STATIC)
			 ? integer_zero_node : integer_one_node;
    }

  for (i = 0; i < fd->collapse; i++)
    {
      if (fd->collapse == 1)
	loop = &fd->loop;
      else if (loops != NULL)
	loop = loops + i;
      else
	loop = &dummy_loop;

      loop->v = gimple_omp_for_index (for_stmt, i);
      gcc_assert (SSA_VAR_P (loop->v));
      gcc_assert (TREE_CODE (TREE_TYPE (loop->v)) == INTEGER_TYPE
		  || TREE_CODE (TREE_TYPE (loop->v)) == POINTER_TYPE);
      var = TREE_CODE (loop->v) == SSA_NAME ? SSA_NAME_VAR (loop->v) : loop->v;
      loop->n1 = gimple_omp_for_initial (for_stmt, i);

      loop->cond_code = gimple_omp_for_cond (for_stmt, i);
      loop->n2 = gimple_omp_for_final (for_stmt, i);
      switch (loop->cond_code)
	{
	case LT_EXPR:
	case GT_EXPR:
	  break;
	case LE_EXPR:
	  if (POINTER_TYPE_P (TREE_TYPE (loop->n2)))
	    loop->n2 = fold_build2_loc (loc,
					POINTER_PLUS_EXPR,
					TREE_TYPE (loop->n2),
					loop->n2, size_one_node);
	  else
	    loop->n2 = fold_build2_loc (loc,
					PLUS_EXPR, TREE_TYPE (loop->n2),
					loop->n2,
					build_int_cst (TREE_TYPE (loop->n2),
						       1));
	  loop->cond_code = LT_EXPR;
	  break;
	case GE_EXPR:
	  if (POINTER_TYPE_P (TREE_TYPE (loop->n2)))
	    loop->n2 = fold_build2_loc (loc,
					POINTER_PLUS_EXPR,
					TREE_TYPE (loop->n2),
					loop->n2, size_int (-1));
	  else
	    loop->n2 = fold_build2_loc (loc,
					MINUS_EXPR, TREE_TYPE (loop->n2),
					loop->n2,
					build_int_cst (TREE_TYPE (loop->n2),
						       1));
	  loop->cond_code = GT_EXPR;
	  break;
	default:
	  gcc_unreachable ();
	}

      t = gimple_omp_for_incr (for_stmt, i);
      gcc_assert (TREE_OPERAND (t, 0) == var);
      switch (TREE_CODE (t))
	{
	case PLUS_EXPR:
	case POINTER_PLUS_EXPR:
	  loop->step = TREE_OPERAND (t, 1);
	  break;
	case MINUS_EXPR:
	  loop->step = TREE_OPERAND (t, 1);
	  loop->step = fold_build1_loc (loc,
					NEGATE_EXPR, TREE_TYPE (loop->step),
					loop->step);
	  break;
	default:
	  gcc_unreachable ();
	}

      if (iter_type != long_long_unsigned_type_node)
	{
	  if (POINTER_TYPE_P (TREE_TYPE (loop->v)))
	    iter_type = long_long_unsigned_type_node;
	  else if (TYPE_UNSIGNED (TREE_TYPE (loop->v))
		   && TYPE_PRECISION (TREE_TYPE (loop->v))
		      >= TYPE_PRECISION (iter_type))
	    {
	      tree n;

	      if (loop->cond_code == LT_EXPR)
		n = fold_build2_loc (loc,
				     PLUS_EXPR, TREE_TYPE (loop->v),
				     loop->n2, loop->step);
	      else
		n = loop->n1;
	      if (TREE_CODE (n) != INTEGER_CST
		  || tree_int_cst_lt (TYPE_MAX_VALUE (iter_type), n))
		iter_type = long_long_unsigned_type_node;
	    }
	  else if (TYPE_PRECISION (TREE_TYPE (loop->v))
		   > TYPE_PRECISION (iter_type))
	    {
	      tree n1, n2;

	      if (loop->cond_code == LT_EXPR)
		{
		  n1 = loop->n1;
		  n2 = fold_build2_loc (loc,
					PLUS_EXPR, TREE_TYPE (loop->v),
					loop->n2, loop->step);
		}
	      else
		{
		  n1 = fold_build2_loc (loc,
					MINUS_EXPR, TREE_TYPE (loop->v),
					loop->n2, loop->step);
		  n2 = loop->n1;
		}
	      if (TREE_CODE (n1) != INTEGER_CST
		  || TREE_CODE (n2) != INTEGER_CST
		  || !tree_int_cst_lt (TYPE_MIN_VALUE (iter_type), n1)
		  || !tree_int_cst_lt (n2, TYPE_MAX_VALUE (iter_type)))
		iter_type = long_long_unsigned_type_node;
	    }
	}

      if (collapse_count && *collapse_count == NULL)
	{
	  if ((i == 0 || count != NULL_TREE)
	      && TREE_CODE (TREE_TYPE (loop->v)) == INTEGER_TYPE
	      && TREE_CONSTANT (loop->n1)
	      && TREE_CONSTANT (loop->n2)
	      && TREE_CODE (loop->step) == INTEGER_CST)
	    {
	      tree itype = TREE_TYPE (loop->v);

	      if (POINTER_TYPE_P (itype))
		itype
		  = lang_hooks.types.type_for_size (TYPE_PRECISION (itype), 0);
	      t = build_int_cst (itype, (loop->cond_code == LT_EXPR ? -1 : 1));
	      t = fold_build2_loc (loc, PLUS_EXPR, itype,
				   fold_convert_loc (loc, itype, loop->step),
				   t);
	      t = fold_build2_loc (loc, PLUS_EXPR, itype, t,
				   fold_convert_loc (loc, itype, loop->n2));
	      t = fold_build2_loc (loc, MINUS_EXPR, itype, t,
				   fold_convert_loc (loc, itype, loop->n1));
	      if (TYPE_UNSIGNED (itype) && loop->cond_code == GT_EXPR)
		t = fold_build2_loc (loc, TRUNC_DIV_EXPR, itype,
				     fold_build1_loc (loc, NEGATE_EXPR,
						      itype, t),
				     fold_build1_loc (loc, NEGATE_EXPR, itype,
						      fold_convert_loc (loc,
									itype,
									loop->step)));
	      else
		t = fold_build2_loc (loc, TRUNC_DIV_EXPR, itype, t,
				     fold_convert_loc (loc, itype,
						       loop->step));
	      t = fold_convert_loc (loc, long_long_unsigned_type_node, t);
	      if (count != NULL_TREE)
		count = fold_build2_loc (loc, MULT_EXPR,
					 long_long_unsigned_type_node,
					 count, t);
	      else
		count = t;
	      if (TREE_CODE (count) != INTEGER_CST)
		count = NULL_TREE;
	    }
	  else
	    count = NULL_TREE;
	}
    }

  if (count)
    {
      if (!tree_int_cst_lt (count, TYPE_MAX_VALUE (long_integer_type_node)))
	iter_type = long_long_unsigned_type_node;
      else
	iter_type = long_integer_type_node;
    }
  else if (collapse_iter && *collapse_iter != NULL)
    iter_type = TREE_TYPE (*collapse_iter);
  fd->iter_type = iter_type;
  if (collapse_iter && *collapse_iter == NULL)
    *collapse_iter = create_tmp_var (iter_type, ".iter");
  if (collapse_count && *collapse_count == NULL)
    {
      if (count)
	*collapse_count = fold_convert_loc (loc, iter_type, count);
      else
	*collapse_count = create_tmp_var (iter_type, ".count");
    }

  if (fd->collapse > 1)
    {
      fd->loop.v = *collapse_iter;
      fd->loop.n1 = build_int_cst (TREE_TYPE (fd->loop.v), 0);
      fd->loop.n2 = *collapse_count;
      fd->loop.step = build_int_cst (TREE_TYPE (fd->loop.v), 1);
      fd->loop.cond_code = LT_EXPR;
    }
}

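/* As an illustration of the normalization above (this example is not
   part of the original sources): a source loop

	#pragma omp for
	for (i = 0; i <= n; i++)

   is recorded in *FD with cond_code LT_EXPR and n2 = n + 1, so later
   expansion only ever has to deal with '<' and '>' loops.  Similarly,
   a decrement such as 'i -= s' is recorded as a step of -s via
   NEGATE_EXPR.  */
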
/* Given two blocks PAR_ENTRY_BB and WS_ENTRY_BB such that WS_ENTRY_BB
   is the immediate dominator of PAR_ENTRY_BB, return true if there
   are no data dependencies that would prevent expanding the parallel
   directive at PAR_ENTRY_BB as a combined parallel+workshare region.

   When expanding a combined parallel+workshare region, the call to
   the child function may need additional arguments in the case of
   GIMPLE_OMP_FOR regions.  In some cases, these arguments are
   computed out of variables passed in from the parent to the child
   via 'struct .omp_data_s'.  For instance:

	#pragma omp parallel for schedule (guided, i * 4)
	for (j ...)

   Is lowered into:

	# BLOCK 2 (PAR_ENTRY_BB)
	.omp_data_o.i = i;
	#pragma omp parallel [child fn: bar.omp_fn.0 ( ..., D.1598)

	# BLOCK 3 (WS_ENTRY_BB)
	.omp_data_i = &.omp_data_o;
	D.1667 = .omp_data_i->i;
	D.1598 = D.1667 * 4;
	#pragma omp for schedule (guided, D.1598)

   When we outline the parallel region, the call to the child function
   'bar.omp_fn.0' will need the value D.1598 in its argument list, but
   that value is computed *after* the call site.  So, in principle we
   cannot do the transformation.

   To see whether the code in WS_ENTRY_BB blocks the combined
   parallel+workshare call, we collect all the variables used in the
   GIMPLE_OMP_FOR header and check whether they appear on the LHS of any
   statement in WS_ENTRY_BB.  If so, then we cannot emit the combined
   call.

   FIXME.  If we had the SSA form built at this point, we could merely
   hoist the code in block 3 into block 2 and be done with it.  But at
   this point we don't have dataflow information and though we could
   hack something up here, it is really not worth the aggravation.  */

static bool
workshare_safe_to_combine_p (basic_block ws_entry_bb)
{
  struct omp_for_data fd;
  gimple ws_stmt = last_stmt (ws_entry_bb);

  if (gimple_code (ws_stmt) == GIMPLE_OMP_SECTIONS)
    return true;

  gcc_assert (gimple_code (ws_stmt) == GIMPLE_OMP_FOR);

  extract_omp_for_data (ws_stmt, &fd, NULL);

  if (fd.collapse > 1 && TREE_CODE (fd.loop.n2) != INTEGER_CST)
    return false;
  if (fd.iter_type != long_integer_type_node)
    return false;

  /* FIXME.  We give up too easily here.  If any of these arguments
     are not constants, they will likely involve variables that have
     been mapped into fields of .omp_data_s for sharing with the child
     function.  With appropriate data flow, it would be possible to
     see through this.  */
  if (!is_gimple_min_invariant (fd.loop.n1)
      || !is_gimple_min_invariant (fd.loop.n2)
      || !is_gimple_min_invariant (fd.loop.step)
      || (fd.chunk_size && !is_gimple_min_invariant (fd.chunk_size)))
    return false;

  return true;
}

/* Collect additional arguments needed to emit a combined
   parallel+workshare call.  WS_STMT is the workshare directive being
   expanded.  */

static VEC(tree,gc) *
get_ws_args_for (gimple ws_stmt)
{
  tree t;
  location_t loc = gimple_location (ws_stmt);
  VEC(tree,gc) *ws_args;

  if (gimple_code (ws_stmt) == GIMPLE_OMP_FOR)
    {
      struct omp_for_data fd;

      extract_omp_for_data (ws_stmt, &fd, NULL);

      ws_args = VEC_alloc (tree, gc, 3 + (fd.chunk_size != 0));

      t = fold_convert_loc (loc, long_integer_type_node, fd.loop.n1);
      VEC_quick_push (tree, ws_args, t);

      t = fold_convert_loc (loc, long_integer_type_node, fd.loop.n2);
      VEC_quick_push (tree, ws_args, t);

      t = fold_convert_loc (loc, long_integer_type_node, fd.loop.step);
      VEC_quick_push (tree, ws_args, t);

      if (fd.chunk_size)
	{
	  t = fold_convert_loc (loc, long_integer_type_node, fd.chunk_size);
	  VEC_quick_push (tree, ws_args, t);
	}

      return ws_args;
    }
  else if (gimple_code (ws_stmt) == GIMPLE_OMP_SECTIONS)
    {
      /* Number of sections is equal to the number of edges from the
	 GIMPLE_OMP_SECTIONS_SWITCH statement, except for the one to
	 the exit of the sections region.  */
      basic_block bb = single_succ (gimple_bb (ws_stmt));
      t = build_int_cst (unsigned_type_node, EDGE_COUNT (bb->succs) - 1);
      ws_args = VEC_alloc (tree, gc, 1);
      VEC_quick_push (tree, ws_args, t);
      return ws_args;
    }

  gcc_unreachable ();
}

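/* For example (illustrative only, not part of the original sources):
   for a combined region

	#pragma omp parallel for schedule (dynamic, 4)
	for (i = 0; i < n; i++)

   the vector built above holds the lower bound, upper bound, step and
   chunk size (0, n, 1, 4).  Expansion appends these to the argument
   list of the GOMP_parallel_loop_dynamic_start call, so the workshare
   setup happens inside the runtime instead of in open-coded form.  */
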
/* Discover whether REGION is a combined parallel+workshare region.  */

static void
determine_parallel_type (struct omp_region *region)
{
  basic_block par_entry_bb, par_exit_bb;
  basic_block ws_entry_bb, ws_exit_bb;

  if (region == NULL || region->inner == NULL
      || region->exit == NULL || region->inner->exit == NULL
      || region->inner->cont == NULL)
    return;

  /* We only support parallel+for and parallel+sections.  */
  if (region->type != GIMPLE_OMP_PARALLEL
      || (region->inner->type != GIMPLE_OMP_FOR
	  && region->inner->type != GIMPLE_OMP_SECTIONS))
    return;

  /* Check for perfect nesting PAR_ENTRY_BB -> WS_ENTRY_BB and
     WS_EXIT_BB -> PAR_EXIT_BB.  */
  par_entry_bb = region->entry;
  par_exit_bb = region->exit;
  ws_entry_bb = region->inner->entry;
  ws_exit_bb = region->inner->exit;

  if (single_succ (par_entry_bb) == ws_entry_bb
      && single_succ (ws_exit_bb) == par_exit_bb
      && workshare_safe_to_combine_p (ws_entry_bb)
      && (gimple_omp_parallel_combined_p (last_stmt (par_entry_bb))
	  || (last_and_only_stmt (ws_entry_bb)
	      && last_and_only_stmt (par_exit_bb))))
    {
      gimple ws_stmt = last_stmt (ws_entry_bb);

      if (region->inner->type == GIMPLE_OMP_FOR)
	{
	  /* If this is a combined parallel loop, we need to determine
	     whether or not to use the combined library calls.  There
	     are two cases where we do not apply the transformation:
	     static loops and any kind of ordered loop.  In the first
	     case, we already open code the loop so there is no need
	     to do anything else.  In the latter case, the combined
	     parallel loop call would still need extra synchronization
	     to implement ordered semantics, so there would not be any
	     gain in using the combined call.  */
	  tree clauses = gimple_omp_for_clauses (ws_stmt);
	  tree c = find_omp_clause (clauses, OMP_CLAUSE_SCHEDULE);
	  if (c == NULL
	      || OMP_CLAUSE_SCHEDULE_KIND (c) == OMP_CLAUSE_SCHEDULE_STATIC
	      || find_omp_clause (clauses, OMP_CLAUSE_ORDERED))
	    {
	      region->is_combined_parallel = false;
	      region->inner->is_combined_parallel = false;
	      return;
	    }
	}

      region->is_combined_parallel = true;
      region->inner->is_combined_parallel = true;
      region->ws_args = get_ws_args_for (ws_stmt);
    }
}

/* Return true if EXPR is variable sized.  */

static inline bool
is_variable_sized (const_tree expr)
{
  return !TREE_CONSTANT (TYPE_SIZE_UNIT (TREE_TYPE (expr)));
}

/* Return true if DECL is a reference type.  */

static inline bool
is_reference (tree decl)
{
  return lang_hooks.decls.omp_privatize_by_reference (decl);
}

/* Lookup variables in the decl or field splay trees.  The "maybe" form
   allows for the variable form to not have been entered, otherwise we
   assert that the variable must have been entered.  */

static inline tree
lookup_decl (tree var, omp_context *ctx)
{
  tree *n;
  n = (tree *) pointer_map_contains (ctx->cb.decl_map, var);
  return *n;
}

static inline tree
maybe_lookup_decl (const_tree var, omp_context *ctx)
{
  tree *n;
  n = (tree *) pointer_map_contains (ctx->cb.decl_map, var);
  return n ? *n : NULL_TREE;
}

static inline tree
lookup_field (tree var, omp_context *ctx)
{
  splay_tree_node n;
  n = splay_tree_lookup (ctx->field_map, (splay_tree_key) var);
  return (tree) n->value;
}

static inline tree
lookup_sfield (tree var, omp_context *ctx)
{
  splay_tree_node n;
  n = splay_tree_lookup (ctx->sfield_map
			 ? ctx->sfield_map : ctx->field_map,
			 (splay_tree_key) var);
  return (tree) n->value;
}

static inline tree
maybe_lookup_field (tree var, omp_context *ctx)
{
  splay_tree_node n;
  n = splay_tree_lookup (ctx->field_map, (splay_tree_key) var);
  return n ? (tree) n->value : NULL_TREE;
}

/* Return true if DECL should be copied by pointer.  SHARED_CTX is
   the parallel context if DECL is to be shared.  */

static bool
use_pointer_for_field (tree decl, omp_context *shared_ctx)
{
  if (AGGREGATE_TYPE_P (TREE_TYPE (decl)))
    return true;

  /* We can only use copy-in/copy-out semantics for shared variables
     when we know the value is not accessible from an outer scope.  */
  if (shared_ctx)
    {
      /* ??? Trivially accessible from anywhere.  But why would we even
	 be passing an address in this case?  Should we simply assert
	 this to be false, or should we have a cleanup pass that removes
	 these from the list of mappings?  */
      if (TREE_STATIC (decl) || DECL_EXTERNAL (decl))
	return true;

      /* For variables with DECL_HAS_VALUE_EXPR_P set, we cannot tell
	 without analyzing the expression whether or not its location
	 is accessible to anyone else.  In the case of nested parallel
	 regions it certainly may be.  */
      if (TREE_CODE (decl) != RESULT_DECL && DECL_HAS_VALUE_EXPR_P (decl))
	return true;

      /* Do not use copy-in/copy-out for variables that have their
	 address taken.  */
      if (TREE_ADDRESSABLE (decl))
	return true;

      /* Disallow copy-in/out in nested parallel if
	 decl is shared in outer parallel, otherwise
	 each thread could store the shared variable
	 in its own copy-in location, making the
	 variable no longer really shared.  */
      if (!TREE_READONLY (decl) && shared_ctx->is_nested)
	{
	  omp_context *up;

	  for (up = shared_ctx->outer; up; up = up->outer)
	    if (is_taskreg_ctx (up) && maybe_lookup_decl (decl, up))
	      break;

	  if (up)
	    {
	      tree c;

	      for (c = gimple_omp_taskreg_clauses (up->stmt);
		   c; c = OMP_CLAUSE_CHAIN (c))
		if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_SHARED
		    && OMP_CLAUSE_DECL (c) == decl)
		  break;

	      if (c)
		return true;
	    }
	}

      /* For tasks avoid using copy-in/out, unless they are readonly
	 (in which case just copy-in is used).  As tasks can be
	 deferred or executed in different thread, when GOMP_task
	 returns, the task hasn't necessarily terminated.  */
      if (!TREE_READONLY (decl) && is_task_ctx (shared_ctx))
	{
	  tree outer = maybe_lookup_decl_in_outer_ctx (decl, shared_ctx);
	  if (is_gimple_reg (outer))
	    {
	      /* Taking address of OUTER in lower_send_shared_vars
		 might need regimplification of everything that uses the
		 variable.  */
	      if (!task_shared_vars)
		task_shared_vars = BITMAP_ALLOC (NULL);
	      bitmap_set_bit (task_shared_vars, DECL_UID (outer));
	      TREE_ADDRESSABLE (outer) = 1;
	    }
	  return true;
	}
    }

  return false;
}

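/* Illustrative example (not in the original sources): given

	int x = 0;
	#pragma omp parallel shared (x)
	  x++;

   a scalar X that is not addressable may be copied in and out of
   .omp_data_s by value, whereas

	int y[10];
	#pragma omp parallel shared (y)
	  y[0]++;

   must pass &y, since an aggregate cannot use copy-in/copy-out and
   still appear shared to all threads.  */
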
/* Create a new VAR_DECL and copy information from VAR to it.  */

tree
copy_var_decl (tree var, tree name, tree type)
{
  tree copy = build_decl (DECL_SOURCE_LOCATION (var), VAR_DECL, name, type);

  TREE_ADDRESSABLE (copy) = TREE_ADDRESSABLE (var);
  TREE_THIS_VOLATILE (copy) = TREE_THIS_VOLATILE (var);
  DECL_GIMPLE_REG_P (copy) = DECL_GIMPLE_REG_P (var);
  DECL_ARTIFICIAL (copy) = DECL_ARTIFICIAL (var);
  DECL_IGNORED_P (copy) = DECL_IGNORED_P (var);
  DECL_CONTEXT (copy) = DECL_CONTEXT (var);
  TREE_USED (copy) = 1;
  DECL_SEEN_IN_BIND_EXPR_P (copy) = 1;

  return copy;
}

/* Construct a new automatic decl similar to VAR.  */

static tree
omp_copy_decl_2 (tree var, tree name, tree type, omp_context *ctx)
{
  tree copy = copy_var_decl (var, name, type);

  DECL_CONTEXT (copy) = current_function_decl;
  TREE_CHAIN (copy) = ctx->block_vars;
  ctx->block_vars = copy;

  return copy;
}

static tree
omp_copy_decl_1 (tree var, omp_context *ctx)
{
  return omp_copy_decl_2 (var, DECL_NAME (var), TREE_TYPE (var), ctx);
}

/* Build tree nodes to access the field for VAR on the receiver side.  */

static tree
build_receiver_ref (tree var, bool by_ref, omp_context *ctx)
{
  tree x, field = lookup_field (var, ctx);

  /* If the receiver record type was remapped in the child function,
     remap the field into the new record type.  */
  x = maybe_lookup_field (field, ctx);
  if (x != NULL)
    field = x;

  x = build_simple_mem_ref (ctx->receiver_decl);
  x = build3 (COMPONENT_REF, TREE_TYPE (field), x, field, NULL);
  if (by_ref)
    x = build_simple_mem_ref (x);

  return x;
}

/* Build tree nodes to access VAR in the scope outer to CTX.  In the case
   of a parallel, this is a component reference; for workshare constructs
   this is some variable.  */

static tree
build_outer_var_ref (tree var, omp_context *ctx)
{
  tree x;

  if (is_global_var (maybe_lookup_decl_in_outer_ctx (var, ctx)))
    x = var;
  else if (is_variable_sized (var))
    {
      x = TREE_OPERAND (DECL_VALUE_EXPR (var), 0);
      x = build_outer_var_ref (x, ctx);
      x = build_simple_mem_ref (x);
    }
  else if (is_taskreg_ctx (ctx))
    {
      bool by_ref = use_pointer_for_field (var, NULL);
      x = build_receiver_ref (var, by_ref, ctx);
    }
  else if (ctx->outer)
    x = lookup_decl (var, ctx->outer);
  else if (is_reference (var))
    /* This can happen with orphaned constructs.  If var is reference, it is
       possible it is shared and as such valid.  */
    x = var;
  else
    gcc_unreachable ();

  if (is_reference (var))
    x = build_simple_mem_ref (x);

  return x;
}

/* Build tree nodes to access the field for VAR on the sender side.  */

static tree
build_sender_ref (tree var, omp_context *ctx)
{
  tree field = lookup_sfield (var, ctx);
  return build3 (COMPONENT_REF, TREE_TYPE (field),
		 ctx->sender_decl, field, NULL);
}

/* Add a new field for VAR inside the structure CTX->SENDER_DECL.  */

static void
install_var_field (tree var, bool by_ref, int mask, omp_context *ctx)
{
  tree field, type, sfield = NULL_TREE;

  gcc_assert ((mask & 1) == 0
	      || !splay_tree_lookup (ctx->field_map, (splay_tree_key) var));
  gcc_assert ((mask & 2) == 0 || !ctx->sfield_map
	      || !splay_tree_lookup (ctx->sfield_map, (splay_tree_key) var));

  type = TREE_TYPE (var);
  if (by_ref)
    type = build_pointer_type (type);
  else if ((mask & 3) == 1 && is_reference (var))
    type = TREE_TYPE (type);

  field = build_decl (DECL_SOURCE_LOCATION (var),
		      FIELD_DECL, DECL_NAME (var), type);

  /* Remember what variable this field was created for.  This does have a
     side effect of making dwarf2out ignore this member, so for helpful
     debugging we clear it later in delete_omp_context.  */
  DECL_ABSTRACT_ORIGIN (field) = var;
  if (type == TREE_TYPE (var))
    {
      DECL_ALIGN (field) = DECL_ALIGN (var);
      DECL_USER_ALIGN (field) = DECL_USER_ALIGN (var);
      TREE_THIS_VOLATILE (field) = TREE_THIS_VOLATILE (var);
    }
  else
    DECL_ALIGN (field) = TYPE_ALIGN (type);

  if ((mask & 3) == 3)
    {
      insert_field_into_struct (ctx->record_type, field);
      if (ctx->srecord_type)
	{
	  sfield = build_decl (DECL_SOURCE_LOCATION (var),
			       FIELD_DECL, DECL_NAME (var), type);
	  DECL_ABSTRACT_ORIGIN (sfield) = var;
	  DECL_ALIGN (sfield) = DECL_ALIGN (field);
	  DECL_USER_ALIGN (sfield) = DECL_USER_ALIGN (field);
	  TREE_THIS_VOLATILE (sfield) = TREE_THIS_VOLATILE (field);
	  insert_field_into_struct (ctx->srecord_type, sfield);
	}
    }
  else
    {
      if (ctx->srecord_type == NULL_TREE)
	{
	  tree t;

	  ctx->srecord_type = lang_hooks.types.make_type (RECORD_TYPE);
	  ctx->sfield_map = splay_tree_new (splay_tree_compare_pointers, 0, 0);
	  for (t = TYPE_FIELDS (ctx->record_type); t ; t = TREE_CHAIN (t))
	    {
	      sfield = build_decl (DECL_SOURCE_LOCATION (var),
				   FIELD_DECL, DECL_NAME (t), TREE_TYPE (t));
	      DECL_ABSTRACT_ORIGIN (sfield) = DECL_ABSTRACT_ORIGIN (t);
	      insert_field_into_struct (ctx->srecord_type, sfield);
	      splay_tree_insert (ctx->sfield_map,
				 (splay_tree_key) DECL_ABSTRACT_ORIGIN (t),
				 (splay_tree_value) sfield);
	    }
	}
      sfield = field;
      insert_field_into_struct ((mask & 1) ? ctx->record_type
				: ctx->srecord_type, field);
    }

  if (mask & 1)
    splay_tree_insert (ctx->field_map, (splay_tree_key) var,
		       (splay_tree_value) field);
  if ((mask & 2) && ctx->sfield_map)
    splay_tree_insert (ctx->sfield_map, (splay_tree_key) var,
		       (splay_tree_value) sfield);
}

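/* For instance (illustrative only, not in the original sources):
   lowering

	int a[4]; int b;
	#pragma omp parallel shared (a) firstprivate (b)

   creates fields roughly like

	struct .omp_data_s { int (*a)[4]; int b; };

   where A is passed by reference because an aggregate must remain
   genuinely shared, while B is copied by value into the structure.  */
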
static tree
install_var_local (tree var, omp_context *ctx)
{
  tree new_var = omp_copy_decl_1 (var, ctx);
  insert_decl_map (&ctx->cb, var, new_var);
  return new_var;
}

/* Adjust the replacement for DECL in CTX for the new context.  This means
   copying the DECL_VALUE_EXPR, and fixing up the type.  */

static void
fixup_remapped_decl (tree decl, omp_context *ctx, bool private_debug)
{
  tree new_decl, size;

  new_decl = lookup_decl (decl, ctx);

  TREE_TYPE (new_decl) = remap_type (TREE_TYPE (decl), &ctx->cb);

  if ((!TREE_CONSTANT (DECL_SIZE (new_decl)) || private_debug)
      && DECL_HAS_VALUE_EXPR_P (decl))
    {
      tree ve = DECL_VALUE_EXPR (decl);
      walk_tree (&ve, copy_tree_body_r, &ctx->cb, NULL);
      SET_DECL_VALUE_EXPR (new_decl, ve);
      DECL_HAS_VALUE_EXPR_P (new_decl) = 1;
    }

  if (!TREE_CONSTANT (DECL_SIZE (new_decl)))
    {
      size = remap_decl (DECL_SIZE (decl), &ctx->cb);
      if (size == error_mark_node)
	size = TYPE_SIZE (TREE_TYPE (new_decl));
      DECL_SIZE (new_decl) = size;

      size = remap_decl (DECL_SIZE_UNIT (decl), &ctx->cb);
      if (size == error_mark_node)
	size = TYPE_SIZE_UNIT (TREE_TYPE (new_decl));
      DECL_SIZE_UNIT (new_decl) = size;
    }
}

/* The callback for remap_decl.  Search all containing contexts for a
   mapping of the variable; this avoids having to duplicate the splay
   tree ahead of time.  We know a mapping doesn't already exist in the
   given context.  Create new mappings to implement default semantics.  */

static tree
omp_copy_decl (tree var, copy_body_data *cb)
{
  omp_context *ctx = (omp_context *) cb;
  tree new_var;

  if (TREE_CODE (var) == LABEL_DECL)
    {
      new_var = create_artificial_label (DECL_SOURCE_LOCATION (var));
      DECL_CONTEXT (new_var) = current_function_decl;
      insert_decl_map (&ctx->cb, var, new_var);
      return new_var;
    }

  while (!is_taskreg_ctx (ctx))
    {
      ctx = ctx->outer;
      if (ctx == NULL)
	return var;
      new_var = maybe_lookup_decl (var, ctx);
      if (new_var)
	return new_var;
    }

  if (is_global_var (var) || decl_function_context (var) != ctx->cb.src_fn)
    return var;

  return error_mark_node;
}

/* Return the parallel region associated with STMT.  */

/* Debugging dumps for parallel regions.  */
void dump_omp_region (FILE *, struct omp_region *, int);
void debug_omp_region (struct omp_region *);
void debug_all_omp_regions (void);

/* Dump the parallel region tree rooted at REGION.  */

void
dump_omp_region (FILE *file, struct omp_region *region, int indent)
{
  fprintf (file, "%*sbb %d: %s\n", indent, "", region->entry->index,
	   gimple_code_name[region->type]);

  if (region->inner)
    dump_omp_region (file, region->inner, indent + 4);

  if (region->cont)
    fprintf (file, "%*sbb %d: GIMPLE_OMP_CONTINUE\n", indent, "",
	     region->cont->index);

  if (region->exit)
    fprintf (file, "%*sbb %d: GIMPLE_OMP_RETURN\n", indent, "",
	     region->exit->index);
  else
    fprintf (file, "%*s[no exit marker]\n", indent, "");

  if (region->next)
    dump_omp_region (file, region->next, indent);
}

void
debug_omp_region (struct omp_region *region)
{
  dump_omp_region (stderr, region, 0);
}

void
debug_all_omp_regions (void)
{
  dump_omp_region (stderr, root_omp_region, 0);
}

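/* A dump produced by the functions above looks roughly like the
   following (illustrative only; the region entry lines use the
   lowercase gimple_code_name strings):

	bb 2: gimple_omp_parallel
	    bb 3: gimple_omp_for
	    bb 5: GIMPLE_OMP_CONTINUE
	    bb 6: GIMPLE_OMP_RETURN
	bb 7: GIMPLE_OMP_RETURN  */
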
/* Create a new parallel region starting at STMT inside region PARENT.  */

struct omp_region *
new_omp_region (basic_block bb, enum gimple_code type,
		struct omp_region *parent)
{
  struct omp_region *region = XCNEW (struct omp_region);

  region->outer = parent;
  region->entry = bb;
  region->type = type;

  if (parent)
    {
      /* This is a nested region.  Add it to the list of inner
	 regions in PARENT.  */
      region->next = parent->inner;
      parent->inner = region;
    }
  else
    {
      /* This is a toplevel region.  Add it to the list of toplevel
	 regions in ROOT_OMP_REGION.  */
      region->next = root_omp_region;
      root_omp_region = region;
    }

  return region;
}

/* Release the memory associated with the region tree rooted at REGION.  */

static void
free_omp_region_1 (struct omp_region *region)
{
  struct omp_region *i, *n;

  for (i = region->inner; i ; i = n)
    {
      n = i->next;
      free_omp_region_1 (i);
    }

  free (region);
}

/* Release the memory for the entire omp region tree.  */

void
free_omp_regions (void)
{
  struct omp_region *r, *n;
  for (r = root_omp_region; r ; r = n)
    {
      n = r->next;
      free_omp_region_1 (r);
    }
  root_omp_region = NULL;
}

/* Create a new context, with OUTER_CTX being the surrounding context.  */

static omp_context *
new_omp_context (gimple stmt, omp_context *outer_ctx)
{
  omp_context *ctx = XCNEW (omp_context);

  splay_tree_insert (all_contexts, (splay_tree_key) stmt,
		     (splay_tree_value) ctx);
  ctx->stmt = stmt;

  if (outer_ctx)
    {
      ctx->outer = outer_ctx;
      ctx->cb = outer_ctx->cb;
      ctx->cb.block = NULL;
      ctx->depth = outer_ctx->depth + 1;
    }
  else
    {
      ctx->cb.src_fn = current_function_decl;
      ctx->cb.dst_fn = current_function_decl;
      ctx->cb.src_node = cgraph_node (current_function_decl);
      ctx->cb.dst_node = ctx->cb.src_node;
      ctx->cb.src_cfun = cfun;
      ctx->cb.copy_decl = omp_copy_decl;
      ctx->cb.eh_lp_nr = 0;
      ctx->cb.transform_call_graph_edges = CB_CGE_MOVE;
      ctx->depth = 1;
    }

  ctx->cb.decl_map = pointer_map_create ();

  return ctx;
}

static gimple_seq maybe_catch_exception (gimple_seq);

/* Finalize task copyfn.  */

static void
finalize_task_copyfn (gimple task_stmt)
{
  struct function *child_cfun;
  tree child_fn, old_fn;
  gimple_seq seq, new_seq;
  gimple bind;

  child_fn = gimple_omp_task_copy_fn (task_stmt);
  if (child_fn == NULL_TREE)
    return;

  child_cfun = DECL_STRUCT_FUNCTION (child_fn);

  /* Inform the callgraph about the new function.  */
  DECL_STRUCT_FUNCTION (child_fn)->curr_properties
    = cfun->curr_properties;

  old_fn = current_function_decl;
  push_cfun (child_cfun);
  current_function_decl = child_fn;
  bind = gimplify_body (&DECL_SAVED_TREE (child_fn), child_fn, false);
  seq = gimple_seq_alloc ();
  gimple_seq_add_stmt (&seq, bind);
  new_seq = maybe_catch_exception (seq);
  if (new_seq != seq)
    {
      bind = gimple_build_bind (NULL, new_seq, NULL);
      seq = gimple_seq_alloc ();
      gimple_seq_add_stmt (&seq, bind);
    }
  gimple_set_body (child_fn, seq);
  pop_cfun ();
  current_function_decl = old_fn;

  cgraph_add_new_function (child_fn, false);
}

/* Destroy an omp_context data structure.  Called through the splay tree
   value delete callback.  */

static void
delete_omp_context (splay_tree_value value)
{
  omp_context *ctx = (omp_context *) value;

  pointer_map_destroy (ctx->cb.decl_map);

  if (ctx->field_map)
    splay_tree_delete (ctx->field_map);
  if (ctx->sfield_map)
    splay_tree_delete (ctx->sfield_map);

  /* We hijacked DECL_ABSTRACT_ORIGIN earlier.  We need to clear it before
     it produces corrupt debug information.  */
  if (ctx->record_type)
    {
      tree t;
      for (t = TYPE_FIELDS (ctx->record_type); t ; t = TREE_CHAIN (t))
	DECL_ABSTRACT_ORIGIN (t) = NULL;
    }
  if (ctx->srecord_type)
    {
      tree t;
      for (t = TYPE_FIELDS (ctx->srecord_type); t ; t = TREE_CHAIN (t))
	DECL_ABSTRACT_ORIGIN (t) = NULL;
    }

  if (is_task_ctx (ctx))
    finalize_task_copyfn (ctx->stmt);

  XDELETE (ctx);
}

/* Fix up RECEIVER_DECL with a type that has been remapped to the child
   context.  */

static void
fixup_child_record_type (omp_context *ctx)
{
  tree f, type = ctx->record_type;

  /* ??? It isn't sufficient to just call remap_type here, because
     variably_modified_type_p doesn't work the way we expect for
     record types.  Testing each field for whether it needs remapping
     and creating a new record by hand works, however.  */
  for (f = TYPE_FIELDS (type); f ; f = TREE_CHAIN (f))
    if (variably_modified_type_p (TREE_TYPE (f), ctx->cb.src_fn))
      break;
  if (f)
    {
      tree name, new_fields = NULL;

      type = lang_hooks.types.make_type (RECORD_TYPE);
      name = DECL_NAME (TYPE_NAME (ctx->record_type));
      name = build_decl (DECL_SOURCE_LOCATION (ctx->receiver_decl),
			 TYPE_DECL, name, type);
      TYPE_NAME (type) = name;

      for (f = TYPE_FIELDS (ctx->record_type); f ; f = TREE_CHAIN (f))
	{
	  tree new_f = copy_node (f);
	  DECL_CONTEXT (new_f) = type;
	  TREE_TYPE (new_f) = remap_type (TREE_TYPE (f), &ctx->cb);
	  TREE_CHAIN (new_f) = new_fields;
	  walk_tree (&DECL_SIZE (new_f), copy_tree_body_r, &ctx->cb, NULL);
	  walk_tree (&DECL_SIZE_UNIT (new_f), copy_tree_body_r,
		     &ctx->cb, NULL);
	  walk_tree (&DECL_FIELD_OFFSET (new_f), copy_tree_body_r,
		     &ctx->cb, NULL);
	  new_fields = new_f;

	  /* Arrange to be able to look up the receiver field
	     given the sender field.  */
	  splay_tree_insert (ctx->field_map, (splay_tree_key) f,
			     (splay_tree_value) new_f);
	}
      TYPE_FIELDS (type) = nreverse (new_fields);
      layout_type (type);
    }

  TREE_TYPE (ctx->receiver_decl) = build_pointer_type (type);
}

/* Instantiate decls as necessary in CTX to satisfy the data sharing
   specified by CLAUSES.  */

static void
scan_sharing_clauses (tree clauses, omp_context *ctx)
{
  tree c, decl;
  bool scan_array_reductions = false;

  for (c = clauses; c; c = OMP_CLAUSE_CHAIN (c))
    {
      bool by_ref;

      switch (OMP_CLAUSE_CODE (c))
	{
	case OMP_CLAUSE_PRIVATE:
	  decl = OMP_CLAUSE_DECL (c);
	  if (OMP_CLAUSE_PRIVATE_OUTER_REF (c))
	    goto do_private;
	  else if (!is_variable_sized (decl))
	    install_var_local (decl, ctx);
	  break;

	case OMP_CLAUSE_SHARED:
	  gcc_assert (is_taskreg_ctx (ctx));
	  decl = OMP_CLAUSE_DECL (c);
	  gcc_assert (!COMPLETE_TYPE_P (TREE_TYPE (decl))
		      || !is_variable_sized (decl));
	  /* Global variables don't need to be copied,
	     the receiver side will use them directly.  */
	  if (is_global_var (maybe_lookup_decl_in_outer_ctx (decl, ctx)))
	    break;
	  by_ref = use_pointer_for_field (decl, ctx);
	  if (! TREE_READONLY (decl)
	      || TREE_ADDRESSABLE (decl)
	      || by_ref
	      || is_reference (decl))
	    {
	      install_var_field (decl, by_ref, 3, ctx);
	      install_var_local (decl, ctx);
	      break;
	    }
	  /* We don't need to copy const scalar vars back.  */
	  OMP_CLAUSE_SET_CODE (c, OMP_CLAUSE_FIRSTPRIVATE);
	  goto do_private;

	case OMP_CLAUSE_LASTPRIVATE:
	  /* Let the corresponding firstprivate clause create
	     the variable.  */
	  if (OMP_CLAUSE_LASTPRIVATE_FIRSTPRIVATE (c))
	    break;
	  /* FALLTHRU */

	case OMP_CLAUSE_FIRSTPRIVATE:
	case OMP_CLAUSE_REDUCTION:
	  decl = OMP_CLAUSE_DECL (c);
	do_private:
	  if (is_variable_sized (decl))
	    {
	      if (is_task_ctx (ctx))
		install_var_field (decl, false, 1, ctx);
	      break;
	    }
	  else if (is_taskreg_ctx (ctx))
	    {
	      bool global
		= is_global_var (maybe_lookup_decl_in_outer_ctx (decl, ctx));
	      by_ref = use_pointer_for_field (decl, NULL);

	      if (is_task_ctx (ctx)
		  && (global || by_ref || is_reference (decl)))
		{
		  install_var_field (decl, false, 1, ctx);
		  if (!global)
		    install_var_field (decl, by_ref, 2, ctx);
		}
	      else if (!global)
		install_var_field (decl, by_ref, 3, ctx);
	    }
	  install_var_local (decl, ctx);
	  break;

	case OMP_CLAUSE_COPYPRIVATE:
	case OMP_CLAUSE_COPYIN:
	  decl = OMP_CLAUSE_DECL (c);
	  by_ref = use_pointer_for_field (decl, NULL);
	  install_var_field (decl, by_ref, 3, ctx);
	  break;

	case OMP_CLAUSE_DEFAULT:
	  ctx->default_kind = OMP_CLAUSE_DEFAULT_KIND (c);
	  break;

	case OMP_CLAUSE_IF:
	case OMP_CLAUSE_NUM_THREADS:
	case OMP_CLAUSE_SCHEDULE:
	  if (ctx->outer)
	    scan_omp_op (&OMP_CLAUSE_OPERAND (c, 0), ctx->outer);
	  break;

	case OMP_CLAUSE_NOWAIT:
	case OMP_CLAUSE_ORDERED:
	case OMP_CLAUSE_COLLAPSE:
	case OMP_CLAUSE_UNTIED:
	  break;

	default:
	  gcc_unreachable ();
	}
    }

  for (c = clauses; c; c = OMP_CLAUSE_CHAIN (c))
    {
      switch (OMP_CLAUSE_CODE (c))
	{
	case OMP_CLAUSE_LASTPRIVATE:
	  /* Let the corresponding firstprivate clause create
	     the variable.  */
	  if (OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c))
	    scan_array_reductions = true;
	  if (OMP_CLAUSE_LASTPRIVATE_FIRSTPRIVATE (c))
	    break;
	  /* FALLTHRU */

	case OMP_CLAUSE_PRIVATE:
	case OMP_CLAUSE_FIRSTPRIVATE:
	case OMP_CLAUSE_REDUCTION:
	  decl = OMP_CLAUSE_DECL (c);
	  if (is_variable_sized (decl))
	    install_var_local (decl, ctx);
	  fixup_remapped_decl (decl, ctx,
			       OMP_CLAUSE_CODE (c) == OMP_CLAUSE_PRIVATE
			       && OMP_CLAUSE_PRIVATE_DEBUG (c));
	  if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_REDUCTION
	      && OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
	    scan_array_reductions = true;
	  break;

	case OMP_CLAUSE_SHARED:
	  decl = OMP_CLAUSE_DECL (c);
	  if (! is_global_var (maybe_lookup_decl_in_outer_ctx (decl, ctx)))
	    fixup_remapped_decl (decl, ctx, false);
	  break;

	case OMP_CLAUSE_COPYPRIVATE:
	case OMP_CLAUSE_COPYIN:
	case OMP_CLAUSE_DEFAULT:
	case OMP_CLAUSE_IF:
	case OMP_CLAUSE_NUM_THREADS:
	case OMP_CLAUSE_SCHEDULE:
	case OMP_CLAUSE_NOWAIT:
	case OMP_CLAUSE_ORDERED:
	case OMP_CLAUSE_COLLAPSE:
	case OMP_CLAUSE_UNTIED:
	  break;

	default:
	  gcc_unreachable ();
	}
    }

  if (scan_array_reductions)
    for (c = clauses; c; c = OMP_CLAUSE_CHAIN (c))
      if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_REDUCTION
	  && OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
	{
	  scan_omp (OMP_CLAUSE_REDUCTION_GIMPLE_INIT (c), ctx);
	  scan_omp (OMP_CLAUSE_REDUCTION_GIMPLE_MERGE (c), ctx);
	}
      else if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_LASTPRIVATE
	       && OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c))
	scan_omp (OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c), ctx);
}

/* Create a new name for omp child function.  Returns an identifier.  */

static GTY(()) unsigned int tmp_ompfn_id_num;

static tree
create_omp_child_function_name (bool task_copy)
{
  return (clone_function_name (current_function_decl,
			       task_copy ? "_omp_cpyfn" : "_omp_fn"));
}

/* Build a decl for the omp child function.  It'll not contain a body
   yet, just the bare decl.  */

static void
create_omp_child_function (omp_context *ctx, bool task_copy)
{
  tree decl, type, name, t;

  name = create_omp_child_function_name (task_copy);
  if (task_copy)
    type = build_function_type_list (void_type_node, ptr_type_node,
				     ptr_type_node, NULL_TREE);
  else
    type = build_function_type_list (void_type_node, ptr_type_node, NULL_TREE);

  decl = build_decl (gimple_location (ctx->stmt),
		     FUNCTION_DECL, name, type);

  if (!task_copy)
    ctx->cb.dst_fn = decl;
  else
    gimple_omp_task_set_copy_fn (ctx->stmt, decl);

  TREE_STATIC (decl) = 1;
  TREE_USED (decl) = 1;
  DECL_ARTIFICIAL (decl) = 1;
  DECL_IGNORED_P (decl) = 0;
  TREE_PUBLIC (decl) = 0;
  DECL_UNINLINABLE (decl) = 1;
  DECL_EXTERNAL (decl) = 0;
  DECL_CONTEXT (decl) = NULL_TREE;
  DECL_INITIAL (decl) = make_node (BLOCK);

  t = build_decl (DECL_SOURCE_LOCATION (decl),
		  RESULT_DECL, NULL_TREE, void_type_node);
  DECL_ARTIFICIAL (t) = 1;
  DECL_IGNORED_P (t) = 1;
  DECL_CONTEXT (t) = decl;
  DECL_RESULT (decl) = t;

  t = build_decl (DECL_SOURCE_LOCATION (decl),
		  PARM_DECL, get_identifier (".omp_data_i"), ptr_type_node);
  DECL_ARTIFICIAL (t) = 1;
  DECL_ARG_TYPE (t) = ptr_type_node;
  DECL_CONTEXT (t) = current_function_decl;
  TREE_USED (t) = 1;
  DECL_ARGUMENTS (decl) = t;
  if (!task_copy)
    ctx->receiver_decl = t;
  else
    {
      t = build_decl (DECL_SOURCE_LOCATION (decl),
		      PARM_DECL, get_identifier (".omp_data_o"),
		      ptr_type_node);
      DECL_ARTIFICIAL (t) = 1;
      DECL_ARG_TYPE (t) = ptr_type_node;
      DECL_CONTEXT (t) = current_function_decl;
      TREE_USED (t) = 1;
      TREE_ADDRESSABLE (t) = 1;
      TREE_CHAIN (t) = DECL_ARGUMENTS (decl);
      DECL_ARGUMENTS (decl) = t;
    }

  /* Allocate memory for the function structure.  The call to
     allocate_struct_function clobbers CFUN, so we need to restore
     it afterward.  */
  push_struct_function (decl);
  cfun->function_end_locus = gimple_location (ctx->stmt);
  pop_cfun ();
}

/* Scan an OpenMP parallel directive.  */

static void
scan_omp_parallel (gimple_stmt_iterator *gsi, omp_context *outer_ctx)
{
  omp_context *ctx;
  tree name;
  gimple stmt = gsi_stmt (*gsi);

  /* Ignore parallel directives with empty bodies, unless there
     are copyin clauses.  */
  if (optimize > 0
      && empty_body_p (gimple_omp_body (stmt))
      && find_omp_clause (gimple_omp_parallel_clauses (stmt),
			  OMP_CLAUSE_COPYIN) == NULL)
    {
      gsi_replace (gsi, gimple_build_nop (), false);
      return;
    }

  ctx = new_omp_context (stmt, outer_ctx);
  if (taskreg_nesting_level > 1)
    ctx->is_nested = true;
  ctx->field_map = splay_tree_new (splay_tree_compare_pointers, 0, 0);
  ctx->default_kind = OMP_CLAUSE_DEFAULT_SHARED;
  ctx->record_type = lang_hooks.types.make_type (RECORD_TYPE);
  name = create_tmp_var_name (".omp_data_s");
  name = build_decl (gimple_location (stmt),
		     TYPE_DECL, name, ctx->record_type);
  TYPE_NAME (ctx->record_type) = name;
  create_omp_child_function (ctx, false);
  gimple_omp_parallel_set_child_fn (stmt, ctx->cb.dst_fn);

  scan_sharing_clauses (gimple_omp_parallel_clauses (stmt), ctx);
  scan_omp (gimple_omp_body (stmt), ctx);

  if (TYPE_FIELDS (ctx->record_type) == NULL)
    ctx->record_type = ctx->receiver_decl = NULL;
  else
    {
      layout_type (ctx->record_type);
      fixup_child_record_type (ctx);
    }
}

/* Scan an OpenMP task directive.  */

static void
scan_omp_task (gimple_stmt_iterator *gsi, omp_context *outer_ctx)
{
  omp_context *ctx;
  tree name, t;
  gimple stmt = gsi_stmt (*gsi);
  location_t loc = gimple_location (stmt);

  /* Ignore task directives with empty bodies.  */
  if (optimize > 0
      && empty_body_p (gimple_omp_body (stmt)))
    {
      gsi_replace (gsi, gimple_build_nop (), false);
      return;
    }

  ctx = new_omp_context (stmt, outer_ctx);
  if (taskreg_nesting_level > 1)
    ctx->is_nested = true;
  ctx->field_map = splay_tree_new (splay_tree_compare_pointers, 0, 0);
  ctx->default_kind = OMP_CLAUSE_DEFAULT_SHARED;
  ctx->record_type = lang_hooks.types.make_type (RECORD_TYPE);
  name = create_tmp_var_name (".omp_data_s");
  name = build_decl (gimple_location (stmt),
		     TYPE_DECL, name, ctx->record_type);
  TYPE_NAME (ctx->record_type) = name;
  create_omp_child_function (ctx, false);
  gimple_omp_task_set_child_fn (stmt, ctx->cb.dst_fn);

  scan_sharing_clauses (gimple_omp_task_clauses (stmt), ctx);

  if (ctx->srecord_type)
    {
      name = create_tmp_var_name (".omp_data_a");
      name = build_decl (gimple_location (stmt),
			 TYPE_DECL, name, ctx->srecord_type);
      TYPE_NAME (ctx->srecord_type) = name;
      create_omp_child_function (ctx, true);
    }

  scan_omp (gimple_omp_body (stmt), ctx);

  if (TYPE_FIELDS (ctx->record_type) == NULL)
    {
      ctx->record_type = ctx->receiver_decl = NULL;
      t = build_int_cst (long_integer_type_node, 0);
      gimple_omp_task_set_arg_size (stmt, t);
      t = build_int_cst (long_integer_type_node, 1);
      gimple_omp_task_set_arg_align (stmt, t);
    }
  else
    {
      tree *p, vla_fields = NULL_TREE, *q = &vla_fields;
      /* Move VLA fields to the end.  */
      p = &TYPE_FIELDS (ctx->record_type);
      while (*p)
	if (!TYPE_SIZE_UNIT (TREE_TYPE (*p))
	    || ! TREE_CONSTANT (TYPE_SIZE_UNIT (TREE_TYPE (*p))))
	  {
	    *q = *p;
	    *p = TREE_CHAIN (*p);
	    TREE_CHAIN (*q) = NULL_TREE;
	    q = &TREE_CHAIN (*q);
	  }
	else
	  p = &TREE_CHAIN (*p);
      *p = vla_fields;
      layout_type (ctx->record_type);
      fixup_child_record_type (ctx);
      if (ctx->srecord_type)
	layout_type (ctx->srecord_type);
      t = fold_convert_loc (loc, long_integer_type_node,
			    TYPE_SIZE_UNIT (ctx->record_type));
      gimple_omp_task_set_arg_size (stmt, t);
      t = build_int_cst (long_integer_type_node,
			 TYPE_ALIGN_UNIT (ctx->record_type));
      gimple_omp_task_set_arg_align (stmt, t);
    }
}

/* Scan an OpenMP loop directive.  */

static void
scan_omp_for (gimple stmt, omp_context *outer_ctx)
{
  omp_context *ctx;
  size_t i;

  ctx = new_omp_context (stmt, outer_ctx);

  scan_sharing_clauses (gimple_omp_for_clauses (stmt), ctx);

  scan_omp (gimple_omp_for_pre_body (stmt), ctx);
  for (i = 0; i < gimple_omp_for_collapse (stmt); i++)
    {
      scan_omp_op (gimple_omp_for_index_ptr (stmt, i), ctx);
      scan_omp_op (gimple_omp_for_initial_ptr (stmt, i), ctx);
      scan_omp_op (gimple_omp_for_final_ptr (stmt, i), ctx);
      scan_omp_op (gimple_omp_for_incr_ptr (stmt, i), ctx);
    }
  scan_omp (gimple_omp_body (stmt), ctx);
}

/* Scan an OpenMP sections directive.  */

static void
scan_omp_sections (gimple stmt, omp_context *outer_ctx)
{
  omp_context *ctx;

  ctx = new_omp_context (stmt, outer_ctx);
  scan_sharing_clauses (gimple_omp_sections_clauses (stmt), ctx);
  scan_omp (gimple_omp_body (stmt), ctx);
}

/* Scan an OpenMP single directive.  */

static void
scan_omp_single (gimple stmt, omp_context *outer_ctx)
{
  omp_context *ctx;
  tree name;

  ctx = new_omp_context (stmt, outer_ctx);
  ctx->field_map = splay_tree_new (splay_tree_compare_pointers, 0, 0);
  ctx->record_type = lang_hooks.types.make_type (RECORD_TYPE);
  name = create_tmp_var_name (".omp_copy_s");
  name = build_decl (gimple_location (stmt),
		     TYPE_DECL, name, ctx->record_type);
  TYPE_NAME (ctx->record_type) = name;

  scan_sharing_clauses (gimple_omp_single_clauses (stmt), ctx);
  scan_omp (gimple_omp_body (stmt), ctx);

  if (TYPE_FIELDS (ctx->record_type) == NULL)
    ctx->record_type = NULL;
  else
    layout_type (ctx->record_type);
}

/* Check OpenMP nesting restrictions.  */
static void
check_omp_nesting_restrictions (gimple stmt, omp_context *ctx)
{
  switch (gimple_code (stmt))
    {
    case GIMPLE_OMP_FOR:
    case GIMPLE_OMP_SECTIONS:
    case GIMPLE_OMP_SINGLE:
    case GIMPLE_CALL:
      for (; ctx != NULL; ctx = ctx->outer)
	switch (gimple_code (ctx->stmt))
	  {
	  case GIMPLE_OMP_FOR:
	  case GIMPLE_OMP_SECTIONS:
	  case GIMPLE_OMP_SINGLE:
	  case GIMPLE_OMP_ORDERED:
	  case GIMPLE_OMP_MASTER:
	  case GIMPLE_OMP_TASK:
	    if (is_gimple_call (stmt))
	      {
		warning (0, "barrier region may not be closely nested inside "
			    "of work-sharing, critical, ordered, master or "
			    "explicit task region");
		return;
	      }
	    warning (0, "work-sharing region may not be closely nested inside "
			"of work-sharing, critical, ordered, master or explicit "
			"task region");
	    return;
	  case GIMPLE_OMP_PARALLEL:
	    return;
	  default:
	    break;
	  }
      break;
    case GIMPLE_OMP_MASTER:
      for (; ctx != NULL; ctx = ctx->outer)
	switch (gimple_code (ctx->stmt))
	  {
	  case GIMPLE_OMP_FOR:
	  case GIMPLE_OMP_SECTIONS:
	  case GIMPLE_OMP_SINGLE:
	  case GIMPLE_OMP_TASK:
	    warning (0, "master region may not be closely nested inside "
			"of work-sharing or explicit task region");
	    return;
	  case GIMPLE_OMP_PARALLEL:
	    return;
	  default:
	    break;
	  }
      break;
    case GIMPLE_OMP_ORDERED:
      for (; ctx != NULL; ctx = ctx->outer)
	switch (gimple_code (ctx->stmt))
	  {
	  case GIMPLE_OMP_CRITICAL:
	  case GIMPLE_OMP_TASK:
	    warning (0, "ordered region may not be closely nested inside "
			"of critical or explicit task region");
	    return;
	  case GIMPLE_OMP_FOR:
	    if (find_omp_clause (gimple_omp_for_clauses (ctx->stmt),
				 OMP_CLAUSE_ORDERED) == NULL)
	      warning (0, "ordered region must be closely nested inside "
			  "a loop region with an ordered clause");
	    return;
	  case GIMPLE_OMP_PARALLEL:
	    return;
	  default:
	    break;
	  }
      break;
    case GIMPLE_OMP_CRITICAL:
      for (; ctx != NULL; ctx = ctx->outer)
	if (gimple_code (ctx->stmt) == GIMPLE_OMP_CRITICAL
	    && (gimple_omp_critical_name (stmt)
		== gimple_omp_critical_name (ctx->stmt)))
	  {
	    warning (0, "critical region may not be nested inside a critical "
			"region with the same name");
	    return;
	  }
      break;
    default:
      break;
    }
}

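/* Example of a nesting violation diagnosed above (illustrative only):

	#pragma omp parallel
	#pragma omp master
	  {
	    #pragma omp for	    <- "work-sharing region may not be
	    for (i = 0; i < n; i++)	closely nested inside of ...
	      ...			master ... region"
	  }

   The walks above stop at the innermost enclosing parallel, since a
   new parallel region starts a fresh nesting context.  */
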
/* Helper function for scan_omp.

   Callback for walk_tree or operators in walk_gimple_stmt used to
   scan for OpenMP directives in TP.  */

static tree
scan_omp_1_op (tree *tp, int *walk_subtrees, void *data)
{
  struct walk_stmt_info *wi = (struct walk_stmt_info *) data;
  omp_context *ctx = (omp_context *) wi->info;
  tree t = *tp;

  switch (TREE_CODE (t))
    {
    case VAR_DECL:
    case PARM_DECL:
    case LABEL_DECL:
    case RESULT_DECL:
      if (ctx)
	*tp = remap_decl (t, &ctx->cb);
      break;

    default:
      if (ctx && TYPE_P (t))
	*tp = remap_type (t, &ctx->cb);
      else if (!DECL_P (t))
	{
	  *walk_subtrees = 1;
	  if (ctx)
	    {
	      tree tem = remap_type (TREE_TYPE (t), &ctx->cb);
	      if (tem != TREE_TYPE (t))
		{
		  if (TREE_CODE (t) == INTEGER_CST)
		    *tp = build_int_cst_wide (tem,
					      TREE_INT_CST_LOW (t),
					      TREE_INT_CST_HIGH (t));
		  else
		    TREE_TYPE (t) = tem;
		}
	    }
	}
      break;
    }

  return NULL_TREE;
}

/* Helper function for scan_omp.

   Callback for walk_gimple_stmt used to scan for OpenMP directives in
   the current statement in GSI.  */

static tree
scan_omp_1_stmt (gimple_stmt_iterator *gsi, bool *handled_ops_p,
		 struct walk_stmt_info *wi)
{
  gimple stmt = gsi_stmt (*gsi);
  omp_context *ctx = (omp_context *) wi->info;

  if (gimple_has_location (stmt))
    input_location = gimple_location (stmt);

  /* Check the OpenMP nesting restrictions.  */
  if (ctx != NULL)
    {
      if (is_gimple_omp (stmt))
	check_omp_nesting_restrictions (stmt, ctx);
      else if (is_gimple_call (stmt))
	{
	  tree fndecl = gimple_call_fndecl (stmt);
	  if (fndecl && DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_NORMAL
	      && DECL_FUNCTION_CODE (fndecl) == BUILT_IN_GOMP_BARRIER)
	    check_omp_nesting_restrictions (stmt, ctx);
	}
    }

  *handled_ops_p = true;

  switch (gimple_code (stmt))
    {
    case GIMPLE_OMP_PARALLEL:
      taskreg_nesting_level++;
      scan_omp_parallel (gsi, ctx);
      taskreg_nesting_level--;
      break;

    case GIMPLE_OMP_TASK:
      taskreg_nesting_level++;
      scan_omp_task (gsi, ctx);
      taskreg_nesting_level--;
      break;

    case GIMPLE_OMP_FOR:
      scan_omp_for (stmt, ctx);
      break;

    case GIMPLE_OMP_SECTIONS:
      scan_omp_sections (stmt, ctx);
      break;

    case GIMPLE_OMP_SINGLE:
      scan_omp_single (stmt, ctx);
      break;

    case GIMPLE_OMP_SECTION:
    case GIMPLE_OMP_MASTER:
    case GIMPLE_OMP_ORDERED:
    case GIMPLE_OMP_CRITICAL:
      ctx = new_omp_context (stmt, ctx);
      scan_omp (gimple_omp_body (stmt), ctx);
      break;

    case GIMPLE_BIND:
      {
	tree var;

	*handled_ops_p = false;
	if (ctx)
	  for (var = gimple_bind_vars (stmt); var ; var = TREE_CHAIN (var))
	    insert_decl_map (&ctx->cb, var, var);
      }
      break;
    default:
      *handled_ops_p = false;
      break;
    }

  return NULL_TREE;
}

/* Scan all the statements starting at the current statement.  CTX
   contains context information about the OpenMP directives and
   clauses found during the scan.  */

static void
scan_omp (gimple_seq body, omp_context *ctx)
{
  location_t saved_location;
  struct walk_stmt_info wi;

  memset (&wi, 0, sizeof (wi));
  wi.info = ctx;
  wi.want_locations = true;

  saved_location = input_location;
  walk_gimple_seq (body, scan_omp_1_stmt, scan_omp_1_op, &wi);
  input_location = saved_location;
}

/* Re-gimplification and code generation routines.  */

/* Build a call to GOMP_barrier.  */

static tree
build_omp_barrier (void)
{
  return build_call_expr (built_in_decls[BUILT_IN_GOMP_BARRIER], 0);
}

/* If a context was created for STMT when it was scanned, return it.  */

static omp_context *
maybe_lookup_ctx (gimple stmt)
{
  splay_tree_node n;
  n = splay_tree_lookup (all_contexts, (splay_tree_key) stmt);
  return n ? (omp_context *) n->value : NULL;
}

/* Find the mapping for DECL in CTX or the immediately enclosing
   context that has a mapping for DECL.

   If CTX is a nested parallel directive, we may have to use the decl
   mappings created in CTX's parent context.  Suppose that we have the
   following parallel nesting (variable UIDs showed for clarity):

	iD.1562 = 0;
	#omp parallel shared(iD.1562)		-> outer parallel
	  iD.1562 = iD.1562 + 1;

	  #omp parallel shared (iD.1562)	-> inner parallel
	    iD.1562 = iD.1562 - 1;

   Each parallel structure will create a distinct .omp_data_s structure
   for copying iD.1562 in/out of the directive:

	outer parallel		.omp_data_s.1.i -> iD.1562
	inner parallel		.omp_data_s.2.i -> iD.1562

   A shared variable mapping will produce a copy-out operation before
   the parallel directive and a copy-in operation after it.  So, in
   this case we would have:

	iD.1562 = 0;
	.omp_data_o.1.i = iD.1562;
	#omp parallel shared(iD.1562)		-> outer parallel
	  .omp_data_i.1 = &.omp_data_o.1
	  .omp_data_i.1->i = .omp_data_i.1->i + 1;

	  .omp_data_o.2.i = iD.1562;		-> **
	  #omp parallel shared(iD.1562)		-> inner parallel
	    .omp_data_i.2 = &.omp_data_o.2
	    .omp_data_i.2->i = .omp_data_i.2->i - 1;

	    ** This is a problem.  The symbol iD.1562 cannot be referenced
	       inside the body of the outer parallel region.  But since we are
	       emitting this copy operation while expanding the inner parallel
	       directive, we need to access the CTX structure of the outer
	       parallel directive to get the correct mapping:

		  .omp_data_o.2.i = .omp_data_i.1->i

   Since there may be other workshare or parallel directives enclosing
   the parallel directive, it may be necessary to walk up the context
   parent chain.  This is not a problem in general because nested
   parallelism happens only rarely.  */

static tree
lookup_decl_in_outer_ctx (tree decl, omp_context *ctx)
{
  tree t;
  omp_context *up;

  for (up = ctx->outer, t = NULL; up && t == NULL; up = up->outer)
    t = maybe_lookup_decl (decl, up);

  gcc_assert (!ctx->is_nested || t || is_global_var (decl));

  return t ? t : decl;
}

/* Similar to lookup_decl_in_outer_ctx, but return DECL if not found
   in outer contexts.  */

static tree
maybe_lookup_decl_in_outer_ctx (tree decl, omp_context *ctx)
{
  tree t = NULL;
  omp_context *up;

  for (up = ctx->outer, t = NULL; up && t == NULL; up = up->outer)
    t = maybe_lookup_decl (decl, up);

  return t ? t : decl;
}

/* Construct the initialization value for reduction CLAUSE.  */

tree
omp_reduction_init (tree clause, tree type)
{
  location_t loc = OMP_CLAUSE_LOCATION (clause);
  switch (OMP_CLAUSE_REDUCTION_CODE (clause))
    {
    case PLUS_EXPR:
    case MINUS_EXPR:
    case BIT_IOR_EXPR:
    case BIT_XOR_EXPR:
    case TRUTH_OR_EXPR:
    case TRUTH_ORIF_EXPR:
    case TRUTH_XOR_EXPR:
    case NE_EXPR:
      return fold_convert_loc (loc, type, integer_zero_node);

    case MULT_EXPR:
    case TRUTH_AND_EXPR:
    case TRUTH_ANDIF_EXPR:
    case EQ_EXPR:
      return fold_convert_loc (loc, type, integer_one_node);

    case BIT_AND_EXPR:
      return fold_convert_loc (loc, type, integer_minus_one_node);

    case MAX_EXPR:
      if (SCALAR_FLOAT_TYPE_P (type))
	{
	  REAL_VALUE_TYPE max, min;
	  if (HONOR_INFINITIES (TYPE_MODE (type)))
	    {
	      real_inf (&max);
	      real_arithmetic (&min, NEGATE_EXPR, &max, NULL);
	    }
	  else
	    real_maxval (&min, 1, TYPE_MODE (type));
	  return build_real (type, min);
	}
      else
	{
	  gcc_assert (INTEGRAL_TYPE_P (type));
	  return TYPE_MIN_VALUE (type);
	}

    case MIN_EXPR:
      if (SCALAR_FLOAT_TYPE_P (type))
	{
	  REAL_VALUE_TYPE max;
	  if (HONOR_INFINITIES (TYPE_MODE (type)))
	    real_inf (&max);
	  else
	    real_maxval (&max, 0, TYPE_MODE (type));
	  return build_real (type, max);
	}
      else
	{
	  gcc_assert (INTEGRAL_TYPE_P (type));
	  return TYPE_MAX_VALUE (type);
	}

    default:
      gcc_unreachable ();
    }
}

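/* For example (illustrative only): for

	#pragma omp parallel for reduction (+:sum) reduction (max:m)

   each thread's private SUM starts at 0 and private M starts at the
   minimum value of its type (or -inf for floats honoring infinities),
   so combining the partial results with '+' and 'max' yields the
   correct final values.  */
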
2212 /* Generate code to implement the input clauses, FIRSTPRIVATE and COPYIN,
2213 from the receiver (aka child) side and initializers for REFERENCE_TYPE
2214 private variables. Initialization statements go in ILIST, while calls
2215 to destructors go in DLIST. */
2218 lower_rec_input_clauses (tree clauses, gimple_seq *ilist, gimple_seq *dlist,
2221 gimple_stmt_iterator diter;
2222 tree c, dtor, copyin_seq, x, ptr;
2223 bool copyin_by_ref = false;
2224 bool lastprivate_firstprivate = false;
2227 *dlist = gimple_seq_alloc ();
2228 diter = gsi_start (*dlist);
2231 /* Do all the fixed sized types in the first pass, and the variable sized
2232 types in the second pass. This makes sure that the scalar arguments to
2233 the variable sized types are processed before we use them in the
2234 variable sized operations. */
2235 for (pass = 0; pass < 2; ++pass)
2237 for (c = clauses; c ; c = OMP_CLAUSE_CHAIN (c))
2239 enum omp_clause_code c_kind = OMP_CLAUSE_CODE (c);
2242 location_t clause_loc = OMP_CLAUSE_LOCATION (c);
2246 case OMP_CLAUSE_PRIVATE:
2247 if (OMP_CLAUSE_PRIVATE_DEBUG (c))
2250 case OMP_CLAUSE_SHARED:
2251 if (maybe_lookup_decl (OMP_CLAUSE_DECL (c), ctx) == NULL)
2253 gcc_assert (is_global_var (OMP_CLAUSE_DECL (c)));
2256 case OMP_CLAUSE_FIRSTPRIVATE:
2257 case OMP_CLAUSE_COPYIN:
        case OMP_CLAUSE_REDUCTION:
          break;
2260 case OMP_CLAUSE_LASTPRIVATE:
2261 if (OMP_CLAUSE_LASTPRIVATE_FIRSTPRIVATE (c))
            {
              lastprivate_firstprivate = true;
              if (pass != 0)
                continue;
            }
          break;

        default:
          continue;
        }
2272 new_var = var = OMP_CLAUSE_DECL (c);
2273 if (c_kind != OMP_CLAUSE_COPYIN)
2274 new_var = lookup_decl (var, ctx);
2276 if (c_kind == OMP_CLAUSE_SHARED || c_kind == OMP_CLAUSE_COPYIN)
2281 else if (is_variable_sized (var))
2283 /* For variable sized types, we need to allocate the
2284 actual storage here. Call alloca and store the
2285 result in the pointer decl that we created elsewhere. */
              if (pass == 0)
                continue;

              if (c_kind != OMP_CLAUSE_FIRSTPRIVATE || !is_task_ctx (ctx))
                {
                  gimple stmt;
                  tree tmp;
2294 ptr = DECL_VALUE_EXPR (new_var);
2295 gcc_assert (TREE_CODE (ptr) == INDIRECT_REF);
2296 ptr = TREE_OPERAND (ptr, 0);
2297 gcc_assert (DECL_P (ptr));
2298 x = TYPE_SIZE_UNIT (TREE_TYPE (new_var));
2300 /* void *tmp = __builtin_alloca */
                  stmt
                    = gimple_build_call (built_in_decls[BUILT_IN_ALLOCA], 1, x);
2303 tmp = create_tmp_var_raw (ptr_type_node, NULL);
2304 gimple_add_tmp_var (tmp);
2305 gimple_call_set_lhs (stmt, tmp);
2307 gimple_seq_add_stmt (ilist, stmt);
2309 x = fold_convert_loc (clause_loc, TREE_TYPE (ptr), tmp);
2310 gimplify_assign (ptr, x, ilist);
2313 else if (is_reference (var))
2315 /* For references that are being privatized for Fortran,
2316 allocate new backing storage for the new pointer
2317 variable. This allows us to avoid changing all the
2318 code that expects a pointer to something that expects
2319 a direct variable. Note that this doesn't apply to
2320 C++, since reference types are disallowed in data
                 sharing clauses there, except for NRV optimized
                 return values.  */
2326 x = TYPE_SIZE_UNIT (TREE_TYPE (TREE_TYPE (new_var)));
2327 if (c_kind == OMP_CLAUSE_FIRSTPRIVATE && is_task_ctx (ctx))
2329 x = build_receiver_ref (var, false, ctx);
2330 x = build_fold_addr_expr_loc (clause_loc, x);
2332 else if (TREE_CONSTANT (x))
2334 const char *name = NULL;
2335 if (DECL_NAME (var))
2336 name = IDENTIFIER_POINTER (DECL_NAME (new_var));
                  x = create_tmp_var_raw (TREE_TYPE (TREE_TYPE (new_var)),
                                          name);
2340 gimple_add_tmp_var (x);
2341 TREE_ADDRESSABLE (x) = 1;
2342 x = build_fold_addr_expr_loc (clause_loc, x);
2346 x = build_call_expr_loc (clause_loc,
2347 built_in_decls[BUILT_IN_ALLOCA], 1, x);
2350 x = fold_convert_loc (clause_loc, TREE_TYPE (new_var), x);
2351 gimplify_assign (new_var, x, ilist);
2353 new_var = build_simple_mem_ref_loc (clause_loc, new_var);
2355 else if (c_kind == OMP_CLAUSE_REDUCTION
2356 && OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
2364 switch (OMP_CLAUSE_CODE (c))
2366 case OMP_CLAUSE_SHARED:
2367 /* Shared global vars are just accessed directly. */
2368 if (is_global_var (new_var))
2370 /* Set up the DECL_VALUE_EXPR for shared variables now. This
2371 needs to be delayed until after fixup_child_record_type so
2372 that we get the correct type during the dereference. */
2373 by_ref = use_pointer_for_field (var, ctx);
2374 x = build_receiver_ref (var, by_ref, ctx);
2375 SET_DECL_VALUE_EXPR (new_var, x);
2376 DECL_HAS_VALUE_EXPR_P (new_var) = 1;
2378 /* ??? If VAR is not passed by reference, and the variable
2379 hasn't been initialized yet, then we'll get a warning for
2380 the store into the omp_data_s structure. Ideally, we'd be
2381 able to notice this and not store anything at all, but
2382 we're generating code too early. Suppress the warning. */
2384 TREE_NO_WARNING (var) = 1;
2387 case OMP_CLAUSE_LASTPRIVATE:
          if (OMP_CLAUSE_LASTPRIVATE_FIRSTPRIVATE (c))
            break;
          /* FALLTHRU */
2392 case OMP_CLAUSE_PRIVATE:
2393 if (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_PRIVATE)
2394 x = build_outer_var_ref (var, ctx);
2395 else if (OMP_CLAUSE_PRIVATE_OUTER_REF (c))
2397 if (is_task_ctx (ctx))
2398 x = build_receiver_ref (var, false, ctx);
              else
                x = build_outer_var_ref (var, ctx);
2404 x = lang_hooks.decls.omp_clause_default_ctor (c, new_var, x);
2406 gimplify_and_add (x, ilist);
2410 x = lang_hooks.decls.omp_clause_dtor (c, new_var);
2413 gimple_seq tseq = NULL;
2416 gimplify_stmt (&dtor, &tseq);
2417 gsi_insert_seq_before (&diter, tseq, GSI_SAME_STMT);
2421 case OMP_CLAUSE_FIRSTPRIVATE:
2422 if (is_task_ctx (ctx))
2424 if (is_reference (var) || is_variable_sized (var))
          else if (is_global_var (maybe_lookup_decl_in_outer_ctx (var, ctx))
                   || use_pointer_for_field (var, NULL))
2430 x = build_receiver_ref (var, false, ctx);
2431 SET_DECL_VALUE_EXPR (new_var, x);
2432 DECL_HAS_VALUE_EXPR_P (new_var) = 1;
2436 x = build_outer_var_ref (var, ctx);
2437 x = lang_hooks.decls.omp_clause_copy_ctor (c, new_var, x);
2438 gimplify_and_add (x, ilist);
2442 case OMP_CLAUSE_COPYIN:
2443 by_ref = use_pointer_for_field (var, NULL);
2444 x = build_receiver_ref (var, by_ref, ctx);
2445 x = lang_hooks.decls.omp_clause_assign_op (c, new_var, x);
          append_to_statement_list (x, &copyin_seq);
2447 copyin_by_ref |= by_ref;
2450 case OMP_CLAUSE_REDUCTION:
2451 if (OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
2453 tree placeholder = OMP_CLAUSE_REDUCTION_PLACEHOLDER (c);
2454 x = build_outer_var_ref (var, ctx);
2456 if (is_reference (var))
2457 x = build_fold_addr_expr_loc (clause_loc, x);
2458 SET_DECL_VALUE_EXPR (placeholder, x);
2459 DECL_HAS_VALUE_EXPR_P (placeholder) = 1;
2460 lower_omp (OMP_CLAUSE_REDUCTION_GIMPLE_INIT (c), ctx);
2461 gimple_seq_add_seq (ilist,
2462 OMP_CLAUSE_REDUCTION_GIMPLE_INIT (c));
2463 OMP_CLAUSE_REDUCTION_GIMPLE_INIT (c) = NULL;
2464 DECL_HAS_VALUE_EXPR_P (placeholder) = 0;
2468 x = omp_reduction_init (c, TREE_TYPE (new_var));
2469 gcc_assert (TREE_CODE (TREE_TYPE (new_var)) != ARRAY_TYPE);
2470 gimplify_assign (new_var, x, ilist);
2480 /* The copyin sequence is not to be executed by the main thread, since
2481 that would result in self-copies. Perhaps not visible to scalars,
2482 but it certainly is to C++ operator=. */
2485 x = build_call_expr (built_in_decls[BUILT_IN_OMP_GET_THREAD_NUM], 0);
2486 x = build2 (NE_EXPR, boolean_type_node, x,
2487 build_int_cst (TREE_TYPE (x), 0));
2488 x = build3 (COND_EXPR, void_type_node, x, copyin_seq, NULL);
2489 gimplify_and_add (x, ilist);
2492 /* If any copyin variable is passed by reference, we must ensure the
2493 master thread doesn't modify it before it is copied over in all
2494 threads. Similarly for variables in both firstprivate and
2495 lastprivate clauses we need to ensure the lastprivate copying
2496 happens after firstprivate copying in all threads. */
2497 if (copyin_by_ref || lastprivate_firstprivate)
    gimplify_and_add (build_omp_barrier (), ilist);
}
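/* A minimal sketch (illustrative names only, not emitted verbatim) of
   the receiver-side code this produces for

       #pragma omp parallel firstprivate (a) copyin (t)

   where .omp_data_i is the received data record:

       a.1 = .omp_data_i->a;                      <-- firstprivate copy-in
       if (__builtin_omp_get_thread_num () != 0)
         t = .omp_data_i->t;                      <-- copyin; the master
                                                      skips its self-copy  */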
2502 /* Generate code to implement the LASTPRIVATE clauses. This is used for
   both parallel and workshare constructs.  PREDICATE may be NULL if it's
   always true.  */
2507 lower_lastprivate_clauses (tree clauses, tree predicate, gimple_seq *stmt_list,
2510 tree x, c, label = NULL;
2511 bool par_clauses = false;
2513 /* Early exit if there are no lastprivate clauses. */
2514 clauses = find_omp_clause (clauses, OMP_CLAUSE_LASTPRIVATE);
2515 if (clauses == NULL)
2517 /* If this was a workshare clause, see if it had been combined
2518 with its parallel. In that case, look for the clauses on the
2519 parallel statement itself. */
      if (is_parallel_ctx (ctx))
        return;

      ctx = ctx->outer;
      if (ctx == NULL || !is_parallel_ctx (ctx))
        return;

      clauses = find_omp_clause (gimple_omp_parallel_clauses (ctx->stmt),
                                 OMP_CLAUSE_LASTPRIVATE);
      if (clauses == NULL)
        return;
      par_clauses = true;
    }
  if (predicate)
    {
      gimple stmt;
      tree label_true, arm1, arm2;
2539 label = create_artificial_label (UNKNOWN_LOCATION);
2540 label_true = create_artificial_label (UNKNOWN_LOCATION);
2541 arm1 = TREE_OPERAND (predicate, 0);
2542 arm2 = TREE_OPERAND (predicate, 1);
2543 gimplify_expr (&arm1, stmt_list, NULL, is_gimple_val, fb_rvalue);
2544 gimplify_expr (&arm2, stmt_list, NULL, is_gimple_val, fb_rvalue);
      stmt = gimple_build_cond (TREE_CODE (predicate), arm1, arm2,
                                label_true, label);
2547 gimple_seq_add_stmt (stmt_list, stmt);
2548 gimple_seq_add_stmt (stmt_list, gimple_build_label (label_true));
2551 for (c = clauses; c ;)
2554 location_t clause_loc = OMP_CLAUSE_LOCATION (c);
2556 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_LASTPRIVATE)
2558 var = OMP_CLAUSE_DECL (c);
2559 new_var = lookup_decl (var, ctx);
2561 if (OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c))
2563 lower_omp (OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c), ctx);
2564 gimple_seq_add_seq (stmt_list,
2565 OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c));
2567 OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c) = NULL;
2569 x = build_outer_var_ref (var, ctx);
2570 if (is_reference (var))
2571 new_var = build_simple_mem_ref_loc (clause_loc, new_var);
2572 x = lang_hooks.decls.omp_clause_assign_op (c, x, new_var);
2573 gimplify_and_add (x, stmt_list);
2575 c = OMP_CLAUSE_CHAIN (c);
2576 if (c == NULL && !par_clauses)
2578 /* If this was a workshare clause, see if it had been combined
2579 with its parallel. In that case, continue looking for the
2580 clauses also on the parallel statement itself. */
          if (is_parallel_ctx (ctx))
            break;

          ctx = ctx->outer;
          if (ctx == NULL || !is_parallel_ctx (ctx))
            break;

          c = find_omp_clause (gimple_omp_parallel_clauses (ctx->stmt),
                               OMP_CLAUSE_LASTPRIVATE);
          par_clauses = true;
        }
    }
  if (label)
    gimple_seq_add_stmt (stmt_list, gimple_build_label (label));
}
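/* Sketch of the shape of the emitted guard (illustrative names): the
   lastprivate copy-out runs only in the thread that executed the last
   iteration, e.g. for "lastprivate (x)" on a loop:

       if (.iter_var == .last_iter)      <-- PREDICATE from the caller
         {
           x = x.1;                      <-- copy private value back out
         }                                                              */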
2599 /* Generate code to implement the REDUCTION clauses. */
2602 lower_reduction_clauses (tree clauses, gimple_seq *stmt_seqp, omp_context *ctx)
2604 gimple_seq sub_seq = NULL;
2609 /* First see if there is exactly one reduction clause. Use OMP_ATOMIC
2610 update in that case, otherwise use a lock. */
2611 for (c = clauses; c && count < 2; c = OMP_CLAUSE_CHAIN (c))
2612 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_REDUCTION)
2614 if (OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
2616 /* Never use OMP_ATOMIC for array reductions. */
2626 for (c = clauses; c ; c = OMP_CLAUSE_CHAIN (c))
2628 tree var, ref, new_var;
2629 enum tree_code code;
2630 location_t clause_loc = OMP_CLAUSE_LOCATION (c);
2632 if (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_REDUCTION)
2635 var = OMP_CLAUSE_DECL (c);
2636 new_var = lookup_decl (var, ctx);
2637 if (is_reference (var))
2638 new_var = build_simple_mem_ref_loc (clause_loc, new_var);
2639 ref = build_outer_var_ref (var, ctx);
2640 code = OMP_CLAUSE_REDUCTION_CODE (c);
2642 /* reduction(-:var) sums up the partial results, so it acts
2643 identically to reduction(+:var). */
      if (code == MINUS_EXPR)
        code = PLUS_EXPR;
2649 tree addr = build_fold_addr_expr_loc (clause_loc, ref);
2651 addr = save_expr (addr);
2652 ref = build1 (INDIRECT_REF, TREE_TYPE (TREE_TYPE (addr)), addr);
2653 x = fold_build2_loc (clause_loc, code, TREE_TYPE (ref), ref, new_var);
2654 x = build2 (OMP_ATOMIC, void_type_node, addr, x);
2655 gimplify_and_add (x, stmt_seqp);
2659 if (OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
2661 tree placeholder = OMP_CLAUSE_REDUCTION_PLACEHOLDER (c);
2663 if (is_reference (var))
2664 ref = build_fold_addr_expr_loc (clause_loc, ref);
2665 SET_DECL_VALUE_EXPR (placeholder, ref);
2666 DECL_HAS_VALUE_EXPR_P (placeholder) = 1;
2667 lower_omp (OMP_CLAUSE_REDUCTION_GIMPLE_MERGE (c), ctx);
2668 gimple_seq_add_seq (&sub_seq, OMP_CLAUSE_REDUCTION_GIMPLE_MERGE (c));
2669 OMP_CLAUSE_REDUCTION_GIMPLE_MERGE (c) = NULL;
2670 OMP_CLAUSE_REDUCTION_PLACEHOLDER (c) = NULL;
2674 x = build2 (code, TREE_TYPE (ref), ref, new_var);
2675 ref = build_outer_var_ref (var, ctx);
2676 gimplify_assign (ref, x, &sub_seq);
2680 stmt = gimple_build_call (built_in_decls[BUILT_IN_GOMP_ATOMIC_START], 0);
2681 gimple_seq_add_stmt (stmt_seqp, stmt);
2683 gimple_seq_add_seq (stmt_seqp, sub_seq);
  stmt = gimple_build_call (built_in_decls[BUILT_IN_GOMP_ATOMIC_END], 0);
  gimple_seq_add_stmt (stmt_seqp, stmt);
}
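/* Sketch of the two merge strategies (illustrative names).  A single
   scalar reduction, e.g. "reduction (+:sum)", merges atomically:

       #pragma omp atomic
       sum = sum + sum.1;

   Two or more reductions (or an array reduction) are merged inside a
   single lock instead:

       GOMP_atomic_start ();
       sum = sum + sum.1;
       prod = prod * prod.1;
       GOMP_atomic_end ();                                              */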
2690 /* Generate code to implement the COPYPRIVATE clauses. */
2693 lower_copyprivate_clauses (tree clauses, gimple_seq *slist, gimple_seq *rlist,
2698 for (c = clauses; c ; c = OMP_CLAUSE_CHAIN (c))
2700 tree var, new_var, ref, x;
2702 location_t clause_loc = OMP_CLAUSE_LOCATION (c);
2704 if (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_COPYPRIVATE)
2707 var = OMP_CLAUSE_DECL (c);
2708 by_ref = use_pointer_for_field (var, NULL);
2710 ref = build_sender_ref (var, ctx);
2711 x = new_var = lookup_decl_in_outer_ctx (var, ctx);
2714 x = build_fold_addr_expr_loc (clause_loc, new_var);
2715 x = fold_convert_loc (clause_loc, TREE_TYPE (ref), x);
2717 gimplify_assign (ref, x, slist);
2719 ref = build_receiver_ref (var, false, ctx);
2722 ref = fold_convert_loc (clause_loc,
2723 build_pointer_type (TREE_TYPE (new_var)),
2725 ref = build_fold_indirect_ref_loc (clause_loc, ref);
2727 if (is_reference (var))
2729 ref = fold_convert_loc (clause_loc, TREE_TYPE (new_var), ref);
2730 ref = build_simple_mem_ref_loc (clause_loc, ref);
2731 new_var = build_simple_mem_ref_loc (clause_loc, new_var);
2733 x = lang_hooks.decls.omp_clause_assign_op (c, new_var, ref);
2734 gimplify_and_add (x, rlist);
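/* Sketch for "#pragma omp single copyprivate (x)" (illustrative names):
   the thread that executed the single region publishes &x through the
   communication record (SLIST); afterwards every other thread copies
   the value in through it (RLIST):

       .omp_copy_o.x = &x;          <-- sender side
       ...
       x = *.omp_copy_i.x;          <-- receiver side                   */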
2739 /* Generate code to implement the clauses, FIRSTPRIVATE, COPYIN, LASTPRIVATE,
2740 and REDUCTION from the sender (aka parent) side. */
2743 lower_send_clauses (tree clauses, gimple_seq *ilist, gimple_seq *olist,
2748 for (c = clauses; c ; c = OMP_CLAUSE_CHAIN (c))
2750 tree val, ref, x, var;
2751 bool by_ref, do_in = false, do_out = false;
2752 location_t clause_loc = OMP_CLAUSE_LOCATION (c);
2754 switch (OMP_CLAUSE_CODE (c))
2756 case OMP_CLAUSE_PRIVATE:
2757 if (OMP_CLAUSE_PRIVATE_OUTER_REF (c))
2760 case OMP_CLAUSE_FIRSTPRIVATE:
2761 case OMP_CLAUSE_COPYIN:
2762 case OMP_CLAUSE_LASTPRIVATE:
2763 case OMP_CLAUSE_REDUCTION:
2769 val = OMP_CLAUSE_DECL (c);
2770 var = lookup_decl_in_outer_ctx (val, ctx);
2772 if (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_COPYIN
2773 && is_global_var (var))
2775 if (is_variable_sized (val))
2777 by_ref = use_pointer_for_field (val, NULL);
2779 switch (OMP_CLAUSE_CODE (c))
2781 case OMP_CLAUSE_PRIVATE:
2782 case OMP_CLAUSE_FIRSTPRIVATE:
2783 case OMP_CLAUSE_COPYIN:
2787 case OMP_CLAUSE_LASTPRIVATE:
2788 if (by_ref || is_reference (val))
2790 if (OMP_CLAUSE_LASTPRIVATE_FIRSTPRIVATE (c))
2797 if (lang_hooks.decls.omp_private_outer_ref (val))
2802 case OMP_CLAUSE_REDUCTION:
2804 do_out = !(by_ref || is_reference (val));
2813 ref = build_sender_ref (val, ctx);
2814 x = by_ref ? build_fold_addr_expr_loc (clause_loc, var) : var;
2815 gimplify_assign (ref, x, ilist);
2816 if (is_task_ctx (ctx))
2817 DECL_ABSTRACT_ORIGIN (TREE_OPERAND (ref, 1)) = NULL;
2822 ref = build_sender_ref (val, ctx);
2823 gimplify_assign (var, ref, olist);
2828 /* Generate code to implement SHARED from the sender (aka parent)
2829 side. This is trickier, since GIMPLE_OMP_PARALLEL_CLAUSES doesn't
2830 list things that got automatically shared. */
2833 lower_send_shared_vars (gimple_seq *ilist, gimple_seq *olist, omp_context *ctx)
2835 tree var, ovar, nvar, f, x, record_type;
2837 if (ctx->record_type == NULL)
2840 record_type = ctx->srecord_type ? ctx->srecord_type : ctx->record_type;
2841 for (f = TYPE_FIELDS (record_type); f ; f = TREE_CHAIN (f))
2843 ovar = DECL_ABSTRACT_ORIGIN (f);
2844 nvar = maybe_lookup_decl (ovar, ctx);
2845 if (!nvar || !DECL_HAS_VALUE_EXPR_P (nvar))
      /* If CTX is a nested parallel directive, find the immediately
         enclosing parallel or workshare construct that contains a
         mapping for OVAR.  */
2851 var = lookup_decl_in_outer_ctx (ovar, ctx);
2853 if (use_pointer_for_field (ovar, ctx))
2855 x = build_sender_ref (ovar, ctx);
2856 var = build_fold_addr_expr (var);
2857 gimplify_assign (x, var, ilist);
2861 x = build_sender_ref (ovar, ctx);
2862 gimplify_assign (x, var, ilist);
2864 if (!TREE_READONLY (var)
2865 /* We don't need to receive a new reference to a result
2866 or parm decl. In fact we may not store to it as we will
             invalidate any pending RSO and generate wrong gimple
             during inlining.  */
2869 && !((TREE_CODE (var) == RESULT_DECL
2870 || TREE_CODE (var) == PARM_DECL)
2871 && DECL_BY_REFERENCE (var)))
2873 x = build_sender_ref (ovar, ctx);
2874 gimplify_assign (var, x, olist);
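/* Sketch of the sender side for an implicitly shared variable I
   (illustrative names; cf. the nesting example before
   lookup_decl_in_outer_ctx):

       .omp_data_o.i = i;        <-- or  .omp_data_o.i = &i  when the
                                      field is passed by reference
       #omp parallel ...
       i = .omp_data_o.i;        <-- copy-out, unless I is read-only    */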
/* A convenience function to build an empty GIMPLE_COND with just the
   condition.  */
2885 gimple_build_cond_empty (tree cond)
2887 enum tree_code pred_code;
2890 gimple_cond_get_ops_from_tree (cond, &pred_code, &lhs, &rhs);
2891 return gimple_build_cond (pred_code, lhs, rhs, NULL_TREE, NULL_TREE);
/* Build the function calls to GOMP_parallel_start etc to actually
   generate the parallel operation.  REGION is the parallel region
   being expanded.  BB is the block where the code should be
   inserted.  WS_ARGS will be set if this is a call to a combined
   parallel+workshare construct; it contains the list of additional
   arguments needed by the workshare construct.  */
2903 expand_parallel_call (struct omp_region *region, basic_block bb,
2904 gimple entry_stmt, VEC(tree,gc) *ws_args)
2906 tree t, t1, t2, val, cond, c, clauses;
2907 gimple_stmt_iterator gsi;
2910 location_t clause_loc;
2913 clauses = gimple_omp_parallel_clauses (entry_stmt);
  /* Determine what flavor of GOMP_parallel_start we will be
     emitting.  */
2917 start_ix = BUILT_IN_GOMP_PARALLEL_START;
2918 if (is_combined_parallel (region))
2920 switch (region->inner->type)
2922 case GIMPLE_OMP_FOR:
2923 gcc_assert (region->inner->sched_kind != OMP_CLAUSE_SCHEDULE_AUTO);
2924 start_ix = BUILT_IN_GOMP_PARALLEL_LOOP_STATIC_START
2925 + (region->inner->sched_kind
2926 == OMP_CLAUSE_SCHEDULE_RUNTIME
2927 ? 3 : region->inner->sched_kind);
2929 case GIMPLE_OMP_SECTIONS:
2930 start_ix = BUILT_IN_GOMP_PARALLEL_SECTIONS_START;
2937 /* By default, the value of NUM_THREADS is zero (selected at run time)
2938 and there is no conditional. */
2940 val = build_int_cst (unsigned_type_node, 0);
2942 c = find_omp_clause (clauses, OMP_CLAUSE_IF);
2944 cond = OMP_CLAUSE_IF_EXPR (c);
2946 c = find_omp_clause (clauses, OMP_CLAUSE_NUM_THREADS);
2949 val = OMP_CLAUSE_NUM_THREADS_EXPR (c);
2950 clause_loc = OMP_CLAUSE_LOCATION (c);
2953 clause_loc = gimple_location (entry_stmt);
2955 /* Ensure 'val' is of the correct type. */
2956 val = fold_convert_loc (clause_loc, unsigned_type_node, val);
2958 /* If we found the clause 'if (cond)', build either
2959 (cond != 0) or (cond ? val : 1u). */
2962 gimple_stmt_iterator gsi;
2964 cond = gimple_boolify (cond);
2966 if (integer_zerop (val))
2967 val = fold_build2_loc (clause_loc,
2968 EQ_EXPR, unsigned_type_node, cond,
2969 build_int_cst (TREE_TYPE (cond), 0));
2972 basic_block cond_bb, then_bb, else_bb;
2973 edge e, e_then, e_else;
2974 tree tmp_then, tmp_else, tmp_join, tmp_var;
2976 tmp_var = create_tmp_var (TREE_TYPE (val), NULL);
2977 if (gimple_in_ssa_p (cfun))
2979 tmp_then = make_ssa_name (tmp_var, NULL);
2980 tmp_else = make_ssa_name (tmp_var, NULL);
2981 tmp_join = make_ssa_name (tmp_var, NULL);
2990 e = split_block (bb, NULL);
2995 then_bb = create_empty_bb (cond_bb);
2996 else_bb = create_empty_bb (then_bb);
2997 set_immediate_dominator (CDI_DOMINATORS, then_bb, cond_bb);
2998 set_immediate_dominator (CDI_DOMINATORS, else_bb, cond_bb);
3000 stmt = gimple_build_cond_empty (cond);
3001 gsi = gsi_start_bb (cond_bb);
3002 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
3004 gsi = gsi_start_bb (then_bb);
3005 stmt = gimple_build_assign (tmp_then, val);
3006 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
3008 gsi = gsi_start_bb (else_bb);
3009 stmt = gimple_build_assign
3010 (tmp_else, build_int_cst (unsigned_type_node, 1));
3011 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
3013 make_edge (cond_bb, then_bb, EDGE_TRUE_VALUE);
3014 make_edge (cond_bb, else_bb, EDGE_FALSE_VALUE);
3015 e_then = make_edge (then_bb, bb, EDGE_FALLTHRU);
3016 e_else = make_edge (else_bb, bb, EDGE_FALLTHRU);
3018 if (gimple_in_ssa_p (cfun))
3020 gimple phi = create_phi_node (tmp_join, bb);
3021 SSA_NAME_DEF_STMT (tmp_join) = phi;
3022 add_phi_arg (phi, tmp_then, e_then, UNKNOWN_LOCATION);
3023 add_phi_arg (phi, tmp_else, e_else, UNKNOWN_LOCATION);
3029 gsi = gsi_start_bb (bb);
3030 val = force_gimple_operand_gsi (&gsi, val, true, NULL_TREE,
3031 false, GSI_CONTINUE_LINKING);
3034 gsi = gsi_last_bb (bb);
3035 t = gimple_omp_parallel_data_arg (entry_stmt);
3037 t1 = null_pointer_node;
3039 t1 = build_fold_addr_expr (t);
3040 t2 = build_fold_addr_expr (gimple_omp_parallel_child_fn (entry_stmt));
3042 args = VEC_alloc (tree, gc, 3 + VEC_length (tree, ws_args));
3043 VEC_quick_push (tree, args, t2);
3044 VEC_quick_push (tree, args, t1);
3045 VEC_quick_push (tree, args, val);
3046 VEC_splice (tree, args, ws_args);
3048 t = build_call_expr_loc_vec (UNKNOWN_LOCATION,
3049 built_in_decls[start_ix], args);
3051 force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
3052 false, GSI_CONTINUE_LINKING);
3054 t = gimple_omp_parallel_data_arg (entry_stmt);
3056 t = null_pointer_node;
3058 t = build_fold_addr_expr (t);
3059 t = build_call_expr_loc (gimple_location (entry_stmt),
3060 gimple_omp_parallel_child_fn (entry_stmt), 1, t);
3061 force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
3062 false, GSI_CONTINUE_LINKING);
3064 t = build_call_expr_loc (gimple_location (entry_stmt),
3065 built_in_decls[BUILT_IN_GOMP_PARALLEL_END], 0);
3066 force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
3067 false, GSI_CONTINUE_LINKING);
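/* The net effect, sketched for a plain (non-combined) parallel region
   with child function __fn.0 and data record .omp_data_o (illustrative
   names):

       __builtin_GOMP_parallel_start (__fn.0, &.omp_data_o, nthreads);
       __fn.0 (&.omp_data_o);            <-- the master joins the team
       __builtin_GOMP_parallel_end ();                                  */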
/* Build the function call to GOMP_task to actually
   generate the task operation.  BB is the block where the code
   should be inserted.  */
3075 expand_task_call (basic_block bb, gimple entry_stmt)
3077 tree t, t1, t2, t3, flags, cond, c, clauses;
3078 gimple_stmt_iterator gsi;
3079 location_t loc = gimple_location (entry_stmt);
3081 clauses = gimple_omp_task_clauses (entry_stmt);
3083 c = find_omp_clause (clauses, OMP_CLAUSE_IF);
3085 cond = gimple_boolify (OMP_CLAUSE_IF_EXPR (c));
3087 cond = boolean_true_node;
3089 c = find_omp_clause (clauses, OMP_CLAUSE_UNTIED);
3090 flags = build_int_cst (unsigned_type_node, (c ? 1 : 0));
3092 gsi = gsi_last_bb (bb);
3093 t = gimple_omp_task_data_arg (entry_stmt);
3095 t2 = null_pointer_node;
3097 t2 = build_fold_addr_expr_loc (loc, t);
3098 t1 = build_fold_addr_expr_loc (loc, gimple_omp_task_child_fn (entry_stmt));
3099 t = gimple_omp_task_copy_fn (entry_stmt);
3101 t3 = null_pointer_node;
3103 t3 = build_fold_addr_expr_loc (loc, t);
3105 t = build_call_expr (built_in_decls[BUILT_IN_GOMP_TASK], 7, t1, t2, t3,
3106 gimple_omp_task_arg_size (entry_stmt),
3107 gimple_omp_task_arg_align (entry_stmt), cond, flags);
3109 force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
3110 false, GSI_CONTINUE_LINKING);
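/* Sketch of the emitted call (arguments in the order built above,
   illustrative names):

       GOMP_task (child_fn, &.omp_data_o, copy_fn,
                  arg_size, arg_align, if_cond, flags);

   FLAGS currently carries only the untied bit.  */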
3114 /* If exceptions are enabled, wrap the statements in BODY in a MUST_NOT_THROW
3115 catch handler and return it. This prevents programs from violating the
3116 structured block semantics with throws. */
3119 maybe_catch_exception (gimple_seq body)
3124 if (!flag_exceptions)
3127 if (lang_hooks.eh_protect_cleanup_actions != NULL)
3128 decl = lang_hooks.eh_protect_cleanup_actions ();
3130 decl = built_in_decls[BUILT_IN_TRAP];
3132 g = gimple_build_eh_must_not_throw (decl);
3133 g = gimple_build_try (body, gimple_seq_alloc_with_stmt (g),
3136 return gimple_seq_alloc_with_stmt (g);
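/* Conceptually (a sketch, not the literal GIMPLE):

       try { BODY }
       catch (...) { <must-not-throw action> }

   where the action is the language's protect-cleanup action (e.g. a
   terminate-like handler for C++) or __builtin_trap.  */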
3139 /* Chain all the DECLs in LIST by their TREE_CHAIN fields. */
3142 vec2chain (VEC(tree,gc) *v)
3144 tree chain = NULL_TREE, t;
3147 FOR_EACH_VEC_ELT_REVERSE (tree, v, ix, t)
3149 TREE_CHAIN (t) = chain;
/* Remove barriers in REGION->EXIT's block.  Note that this is only
   valid for GIMPLE_OMP_PARALLEL regions.  Since the end of a parallel region
   is an implicit barrier, any workshare inside the GIMPLE_OMP_PARALLEL that
   left a barrier at the end of the GIMPLE_OMP_PARALLEL region can now be
   removed.  */
3164 remove_exit_barrier (struct omp_region *region)
3166 gimple_stmt_iterator gsi;
3167 basic_block exit_bb;
3171 int any_addressable_vars = -1;
3173 exit_bb = region->exit;
  /* If the parallel region doesn't return, we don't have REGION->EXIT
     block at all.  */
  if (exit_bb == NULL)
    return;
3180 /* The last insn in the block will be the parallel's GIMPLE_OMP_RETURN. The
3181 workshare's GIMPLE_OMP_RETURN will be in a preceding block. The kinds of
3182 statements that can appear in between are extremely limited -- no
3183 memory operations at all. Here, we allow nothing at all, so the
3184 only thing we allow to precede this GIMPLE_OMP_RETURN is a label. */
3185 gsi = gsi_last_bb (exit_bb);
3186 gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_RETURN);
3188 if (!gsi_end_p (gsi) && gimple_code (gsi_stmt (gsi)) != GIMPLE_LABEL)
3191 FOR_EACH_EDGE (e, ei, exit_bb->preds)
3193 gsi = gsi_last_bb (e->src);
3194 if (gsi_end_p (gsi))
3196 stmt = gsi_stmt (gsi);
3197 if (gimple_code (stmt) == GIMPLE_OMP_RETURN
3198 && !gimple_omp_return_nowait_p (stmt))
3200 /* OpenMP 3.0 tasks unfortunately prevent this optimization
3201 in many cases. If there could be tasks queued, the barrier
3202 might be needed to let the tasks run before some local
3203 variable of the parallel that the task uses as shared
3204 runs out of scope. The task can be spawned either
3205 from within current function (this would be easy to check)
3206 or from some function it calls and gets passed an address
3207 of such a variable. */
3208 if (any_addressable_vars < 0)
3210 gimple parallel_stmt = last_stmt (region->entry);
3211 tree child_fun = gimple_omp_parallel_child_fn (parallel_stmt);
3212 tree local_decls, block, decl;
3215 any_addressable_vars = 0;
3216 FOR_EACH_LOCAL_DECL (DECL_STRUCT_FUNCTION (child_fun), ix, decl)
3217 if (TREE_ADDRESSABLE (decl))
3219 any_addressable_vars = 1;
3222 for (block = gimple_block (stmt);
3223 !any_addressable_vars
3225 && TREE_CODE (block) == BLOCK;
3226 block = BLOCK_SUPERCONTEXT (block))
3228 for (local_decls = BLOCK_VARS (block);
3230 local_decls = TREE_CHAIN (local_decls))
3231 if (TREE_ADDRESSABLE (local_decls))
3233 any_addressable_vars = 1;
3236 if (block == gimple_block (parallel_stmt))
3240 if (!any_addressable_vars)
3241 gimple_omp_return_set_nowait (stmt);
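/* Illustrative case for the check above (hypothetical user code):

       #pragma omp parallel
       {
         int local = 0;
         #pragma omp task shared (local)    <-- &local escapes to the task
           local++;
         #pragma omp for
         for (...) ...                      <-- this workshare's barrier
       }                                        cannot be dropped: a queued
                                                task may still touch LOCAL  */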
static void
remove_exit_barriers (struct omp_region *region)
{
3249 if (region->type == GIMPLE_OMP_PARALLEL)
3250 remove_exit_barrier (region);
  if (region->inner)
    {
      region = region->inner;
      remove_exit_barriers (region);
      while (region->next)
        {
          region = region->next;
          remove_exit_barriers (region);
        }
    }
}
3264 /* Optimize omp_get_thread_num () and omp_get_num_threads ()
3265 calls. These can't be declared as const functions, but
3266 within one parallel body they are constant, so they can be
3267 transformed there into __builtin_omp_get_{thread_num,num_threads} ()
   which are declared const.  Similarly for the task body, except
   that in an untied task omp_get_thread_num () can change at any task
   scheduling point.  */
3273 optimize_omp_library_calls (gimple entry_stmt)
3276 gimple_stmt_iterator gsi;
  tree thr_num_id
    = DECL_ASSEMBLER_NAME (built_in_decls [BUILT_IN_OMP_GET_THREAD_NUM]);
  tree num_thr_id
    = DECL_ASSEMBLER_NAME (built_in_decls [BUILT_IN_OMP_GET_NUM_THREADS]);
3281 bool untied_task = (gimple_code (entry_stmt) == GIMPLE_OMP_TASK
3282 && find_omp_clause (gimple_omp_task_clauses (entry_stmt),
3283 OMP_CLAUSE_UNTIED) != NULL);
3286 for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
3288 gimple call = gsi_stmt (gsi);
3291 if (is_gimple_call (call)
3292 && (decl = gimple_call_fndecl (call))
3293 && DECL_EXTERNAL (decl)
3294 && TREE_PUBLIC (decl)
3295 && DECL_INITIAL (decl) == NULL)
3299 if (DECL_NAME (decl) == thr_num_id)
3301 /* In #pragma omp task untied omp_get_thread_num () can change
3302 during the execution of the task region. */
3305 built_in = built_in_decls [BUILT_IN_OMP_GET_THREAD_NUM];
3307 else if (DECL_NAME (decl) == num_thr_id)
3308 built_in = built_in_decls [BUILT_IN_OMP_GET_NUM_THREADS];
3312 if (DECL_ASSEMBLER_NAME (decl) != DECL_ASSEMBLER_NAME (built_in)
3313 || gimple_call_num_args (call) != 0)
3316 if (flag_exceptions && !TREE_NOTHROW (decl))
3319 if (TREE_CODE (TREE_TYPE (decl)) != FUNCTION_TYPE
3320 || !types_compatible_p (TREE_TYPE (TREE_TYPE (decl)),
3321 TREE_TYPE (TREE_TYPE (built_in))))
3324 gimple_call_set_fndecl (call, built_in);
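/* Illustrative effect inside one parallel body:

       n = omp_get_num_threads ();               <-- opaque library call

   becomes

       n = __builtin_omp_get_num_threads ();     <-- const, so CSE can
                                                     reuse the result   */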
3329 /* Expand the OpenMP parallel or task directive starting at REGION. */
3332 expand_omp_taskreg (struct omp_region *region)
3334 basic_block entry_bb, exit_bb, new_bb;
3335 struct function *child_cfun;
3336 tree child_fn, block, t;
3338 gimple_stmt_iterator gsi;
3339 gimple entry_stmt, stmt;
3341 VEC(tree,gc) *ws_args;
3343 entry_stmt = last_stmt (region->entry);
3344 child_fn = gimple_omp_taskreg_child_fn (entry_stmt);
3345 child_cfun = DECL_STRUCT_FUNCTION (child_fn);
  /* If this function has already been instrumented, make sure
     the child function isn't instrumented again.  */
3347 the child function isn't instrumented again. */
3348 child_cfun->after_tree_profile = cfun->after_tree_profile;
3350 entry_bb = region->entry;
3351 exit_bb = region->exit;
3353 if (is_combined_parallel (region))
3354 ws_args = region->ws_args;
3358 if (child_cfun->cfg)
3360 /* Due to inlining, it may happen that we have already outlined
3361 the region, in which case all we need to do is make the
3362 sub-graph unreachable and emit the parallel call. */
3363 edge entry_succ_e, exit_succ_e;
3364 gimple_stmt_iterator gsi;
3366 entry_succ_e = single_succ_edge (entry_bb);
3368 gsi = gsi_last_bb (entry_bb);
3369 gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_PARALLEL
3370 || gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_TASK);
3371 gsi_remove (&gsi, true);
3376 exit_succ_e = single_succ_edge (exit_bb);
3377 make_edge (new_bb, exit_succ_e->dest, EDGE_FALLTHRU);
3379 remove_edge_and_dominated_blocks (entry_succ_e);
3383 unsigned srcidx, dstidx, num;
3385 /* If the parallel region needs data sent from the parent
3386 function, then the very first statement (except possible
3387 tree profile counter updates) of the parallel body
3388 is a copy assignment .OMP_DATA_I = &.OMP_DATA_O. Since
3389 &.OMP_DATA_O is passed as an argument to the child function,
         we need to replace it with the argument as seen by the child
         function.
3393 In most cases, this will end up being the identity assignment
3394 .OMP_DATA_I = .OMP_DATA_I. However, if the parallel body had
3395 a function call that has been inlined, the original PARM_DECL
3396 .OMP_DATA_I may have been converted into a different local
         variable.  In that case, we need to keep the assignment.  */
3398 if (gimple_omp_taskreg_data_arg (entry_stmt))
3400 basic_block entry_succ_bb = single_succ (entry_bb);
3401 gimple_stmt_iterator gsi;
3403 gimple parcopy_stmt = NULL;
3405 for (gsi = gsi_start_bb (entry_succ_bb); ; gsi_next (&gsi))
3409 gcc_assert (!gsi_end_p (gsi));
3410 stmt = gsi_stmt (gsi);
3411 if (gimple_code (stmt) != GIMPLE_ASSIGN)
3414 if (gimple_num_ops (stmt) == 2)
3416 tree arg = gimple_assign_rhs1 (stmt);
              /* We're ignoring the subcode because we're
                 effectively doing a STRIP_NOPS.  */
3421 if (TREE_CODE (arg) == ADDR_EXPR
3422 && TREE_OPERAND (arg, 0)
3423 == gimple_omp_taskreg_data_arg (entry_stmt))
3425 parcopy_stmt = stmt;
3431 gcc_assert (parcopy_stmt != NULL);
3432 arg = DECL_ARGUMENTS (child_fn);
3434 if (!gimple_in_ssa_p (cfun))
3436 if (gimple_assign_lhs (parcopy_stmt) == arg)
3437 gsi_remove (&gsi, true);
3440 /* ?? Is setting the subcode really necessary ?? */
3441 gimple_omp_set_subcode (parcopy_stmt, TREE_CODE (arg));
3442 gimple_assign_set_rhs1 (parcopy_stmt, arg);
3447 /* If we are in ssa form, we must load the value from the default
3448 definition of the argument. That should not be defined now,
3449 since the argument is not used uninitialized. */
3450 gcc_assert (gimple_default_def (cfun, arg) == NULL);
3451 narg = make_ssa_name (arg, gimple_build_nop ());
3452 set_default_def (arg, narg);
3453 /* ?? Is setting the subcode really necessary ?? */
3454 gimple_omp_set_subcode (parcopy_stmt, TREE_CODE (narg));
3455 gimple_assign_set_rhs1 (parcopy_stmt, narg);
3456 update_stmt (parcopy_stmt);
3460 /* Declare local variables needed in CHILD_CFUN. */
3461 block = DECL_INITIAL (child_fn);
3462 BLOCK_VARS (block) = vec2chain (child_cfun->local_decls);
  /* The gimplifier could record temporaries in the parallel/task block
     rather than in the containing function's local_decls chain,
     which would mean cgraph missed finalizing them.  Do it now.  */
3466 for (t = BLOCK_VARS (block); t; t = TREE_CHAIN (t))
3467 if (TREE_CODE (t) == VAR_DECL
3469 && !DECL_EXTERNAL (t))
3470 varpool_finalize_decl (t);
3471 DECL_SAVED_TREE (child_fn) = NULL;
3472 gimple_set_body (child_fn, bb_seq (single_succ (entry_bb)));
3473 TREE_USED (block) = 1;
3475 /* Reset DECL_CONTEXT on function arguments. */
3476 for (t = DECL_ARGUMENTS (child_fn); t; t = TREE_CHAIN (t))
3477 DECL_CONTEXT (t) = child_fn;
3479 /* Split ENTRY_BB at GIMPLE_OMP_PARALLEL or GIMPLE_OMP_TASK,
3480 so that it can be moved to the child function. */
3481 gsi = gsi_last_bb (entry_bb);
3482 stmt = gsi_stmt (gsi);
3483 gcc_assert (stmt && (gimple_code (stmt) == GIMPLE_OMP_PARALLEL
3484 || gimple_code (stmt) == GIMPLE_OMP_TASK));
3485 gsi_remove (&gsi, true);
3486 e = split_block (entry_bb, stmt);
3488 single_succ_edge (entry_bb)->flags = EDGE_FALLTHRU;
3490 /* Convert GIMPLE_OMP_RETURN into a RETURN_EXPR. */
3493 gsi = gsi_last_bb (exit_bb);
3494 gcc_assert (!gsi_end_p (gsi)
3495 && gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_RETURN);
3496 stmt = gimple_build_return (NULL);
3497 gsi_insert_after (&gsi, stmt, GSI_SAME_STMT);
3498 gsi_remove (&gsi, true);
3501 /* Move the parallel region into CHILD_CFUN. */
3503 if (gimple_in_ssa_p (cfun))
3505 push_cfun (child_cfun);
3506 init_tree_ssa (child_cfun);
3507 init_ssa_operands ();
3508 cfun->gimple_df->in_ssa_p = true;
3513 block = gimple_block (entry_stmt);
3515 new_bb = move_sese_region_to_fn (child_cfun, entry_bb, exit_bb, block);
3517 single_succ_edge (new_bb)->flags = EDGE_FALLTHRU;
3519 /* Remove non-local VAR_DECLs from child_cfun->local_decls list. */
3520 num = VEC_length (tree, child_cfun->local_decls);
3521 for (srcidx = 0, dstidx = 0; srcidx < num; srcidx++)
3523 t = VEC_index (tree, child_cfun->local_decls, srcidx);
3524 if (DECL_CONTEXT (t) == cfun->decl)
3526 if (srcidx != dstidx)
3527 VEC_replace (tree, child_cfun->local_decls, dstidx, t);
3531 VEC_truncate (tree, child_cfun->local_decls, dstidx);
3533 /* Inform the callgraph about the new function. */
3534 DECL_STRUCT_FUNCTION (child_fn)->curr_properties
3535 = cfun->curr_properties;
3536 cgraph_add_new_function (child_fn, true);
3538 /* Fix the callgraph edges for child_cfun. Those for cfun will be
3539 fixed in a following pass. */
3540 push_cfun (child_cfun);
3541 save_current = current_function_decl;
3542 current_function_decl = child_fn;
3544 optimize_omp_library_calls (entry_stmt);
3545 rebuild_cgraph_edges ();
3547 /* Some EH regions might become dead, see PR34608. If
3548 pass_cleanup_cfg isn't the first pass to happen with the
3549 new child, these dead EH edges might cause problems.
3550 Clean them up now. */
3551 if (flag_exceptions)
3554 bool changed = false;
3557 changed |= gimple_purge_dead_eh_edges (bb);
3559 cleanup_tree_cfg ();
3561 if (gimple_in_ssa_p (cfun))
3562 update_ssa (TODO_update_ssa);
3563 current_function_decl = save_current;
3567 /* Emit a library call to launch the children threads. */
3568 if (gimple_code (entry_stmt) == GIMPLE_OMP_PARALLEL)
3569 expand_parallel_call (region, new_bb, entry_stmt, ws_args);
3571 expand_task_call (new_bb, entry_stmt);
3572 update_ssa (TODO_update_ssa_only_virtuals);
3576 /* A subroutine of expand_omp_for. Generate code for a parallel
3577 loop with any schedule. Given parameters:
3579 for (V = N1; V cond N2; V += STEP) BODY;
3581 where COND is "<" or ">", we generate pseudocode
        more = GOMP_loop_foo_start (N1, N2, STEP, CHUNK, &istart0, &iend0);
        if (more) goto L0; else goto L3;
    L0:
        V = istart0;
        iend = iend0;
    L1:
        BODY;
        V += STEP;
        if (V cond iend) goto L1; else goto L2;
    L2:
        if (GOMP_loop_foo_next (&istart0, &iend0)) goto L0; else goto L3;
    L3:
3596 If this is a combined omp parallel loop, instead of the call to
3597 GOMP_loop_foo_start, we call GOMP_loop_foo_next.
3599 For collapsed loops, given parameters:
3601 for (V1 = N11; V1 cond1 N12; V1 += STEP1)
3602 for (V2 = N21; V2 cond2 N22; V2 += STEP2)
3603 for (V3 = N31; V3 cond3 N32; V3 += STEP3)
      we generate pseudocode

        if (cond3 is <) adj = STEP3 - 1; else adj = STEP3 + 1;
        count3 = (adj + N32 - N31) / STEP3;
        if (cond2 is <) adj = STEP2 - 1; else adj = STEP2 + 1;
        count2 = (adj + N22 - N21) / STEP2;
        if (cond1 is <) adj = STEP1 - 1; else adj = STEP1 + 1;
        count1 = (adj + N12 - N11) / STEP1;
        count = count1 * count2 * count3;
        more = GOMP_loop_foo_start (0, count, 1, CHUNK, &istart0, &iend0);
        if (more) goto L0; else goto L3;
    L0:
        V = istart0;
        T = V;
        V3 = N31 + (T % count3) * STEP3;
        T = T / count3;
        V2 = N21 + (T % count2) * STEP2;
        T = T / count2;
        V1 = N11 + T * STEP1;
        iend = iend0;
    L1:
        BODY;
        V += 1;
        if (V < iend) goto L10; else goto L2;
    L10:
        V3 += STEP3;
        if (V3 cond3 N32) goto L1; else goto L11;
    L11:
        V3 = N31;
        V2 += STEP2;
        if (V2 cond2 N22) goto L1; else goto L12;
    L12:
        V2 = N21;
        V1 += STEP1;
        goto L1;
    L2:
        if (GOMP_loop_foo_next (&istart0, &iend0)) goto L0; else goto L3;
    L3:
*/

static void
3657 expand_omp_for_generic (struct omp_region *region,
3658 struct omp_for_data *fd,
3659 enum built_in_function start_fn,
                        enum built_in_function next_fn)
{
3662 tree type, istart0, iend0, iend;
3663 tree t, vmain, vback, bias = NULL_TREE;
3664 basic_block entry_bb, cont_bb, exit_bb, l0_bb, l1_bb, collapse_bb;
3665 basic_block l2_bb = NULL, l3_bb = NULL;
3666 gimple_stmt_iterator gsi;
3668 bool in_combined_parallel = is_combined_parallel (region);
3669 bool broken_loop = region->cont == NULL;
3671 tree *counts = NULL;
3674 gcc_assert (!broken_loop || !in_combined_parallel);
3675 gcc_assert (fd->iter_type == long_integer_type_node
3676 || !in_combined_parallel);
3678 type = TREE_TYPE (fd->loop.v);
3679 istart0 = create_tmp_var (fd->iter_type, ".istart0");
3680 iend0 = create_tmp_var (fd->iter_type, ".iend0");
3681 TREE_ADDRESSABLE (istart0) = 1;
3682 TREE_ADDRESSABLE (iend0) = 1;
3683 if (gimple_in_ssa_p (cfun))
3685 add_referenced_var (istart0);
3686 add_referenced_var (iend0);
3689 /* See if we need to bias by LLONG_MIN. */
3690 if (fd->iter_type == long_long_unsigned_type_node
3691 && TREE_CODE (type) == INTEGER_TYPE
3692 && !TYPE_UNSIGNED (type))
3696 if (fd->loop.cond_code == LT_EXPR)
3699 n2 = fold_build2 (PLUS_EXPR, type, fd->loop.n2, fd->loop.step);
3703 n1 = fold_build2 (MINUS_EXPR, type, fd->loop.n2, fd->loop.step);
3706 if (TREE_CODE (n1) != INTEGER_CST
3707 || TREE_CODE (n2) != INTEGER_CST
3708 || ((tree_int_cst_sgn (n1) < 0) ^ (tree_int_cst_sgn (n2) < 0)))
3709 bias = fold_convert (fd->iter_type, TYPE_MIN_VALUE (type));
3712 entry_bb = region->entry;
3713 cont_bb = region->cont;
3715 gcc_assert (EDGE_COUNT (entry_bb->succs) == 2);
3716 gcc_assert (broken_loop
3717 || BRANCH_EDGE (entry_bb)->dest == FALLTHRU_EDGE (cont_bb)->dest);
3718 l0_bb = split_edge (FALLTHRU_EDGE (entry_bb));
3719 l1_bb = single_succ (l0_bb);
3722 l2_bb = create_empty_bb (cont_bb);
3723 gcc_assert (BRANCH_EDGE (cont_bb)->dest == l1_bb);
3724 gcc_assert (EDGE_COUNT (cont_bb->succs) == 2);
3728 l3_bb = BRANCH_EDGE (entry_bb)->dest;
3729 exit_bb = region->exit;
3731 gsi = gsi_last_bb (entry_bb);
3733 gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_FOR);
3734 if (fd->collapse > 1)
3736 /* collapsed loops need work for expansion in SSA form. */
3737 gcc_assert (!gimple_in_ssa_p (cfun));
3738 counts = (tree *) alloca (fd->collapse * sizeof (tree));
3739 for (i = 0; i < fd->collapse; i++)
3741 tree itype = TREE_TYPE (fd->loops[i].v);
3743 if (POINTER_TYPE_P (itype))
3744 itype = lang_hooks.types.type_for_size (TYPE_PRECISION (itype), 0);
3745 t = build_int_cst (itype, (fd->loops[i].cond_code == LT_EXPR
3747 t = fold_build2 (PLUS_EXPR, itype,
3748 fold_convert (itype, fd->loops[i].step), t);
3749 t = fold_build2 (PLUS_EXPR, itype, t,
3750 fold_convert (itype, fd->loops[i].n2));
3751 t = fold_build2 (MINUS_EXPR, itype, t,
3752 fold_convert (itype, fd->loops[i].n1));
3753 if (TYPE_UNSIGNED (itype) && fd->loops[i].cond_code == GT_EXPR)
3754 t = fold_build2 (TRUNC_DIV_EXPR, itype,
3755 fold_build1 (NEGATE_EXPR, itype, t),
3756 fold_build1 (NEGATE_EXPR, itype,
3757 fold_convert (itype,
3758 fd->loops[i].step)));
3760 t = fold_build2 (TRUNC_DIV_EXPR, itype, t,
3761 fold_convert (itype, fd->loops[i].step));
3762 t = fold_convert (type, t);
3763 if (TREE_CODE (t) == INTEGER_CST)
3767 counts[i] = create_tmp_var (type, ".count");
3768 t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE,
3769 true, GSI_SAME_STMT);
3770 stmt = gimple_build_assign (counts[i], t);
3771 gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);
3773 if (SSA_VAR_P (fd->loop.n2))
3779 t = fold_build2 (MULT_EXPR, type, fd->loop.n2, counts[i]);
3780 t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE,
3781 true, GSI_SAME_STMT);
3783 stmt = gimple_build_assign (fd->loop.n2, t);
3784 gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);
3788 if (in_combined_parallel)
3790 /* In a combined parallel loop, emit a call to
3791 GOMP_loop_foo_next. */
3792 t = build_call_expr (built_in_decls[next_fn], 2,
3793 build_fold_addr_expr (istart0),
3794 build_fold_addr_expr (iend0));
3798 tree t0, t1, t2, t3, t4;
3799 /* If this is not a combined parallel loop, emit a call to
3800 GOMP_loop_foo_start in ENTRY_BB. */
3801 t4 = build_fold_addr_expr (iend0);
3802 t3 = build_fold_addr_expr (istart0);
3803 t2 = fold_convert (fd->iter_type, fd->loop.step);
3804 if (POINTER_TYPE_P (type)
3805 && TYPE_PRECISION (type) != TYPE_PRECISION (fd->iter_type))
          /* Avoid casting pointers to an integer of a different size.  */
          tree itype
            = lang_hooks.types.type_for_size (TYPE_PRECISION (type), 0);
3810 t1 = fold_convert (fd->iter_type, fold_convert (itype, fd->loop.n2));
3811 t0 = fold_convert (fd->iter_type, fold_convert (itype, fd->loop.n1));
3815 t1 = fold_convert (fd->iter_type, fd->loop.n2);
3816 t0 = fold_convert (fd->iter_type, fd->loop.n1);
3820 t1 = fold_build2 (PLUS_EXPR, fd->iter_type, t1, bias);
3821 t0 = fold_build2 (PLUS_EXPR, fd->iter_type, t0, bias);
3823 if (fd->iter_type == long_integer_type_node)
3827 t = fold_convert (fd->iter_type, fd->chunk_size);
3828 t = build_call_expr (built_in_decls[start_fn], 6,
3829 t0, t1, t2, t, t3, t4);
3832 t = build_call_expr (built_in_decls[start_fn], 5,
3833 t0, t1, t2, t3, t4);
      /* The GOMP_loop_ull_*start functions have an additional boolean
         argument, true for < loops and false for > loops.
3842 In Fortran, the C bool type can be different from
3843 boolean_type_node. */
3844 c_bool_type = TREE_TYPE (TREE_TYPE (built_in_decls[start_fn]));
3845 t5 = build_int_cst (c_bool_type,
3846 fd->loop.cond_code == LT_EXPR ? 1 : 0);
3849 t = fold_convert (fd->iter_type, fd->chunk_size);
3850 t = build_call_expr (built_in_decls[start_fn], 7,
3851 t5, t0, t1, t2, t, t3, t4);
3854 t = build_call_expr (built_in_decls[start_fn], 6,
3855 t5, t0, t1, t2, t3, t4);
3858 if (TREE_TYPE (t) != boolean_type_node)
3859 t = fold_build2 (NE_EXPR, boolean_type_node,
3860 t, build_int_cst (TREE_TYPE (t), 0));
3861 t = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
3862 true, GSI_SAME_STMT);
3863 gsi_insert_after (&gsi, gimple_build_cond_empty (t), GSI_SAME_STMT);
3865 /* Remove the GIMPLE_OMP_FOR statement. */
3866 gsi_remove (&gsi, true);
3868 /* Iteration setup for sequential loop goes in L0_BB. */
3869 gsi = gsi_start_bb (l0_bb);
3872 t = fold_build2 (MINUS_EXPR, fd->iter_type, t, bias);
3873 if (POINTER_TYPE_P (type))
3874 t = fold_convert (lang_hooks.types.type_for_size (TYPE_PRECISION (type),
3876 t = fold_convert (type, t);
3877 t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE,
3878 false, GSI_CONTINUE_LINKING);
3879 stmt = gimple_build_assign (fd->loop.v, t);
3880 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
3884 t = fold_build2 (MINUS_EXPR, fd->iter_type, t, bias);
3885 if (POINTER_TYPE_P (type))
3886 t = fold_convert (lang_hooks.types.type_for_size (TYPE_PRECISION (type),
3888 t = fold_convert (type, t);
3889 iend = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
3890 false, GSI_CONTINUE_LINKING);
3891 if (fd->collapse > 1)
3893 tree tem = create_tmp_var (type, ".tem");
3895 stmt = gimple_build_assign (tem, fd->loop.v);
3896 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
3897 for (i = fd->collapse - 1; i >= 0; i--)
3899 tree vtype = TREE_TYPE (fd->loops[i].v), itype;
3901 if (POINTER_TYPE_P (vtype))
3902 itype = lang_hooks.types.type_for_size (TYPE_PRECISION (vtype), 0);
3903 t = fold_build2 (TRUNC_MOD_EXPR, type, tem, counts[i]);
3904 t = fold_convert (itype, t);
3905 t = fold_build2 (MULT_EXPR, itype, t,
3906 fold_convert (itype, fd->loops[i].step));
3907 if (POINTER_TYPE_P (vtype))
3908 t = fold_build2 (POINTER_PLUS_EXPR, vtype,
3909 fd->loops[i].n1, fold_convert (sizetype, t));
3911 t = fold_build2 (PLUS_EXPR, itype, fd->loops[i].n1, t);
3912 t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE,
3913 false, GSI_CONTINUE_LINKING);
3914 stmt = gimple_build_assign (fd->loops[i].v, t);
3915 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
3918 t = fold_build2 (TRUNC_DIV_EXPR, type, tem, counts[i]);
3919 t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE,
3920 false, GSI_CONTINUE_LINKING);
3921 stmt = gimple_build_assign (tem, t);
3922 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
3929 /* Code to control the increment and predicate for the sequential
3930 loop goes in the CONT_BB. */
3931 gsi = gsi_last_bb (cont_bb);
3932 stmt = gsi_stmt (gsi);
3933 gcc_assert (gimple_code (stmt) == GIMPLE_OMP_CONTINUE);
3934 vmain = gimple_omp_continue_control_use (stmt);
3935 vback = gimple_omp_continue_control_def (stmt);
3937 if (POINTER_TYPE_P (type))
3938 t = fold_build2 (POINTER_PLUS_EXPR, type, vmain,
3939 fold_convert (sizetype, fd->loop.step));
3941 t = fold_build2 (PLUS_EXPR, type, vmain, fd->loop.step);
3942 t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE,
3943 true, GSI_SAME_STMT);
3944 stmt = gimple_build_assign (vback, t);
3945 gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);
3947 t = build2 (fd->loop.cond_code, boolean_type_node, vback, iend);
3948 stmt = gimple_build_cond_empty (t);
3949 gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);
3951 /* Remove GIMPLE_OMP_CONTINUE. */
3952 gsi_remove (&gsi, true);
3954 if (fd->collapse > 1)
3956 basic_block last_bb, bb;
3959 for (i = fd->collapse - 1; i >= 0; i--)
3961 tree vtype = TREE_TYPE (fd->loops[i].v);
3963 bb = create_empty_bb (last_bb);
3964 gsi = gsi_start_bb (bb);
3966 if (i < fd->collapse - 1)
3968 e = make_edge (last_bb, bb, EDGE_FALSE_VALUE);
3969 e->probability = REG_BR_PROB_BASE / 8;
3971 t = fd->loops[i + 1].n1;
3972 t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE,
3973 false, GSI_CONTINUE_LINKING);
3974 stmt = gimple_build_assign (fd->loops[i + 1].v, t);
3975 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
3980 set_immediate_dominator (CDI_DOMINATORS, bb, last_bb);
3982 if (POINTER_TYPE_P (vtype))
3983 t = fold_build2 (POINTER_PLUS_EXPR, vtype,
3985 fold_convert (sizetype, fd->loops[i].step));
3987 t = fold_build2 (PLUS_EXPR, vtype, fd->loops[i].v,
3989 t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE,
3990 false, GSI_CONTINUE_LINKING);
3991 stmt = gimple_build_assign (fd->loops[i].v, t);
3992 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
3996 t = fd->loops[i].n2;
3997 t = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
3998 false, GSI_CONTINUE_LINKING);
3999 t = fold_build2 (fd->loops[i].cond_code, boolean_type_node,
4001 stmt = gimple_build_cond_empty (t);
4002 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
4003 e = make_edge (bb, l1_bb, EDGE_TRUE_VALUE);
4004 e->probability = REG_BR_PROB_BASE * 7 / 8;
4007 make_edge (bb, l1_bb, EDGE_FALLTHRU);
4012 /* Emit code to get the next parallel iteration in L2_BB. */
4013 gsi = gsi_start_bb (l2_bb);
4015 t = build_call_expr (built_in_decls[next_fn], 2,
4016 build_fold_addr_expr (istart0),
4017 build_fold_addr_expr (iend0));
4018 t = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
4019 false, GSI_CONTINUE_LINKING);
4020 if (TREE_TYPE (t) != boolean_type_node)
4021 t = fold_build2 (NE_EXPR, boolean_type_node,
4022 t, build_int_cst (TREE_TYPE (t), 0));
4023 stmt = gimple_build_cond_empty (t);
4024 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
4027 /* Add the loop cleanup function. */
4028 gsi = gsi_last_bb (exit_bb);
4029 if (gimple_omp_return_nowait_p (gsi_stmt (gsi)))
4030 t = built_in_decls[BUILT_IN_GOMP_LOOP_END_NOWAIT];
4032 t = built_in_decls[BUILT_IN_GOMP_LOOP_END];
4033 stmt = gimple_build_call (t, 0);
4034 gsi_insert_after (&gsi, stmt, GSI_SAME_STMT);
4035 gsi_remove (&gsi, true);
4037 /* Connect the new blocks. */
4038 find_edge (entry_bb, l0_bb)->flags = EDGE_TRUE_VALUE;
4039 find_edge (entry_bb, l3_bb)->flags = EDGE_FALSE_VALUE;
4045 e = find_edge (cont_bb, l3_bb);
4046 ne = make_edge (l2_bb, l3_bb, EDGE_FALSE_VALUE);
4048 phis = phi_nodes (l3_bb);
4049 for (gsi = gsi_start (phis); !gsi_end_p (gsi); gsi_next (&gsi))
4051 gimple phi = gsi_stmt (gsi);
4052 SET_USE (PHI_ARG_DEF_PTR_FROM_EDGE (phi, ne),
4053 PHI_ARG_DEF_FROM_EDGE (phi, e));
4057 make_edge (cont_bb, l2_bb, EDGE_FALSE_VALUE);
4058 if (fd->collapse > 1)
4060 e = find_edge (cont_bb, l1_bb);
4062 e = make_edge (cont_bb, collapse_bb, EDGE_TRUE_VALUE);
4066 e = find_edge (cont_bb, l1_bb);
4067 e->flags = EDGE_TRUE_VALUE;
4069 e->probability = REG_BR_PROB_BASE * 7 / 8;
4070 find_edge (cont_bb, l2_bb)->probability = REG_BR_PROB_BASE / 8;
4071 make_edge (l2_bb, l0_bb, EDGE_TRUE_VALUE);
4073 set_immediate_dominator (CDI_DOMINATORS, l2_bb,
4074 recompute_dominator (CDI_DOMINATORS, l2_bb));
4075 set_immediate_dominator (CDI_DOMINATORS, l3_bb,
4076 recompute_dominator (CDI_DOMINATORS, l3_bb));
4077 set_immediate_dominator (CDI_DOMINATORS, l0_bb,
4078 recompute_dominator (CDI_DOMINATORS, l0_bb));
4079 set_immediate_dominator (CDI_DOMINATORS, l1_bb,
4080 recompute_dominator (CDI_DOMINATORS, l1_bb));
4085 /* A subroutine of expand_omp_for. Generate code for a parallel
4086 loop with static schedule and no specified chunk size. Given
4089 for (V = N1; V cond N2; V += STEP) BODY;
4091 where COND is "<" or ">", we generate pseudocode
        if ((__typeof (V)) -1 > 0 && cond is >)
          n = -(adj + N2 - N1) / -STEP;
        else
          n = (adj + N2 - N1) / STEP;
        q = n / nthreads;
        q += (q * nthreads != n);
        s0 = q * threadid;
        e0 = min(s0 + q, n);
        V = s0 * STEP + N1;
        if (s0 >= e0) goto L2; else goto L0;
    L0:
        e = e0 * STEP + N1;
    L1:
        BODY;
        V += STEP;
        if (V cond e) goto L1;
    L2:
*/
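/* Worked example (assumed numbers): N1 = 0, N2 = 103, STEP = 1 and
   nthreads = 4 give n = 103 and q = 26 (25 rounded up, since
   25 * 4 != 103).  Thread 2 then runs iterations [s0, e0) = [52, 78);
   the last thread's e0 is clamped to n, so thread 3 runs [78, 103).  */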
static void
expand_omp_for_static_nochunk (struct omp_region *region,
                               struct omp_for_data *fd)
{
4120 tree n, q, s0, e0, e, t, nthreads, threadid;
4121 tree type, itype, vmain, vback;
4122 basic_block entry_bb, exit_bb, seq_start_bb, body_bb, cont_bb;
4124 gimple_stmt_iterator gsi;
4127 itype = type = TREE_TYPE (fd->loop.v);
4128 if (POINTER_TYPE_P (type))
4129 itype = lang_hooks.types.type_for_size (TYPE_PRECISION (type), 0);
4131 entry_bb = region->entry;
4132 cont_bb = region->cont;
4133 gcc_assert (EDGE_COUNT (entry_bb->succs) == 2);
4134 gcc_assert (BRANCH_EDGE (entry_bb)->dest == FALLTHRU_EDGE (cont_bb)->dest);
4135 seq_start_bb = split_edge (FALLTHRU_EDGE (entry_bb));
4136 body_bb = single_succ (seq_start_bb);
4137 gcc_assert (BRANCH_EDGE (cont_bb)->dest == body_bb);
4138 gcc_assert (EDGE_COUNT (cont_bb->succs) == 2);
4139 fin_bb = FALLTHRU_EDGE (cont_bb)->dest;
4140 exit_bb = region->exit;
4142 /* Iteration space partitioning goes in ENTRY_BB. */
4143 gsi = gsi_last_bb (entry_bb);
4144 gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_FOR);
4146 t = build_call_expr (built_in_decls[BUILT_IN_OMP_GET_NUM_THREADS], 0);
4147 t = fold_convert (itype, t);
4148 nthreads = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
4149 true, GSI_SAME_STMT);
4151 t = build_call_expr (built_in_decls[BUILT_IN_OMP_GET_THREAD_NUM], 0);
4152 t = fold_convert (itype, t);
4153 threadid = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
4154 true, GSI_SAME_STMT);
4157 = force_gimple_operand_gsi (&gsi, fold_convert (type, fd->loop.n1),
4158 true, NULL_TREE, true, GSI_SAME_STMT);
4160 = force_gimple_operand_gsi (&gsi, fold_convert (itype, fd->loop.n2),
4161 true, NULL_TREE, true, GSI_SAME_STMT);
4163 = force_gimple_operand_gsi (&gsi, fold_convert (itype, fd->loop.step),
4164 true, NULL_TREE, true, GSI_SAME_STMT);
4166 t = build_int_cst (itype, (fd->loop.cond_code == LT_EXPR ? -1 : 1));
4167 t = fold_build2 (PLUS_EXPR, itype, fd->loop.step, t);
4168 t = fold_build2 (PLUS_EXPR, itype, t, fd->loop.n2);
4169 t = fold_build2 (MINUS_EXPR, itype, t, fold_convert (itype, fd->loop.n1));
4170 if (TYPE_UNSIGNED (itype) && fd->loop.cond_code == GT_EXPR)
4171 t = fold_build2 (TRUNC_DIV_EXPR, itype,
4172 fold_build1 (NEGATE_EXPR, itype, t),
4173 fold_build1 (NEGATE_EXPR, itype, fd->loop.step));
4175 t = fold_build2 (TRUNC_DIV_EXPR, itype, t, fd->loop.step);
4176 t = fold_convert (itype, t);
4177 n = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE, true, GSI_SAME_STMT);
4179 t = fold_build2 (TRUNC_DIV_EXPR, itype, n, nthreads);
4180 q = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE, true, GSI_SAME_STMT);
4182 t = fold_build2 (MULT_EXPR, itype, q, nthreads);
4183 t = fold_build2 (NE_EXPR, itype, t, n);
4184 t = fold_build2 (PLUS_EXPR, itype, q, t);
4185 q = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE, true, GSI_SAME_STMT);
4187 t = build2 (MULT_EXPR, itype, q, threadid);
4188 s0 = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE, true, GSI_SAME_STMT);
4190 t = fold_build2 (PLUS_EXPR, itype, s0, q);
4191 t = fold_build2 (MIN_EXPR, itype, t, n);
4192 e0 = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE, true, GSI_SAME_STMT);
4194 t = build2 (GE_EXPR, boolean_type_node, s0, e0);
4195 gsi_insert_before (&gsi, gimple_build_cond_empty (t), GSI_SAME_STMT);
4197 /* Remove the GIMPLE_OMP_FOR statement. */
4198 gsi_remove (&gsi, true);
4200 /* Setup code for sequential iteration goes in SEQ_START_BB. */
4201 gsi = gsi_start_bb (seq_start_bb);
4203 t = fold_convert (itype, s0);
4204 t = fold_build2 (MULT_EXPR, itype, t, fd->loop.step);
4205 if (POINTER_TYPE_P (type))
4206 t = fold_build2 (POINTER_PLUS_EXPR, type, fd->loop.n1,
4207 fold_convert (sizetype, t));
4209 t = fold_build2 (PLUS_EXPR, type, t, fd->loop.n1);
4210 t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE,
4211 false, GSI_CONTINUE_LINKING);
4212 stmt = gimple_build_assign (fd->loop.v, t);
4213 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
4215 t = fold_convert (itype, e0);
4216 t = fold_build2 (MULT_EXPR, itype, t, fd->loop.step);
4217 if (POINTER_TYPE_P (type))
4218 t = fold_build2 (POINTER_PLUS_EXPR, type, fd->loop.n1,
4219 fold_convert (sizetype, t));
4221 t = fold_build2 (PLUS_EXPR, type, t, fd->loop.n1);
4222 e = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
4223 false, GSI_CONTINUE_LINKING);
4225 /* The code controlling the sequential loop replaces the
4226 GIMPLE_OMP_CONTINUE. */
4227 gsi = gsi_last_bb (cont_bb);
4228 stmt = gsi_stmt (gsi);
4229 gcc_assert (gimple_code (stmt) == GIMPLE_OMP_CONTINUE);
4230 vmain = gimple_omp_continue_control_use (stmt);
4231 vback = gimple_omp_continue_control_def (stmt);
4233 if (POINTER_TYPE_P (type))
4234 t = fold_build2 (POINTER_PLUS_EXPR, type, vmain,
4235 fold_convert (sizetype, fd->loop.step));
4237 t = fold_build2 (PLUS_EXPR, type, vmain, fd->loop.step);
4238 t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE,
4239 true, GSI_SAME_STMT);
4240 stmt = gimple_build_assign (vback, t);
4241 gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);
4243 t = build2 (fd->loop.cond_code, boolean_type_node, vback, e);
4244 gsi_insert_before (&gsi, gimple_build_cond_empty (t), GSI_SAME_STMT);
4246 /* Remove the GIMPLE_OMP_CONTINUE statement. */
4247 gsi_remove (&gsi, true);
4249 /* Replace the GIMPLE_OMP_RETURN with a barrier, or nothing. */
4250 gsi = gsi_last_bb (exit_bb);
4251 if (!gimple_omp_return_nowait_p (gsi_stmt (gsi)))
4252 force_gimple_operand_gsi (&gsi, build_omp_barrier (), false, NULL_TREE,
4253 false, GSI_SAME_STMT);
4254 gsi_remove (&gsi, true);
4256 /* Connect all the blocks. */
4257 find_edge (entry_bb, seq_start_bb)->flags = EDGE_FALSE_VALUE;
4258 find_edge (entry_bb, fin_bb)->flags = EDGE_TRUE_VALUE;
4260 find_edge (cont_bb, body_bb)->flags = EDGE_TRUE_VALUE;
4261 find_edge (cont_bb, fin_bb)->flags = EDGE_FALSE_VALUE;
4263 set_immediate_dominator (CDI_DOMINATORS, seq_start_bb, entry_bb);
4264 set_immediate_dominator (CDI_DOMINATORS, body_bb,
4265 recompute_dominator (CDI_DOMINATORS, body_bb));
4266 set_immediate_dominator (CDI_DOMINATORS, fin_bb,
4267 recompute_dominator (CDI_DOMINATORS, fin_bb));
4271 /* A subroutine of expand_omp_for. Generate code for a parallel
4272 loop with static schedule and a specified chunk size. Given
4275 for (V = N1; V cond N2; V += STEP) BODY;
4277 where COND is "<" or ">", we generate pseudocode
        if ((__typeof (V)) -1 > 0 && cond is >)
          n = -(adj + N2 - N1) / -STEP;
        else
          n = (adj + N2 - N1) / STEP;
        trip = 0;
        V = threadid * CHUNK * STEP + N1;  -- this extra definition of V is
                                              here so that V is defined
                                              if the loop is not entered
    L0:
        s0 = (trip * nthreads + threadid) * CHUNK;
        e0 = min(s0 + CHUNK, n);
        if (s0 < n) goto L1; else goto L4;
    L1:
        V = s0 * STEP + N1;
        e = e0 * STEP + N1;
    L2:
        BODY;
        V += STEP;
        if (V cond e) goto L2; else goto L3;
    L3:
        trip += 1;
        goto L0;
    L4:
*/
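/* Worked example (assumed numbers): n = 10, CHUNK = 2 and two threads
   hand out chunks round-robin by trip count:

       trip 0:  thread 0 -> [0, 2),  thread 1 -> [2, 4)
       trip 1:  thread 0 -> [4, 6),  thread 1 -> [6, 8)
       trip 2:  thread 0 -> [8, 10), thread 1 -> s0 = 10 >= n, done.  */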
static void
expand_omp_for_static_chunk (struct omp_region *region, struct omp_for_data *fd)
{
4311 tree n, s0, e0, e, t;
4312 tree trip_var, trip_init, trip_main, trip_back, nthreads, threadid;
4313 tree type, itype, v_main, v_back, v_extra;
4314 basic_block entry_bb, exit_bb, body_bb, seq_start_bb, iter_part_bb;
4315 basic_block trip_update_bb, cont_bb, fin_bb;
4316 gimple_stmt_iterator si;
4320 itype = type = TREE_TYPE (fd->loop.v);
4321 if (POINTER_TYPE_P (type))
4322 itype = lang_hooks.types.type_for_size (TYPE_PRECISION (type), 0);
4324 entry_bb = region->entry;
4325 se = split_block (entry_bb, last_stmt (entry_bb));
4327 iter_part_bb = se->dest;
4328 cont_bb = region->cont;
4329 gcc_assert (EDGE_COUNT (iter_part_bb->succs) == 2);
4330 gcc_assert (BRANCH_EDGE (iter_part_bb)->dest
4331 == FALLTHRU_EDGE (cont_bb)->dest);
4332 seq_start_bb = split_edge (FALLTHRU_EDGE (iter_part_bb));
4333 body_bb = single_succ (seq_start_bb);
4334 gcc_assert (BRANCH_EDGE (cont_bb)->dest == body_bb);
4335 gcc_assert (EDGE_COUNT (cont_bb->succs) == 2);
4336 fin_bb = FALLTHRU_EDGE (cont_bb)->dest;
4337 trip_update_bb = split_edge (FALLTHRU_EDGE (cont_bb));
4338 exit_bb = region->exit;
4340 /* Trip and adjustment setup goes in ENTRY_BB. */
4341 si = gsi_last_bb (entry_bb);
4342 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_FOR);
4344 t = build_call_expr (built_in_decls[BUILT_IN_OMP_GET_NUM_THREADS], 0);
4345 t = fold_convert (itype, t);
4346 nthreads = force_gimple_operand_gsi (&si, t, true, NULL_TREE,
4347 true, GSI_SAME_STMT);
4349 t = build_call_expr (built_in_decls[BUILT_IN_OMP_GET_THREAD_NUM], 0);
4350 t = fold_convert (itype, t);
4351 threadid = force_gimple_operand_gsi (&si, t, true, NULL_TREE,
4352 true, GSI_SAME_STMT);
  fd->loop.n1
    = force_gimple_operand_gsi (&si, fold_convert (type, fd->loop.n1),
4356 true, NULL_TREE, true, GSI_SAME_STMT);
  fd->loop.n2
    = force_gimple_operand_gsi (&si, fold_convert (itype, fd->loop.n2),
4359 true, NULL_TREE, true, GSI_SAME_STMT);
  fd->loop.step
    = force_gimple_operand_gsi (&si, fold_convert (itype, fd->loop.step),
4362 true, NULL_TREE, true, GSI_SAME_STMT);
  fd->chunk_size
    = force_gimple_operand_gsi (&si, fold_convert (itype, fd->chunk_size),
4365 true, NULL_TREE, true, GSI_SAME_STMT);
4367 t = build_int_cst (itype, (fd->loop.cond_code == LT_EXPR ? -1 : 1));
4368 t = fold_build2 (PLUS_EXPR, itype, fd->loop.step, t);
4369 t = fold_build2 (PLUS_EXPR, itype, t, fd->loop.n2);
4370 t = fold_build2 (MINUS_EXPR, itype, t, fold_convert (itype, fd->loop.n1));
4371 if (TYPE_UNSIGNED (itype) && fd->loop.cond_code == GT_EXPR)
4372 t = fold_build2 (TRUNC_DIV_EXPR, itype,
4373 fold_build1 (NEGATE_EXPR, itype, t),
		     fold_build1 (NEGATE_EXPR, itype, fd->loop.step));
  else
    t = fold_build2 (TRUNC_DIV_EXPR, itype, t, fd->loop.step);
4377 t = fold_convert (itype, t);
4378 n = force_gimple_operand_gsi (&si, t, true, NULL_TREE,
4379 true, GSI_SAME_STMT);
4381 trip_var = create_tmp_var (itype, ".trip");
  if (gimple_in_ssa_p (cfun))
    {
      add_referenced_var (trip_var);
      trip_init = make_ssa_name (trip_var, NULL);
      trip_main = make_ssa_name (trip_var, NULL);
      trip_back = make_ssa_name (trip_var, NULL);
    }
  else
    {
      trip_init = trip_var;
      trip_main = trip_var;
      trip_back = trip_var;
    }
4396 stmt = gimple_build_assign (trip_init, build_int_cst (itype, 0));
4397 gsi_insert_before (&si, stmt, GSI_SAME_STMT);
4399 t = fold_build2 (MULT_EXPR, itype, threadid, fd->chunk_size);
4400 t = fold_build2 (MULT_EXPR, itype, t, fd->loop.step);
4401 if (POINTER_TYPE_P (type))
4402 t = fold_build2 (POINTER_PLUS_EXPR, type, fd->loop.n1,
4403 fold_convert (sizetype, t));
  else
    t = fold_build2 (PLUS_EXPR, type, t, fd->loop.n1);
4406 v_extra = force_gimple_operand_gsi (&si, t, true, NULL_TREE,
4407 true, GSI_SAME_STMT);
4409 /* Remove the GIMPLE_OMP_FOR. */
4410 gsi_remove (&si, true);
4412 /* Iteration space partitioning goes in ITER_PART_BB. */
4413 si = gsi_last_bb (iter_part_bb);
4415 t = fold_build2 (MULT_EXPR, itype, trip_main, nthreads);
4416 t = fold_build2 (PLUS_EXPR, itype, t, threadid);
4417 t = fold_build2 (MULT_EXPR, itype, t, fd->chunk_size);
4418 s0 = force_gimple_operand_gsi (&si, t, true, NULL_TREE,
4419 false, GSI_CONTINUE_LINKING);
4421 t = fold_build2 (PLUS_EXPR, itype, s0, fd->chunk_size);
4422 t = fold_build2 (MIN_EXPR, itype, t, n);
4423 e0 = force_gimple_operand_gsi (&si, t, true, NULL_TREE,
4424 false, GSI_CONTINUE_LINKING);
4426 t = build2 (LT_EXPR, boolean_type_node, s0, n);
4427 gsi_insert_after (&si, gimple_build_cond_empty (t), GSI_CONTINUE_LINKING);
4429 /* Setup code for sequential iteration goes in SEQ_START_BB. */
4430 si = gsi_start_bb (seq_start_bb);
4432 t = fold_convert (itype, s0);
4433 t = fold_build2 (MULT_EXPR, itype, t, fd->loop.step);
4434 if (POINTER_TYPE_P (type))
4435 t = fold_build2 (POINTER_PLUS_EXPR, type, fd->loop.n1,
4436 fold_convert (sizetype, t));
  else
    t = fold_build2 (PLUS_EXPR, type, t, fd->loop.n1);
4439 t = force_gimple_operand_gsi (&si, t, false, NULL_TREE,
4440 false, GSI_CONTINUE_LINKING);
4441 stmt = gimple_build_assign (fd->loop.v, t);
4442 gsi_insert_after (&si, stmt, GSI_CONTINUE_LINKING);
4444 t = fold_convert (itype, e0);
4445 t = fold_build2 (MULT_EXPR, itype, t, fd->loop.step);
4446 if (POINTER_TYPE_P (type))
4447 t = fold_build2 (POINTER_PLUS_EXPR, type, fd->loop.n1,
4448 fold_convert (sizetype, t));
  else
    t = fold_build2 (PLUS_EXPR, type, t, fd->loop.n1);
4451 e = force_gimple_operand_gsi (&si, t, true, NULL_TREE,
4452 false, GSI_CONTINUE_LINKING);
4454 /* The code controlling the sequential loop goes in CONT_BB,
4455 replacing the GIMPLE_OMP_CONTINUE. */
4456 si = gsi_last_bb (cont_bb);
4457 stmt = gsi_stmt (si);
4458 gcc_assert (gimple_code (stmt) == GIMPLE_OMP_CONTINUE);
4459 v_main = gimple_omp_continue_control_use (stmt);
4460 v_back = gimple_omp_continue_control_def (stmt);
4462 if (POINTER_TYPE_P (type))
4463 t = fold_build2 (POINTER_PLUS_EXPR, type, v_main,
4464 fold_convert (sizetype, fd->loop.step));
  else
    t = fold_build2 (PLUS_EXPR, type, v_main, fd->loop.step);
4467 stmt = gimple_build_assign (v_back, t);
4468 gsi_insert_before (&si, stmt, GSI_SAME_STMT);
4470 t = build2 (fd->loop.cond_code, boolean_type_node, v_back, e);
4471 gsi_insert_before (&si, gimple_build_cond_empty (t), GSI_SAME_STMT);
4473 /* Remove GIMPLE_OMP_CONTINUE. */
4474 gsi_remove (&si, true);
4476 /* Trip update code goes into TRIP_UPDATE_BB. */
4477 si = gsi_start_bb (trip_update_bb);
4479 t = build_int_cst (itype, 1);
4480 t = build2 (PLUS_EXPR, itype, trip_main, t);
4481 stmt = gimple_build_assign (trip_back, t);
4482 gsi_insert_after (&si, stmt, GSI_CONTINUE_LINKING);
4484 /* Replace the GIMPLE_OMP_RETURN with a barrier, or nothing. */
4485 si = gsi_last_bb (exit_bb);
4486 if (!gimple_omp_return_nowait_p (gsi_stmt (si)))
4487 force_gimple_operand_gsi (&si, build_omp_barrier (), false, NULL_TREE,
4488 false, GSI_SAME_STMT);
4489 gsi_remove (&si, true);
4491 /* Connect the new blocks. */
4492 find_edge (iter_part_bb, seq_start_bb)->flags = EDGE_TRUE_VALUE;
4493 find_edge (iter_part_bb, fin_bb)->flags = EDGE_FALSE_VALUE;
4495 find_edge (cont_bb, body_bb)->flags = EDGE_TRUE_VALUE;
4496 find_edge (cont_bb, trip_update_bb)->flags = EDGE_FALSE_VALUE;
4498 redirect_edge_and_branch (single_succ_edge (trip_update_bb), iter_part_bb);
4500 if (gimple_in_ssa_p (cfun))
    {
      gimple_stmt_iterator psi;
      gimple phi;
      edge re, ene;
      edge_var_map_vector head;
      edge_var_map *vm;
      size_t i;
4509 /* When we redirect the edge from trip_update_bb to iter_part_bb, we
4510 remove arguments of the phi nodes in fin_bb. We need to create
4511 appropriate phi nodes in iter_part_bb instead. */
4512 se = single_pred_edge (fin_bb);
4513 re = single_succ_edge (trip_update_bb);
4514 head = redirect_edge_var_map_vector (re);
4515 ene = single_succ_edge (entry_bb);
4517 psi = gsi_start_phis (fin_bb);
4518 for (i = 0; !gsi_end_p (psi) && VEC_iterate (edge_var_map, head, i, vm);
4519 gsi_next (&psi), ++i)
	{
	  gimple nphi;
	  source_location locus;
4524 phi = gsi_stmt (psi);
4525 t = gimple_phi_result (phi);
4526 gcc_assert (t == redirect_edge_var_map_result (vm));
4527 nphi = create_phi_node (t, iter_part_bb);
4528 SSA_NAME_DEF_STMT (t) = nphi;
4530 t = PHI_ARG_DEF_FROM_EDGE (phi, se);
4531 locus = gimple_phi_arg_location_from_edge (phi, se);
4533 /* A special case -- fd->loop.v is not yet computed in
4534 iter_part_bb, we need to use v_extra instead. */
4535 if (t == fd->loop.v)
	    t = v_extra;
	  add_phi_arg (nphi, t, ene, locus);
4538 locus = redirect_edge_var_map_location (vm);
	  add_phi_arg (nphi, redirect_edge_var_map_def (vm), re, locus);
	}
4541 gcc_assert (!gsi_end_p (psi) && i == VEC_length (edge_var_map, head));
4542 redirect_edge_var_map_clear (re);
      while (1)
	{
	  psi = gsi_start_phis (fin_bb);
	  if (gsi_end_p (psi))
	    break;
	  remove_phi_node (&psi, false);
	}
4551 /* Make phi node for trip. */
4552 phi = create_phi_node (trip_main, iter_part_bb);
4553 SSA_NAME_DEF_STMT (trip_main) = phi;
      add_phi_arg (phi, trip_back, single_succ_edge (trip_update_bb),
		   UNKNOWN_LOCATION);
      add_phi_arg (phi, trip_init, single_succ_edge (entry_bb),
		   UNKNOWN_LOCATION);
    }
4560 set_immediate_dominator (CDI_DOMINATORS, trip_update_bb, cont_bb);
4561 set_immediate_dominator (CDI_DOMINATORS, iter_part_bb,
4562 recompute_dominator (CDI_DOMINATORS, iter_part_bb));
4563 set_immediate_dominator (CDI_DOMINATORS, fin_bb,
4564 recompute_dominator (CDI_DOMINATORS, fin_bb));
4565 set_immediate_dominator (CDI_DOMINATORS, seq_start_bb,
4566 recompute_dominator (CDI_DOMINATORS, seq_start_bb));
4567 set_immediate_dominator (CDI_DOMINATORS, body_bb,
4568 recompute_dominator (CDI_DOMINATORS, body_bb));
}

/* Expand the OpenMP loop defined by REGION.  */

static void
4575 expand_omp_for (struct omp_region *region)
{
  struct omp_for_data fd;
4578 struct omp_for_data_loop *loops;
  loops
    = (struct omp_for_data_loop *)
4582 alloca (gimple_omp_for_collapse (last_stmt (region->entry))
4583 * sizeof (struct omp_for_data_loop));
4584 extract_omp_for_data (last_stmt (region->entry), &fd, loops);
4585 region->sched_kind = fd.sched_kind;
4587 gcc_assert (EDGE_COUNT (region->entry->succs) == 2);
4588 BRANCH_EDGE (region->entry)->flags &= ~EDGE_ABNORMAL;
4589 FALLTHRU_EDGE (region->entry)->flags &= ~EDGE_ABNORMAL;
  if (region->cont)
    {
      gcc_assert (EDGE_COUNT (region->cont->succs) == 2);
4593 BRANCH_EDGE (region->cont)->flags &= ~EDGE_ABNORMAL;
      FALLTHRU_EDGE (region->cont)->flags &= ~EDGE_ABNORMAL;
    }
  if (fd.sched_kind == OMP_CLAUSE_SCHEDULE_STATIC
      && !fd.have_ordered
      && fd.collapse == 1
      && region->cont != NULL)
    {
      if (fd.chunk_size == NULL)
4603 expand_omp_for_static_nochunk (region, &fd);
      else
	expand_omp_for_static_chunk (region, &fd);
    }
  else
    {
4609 int fn_index, start_ix, next_ix;
4611 gcc_assert (fd.sched_kind != OMP_CLAUSE_SCHEDULE_AUTO);
4612 fn_index = (fd.sched_kind == OMP_CLAUSE_SCHEDULE_RUNTIME)
4613 ? 3 : fd.sched_kind;
4614 fn_index += fd.have_ordered * 4;
4615 start_ix = BUILT_IN_GOMP_LOOP_STATIC_START + fn_index;
4616 next_ix = BUILT_IN_GOMP_LOOP_STATIC_NEXT + fn_index;
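      /* Editorial note: assuming the GOMP_loop builtins are declared in
	 the order static, dynamic, guided, runtime followed by their
	 _ordered counterparts, e.g. schedule(dynamic) with an ordered
	 clause yields fn_index 1 + 4 and thus selects
	 GOMP_loop_ordered_dynamic_start/_next.  */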
4617 if (fd.iter_type == long_long_unsigned_type_node)
	{
	  start_ix += BUILT_IN_GOMP_LOOP_ULL_STATIC_START
4620 - BUILT_IN_GOMP_LOOP_STATIC_START;
4621 next_ix += BUILT_IN_GOMP_LOOP_ULL_STATIC_NEXT
		     - BUILT_IN_GOMP_LOOP_STATIC_NEXT;
	}
4624 expand_omp_for_generic (region, &fd, (enum built_in_function) start_ix,
			      (enum built_in_function) next_ix);
    }
4628 update_ssa (TODO_update_ssa_only_virtuals);
}

/* Expand code for an OpenMP sections directive.  In pseudo code, we generate

	v = GOMP_sections_start (n);
    L0:
	switch (v)
	  {
	  case 0:
	    goto L2;
	  case 1:
	    section 1;
	    goto L1;
	  case 2:
	    ...
	  case n:
	    ...
	  default:
	    abort ();
	  }
    L1:
	v = GOMP_sections_next ();
	goto L0;
    L2:
	reduction;

    If this is a combined parallel sections, replace the call to
    GOMP_sections_start with call to GOMP_sections_next.  */
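/* Editorial sketch (not from the original): the per-thread shape of the
   expansion for three sections, written as plain C against the real
   libgomp entry points:

     unsigned v;
     for (v = GOMP_sections_start (3); v; v = GOMP_sections_next ())
       switch (v)
	 {
	 case 1: ... first section ...; break;
	 case 2: ... second section ...; break;
	 case 3: ... third section ...; break;
	 default: __builtin_trap ();
	 }
     GOMP_sections_end ();

   GOMP_sections_start/GOMP_sections_next return 0 once every section
   has been handed out, which is the '0' case jumping to L2 above.  */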
static void
expand_omp_sections (struct omp_region *region)
{
  tree t, u, vin = NULL, vmain, vnext, l2;
  VEC (tree,heap) *label_vec;
  unsigned len;
4665 basic_block entry_bb, l0_bb, l1_bb, l2_bb, default_bb;
4666 gimple_stmt_iterator si, switch_si;
  gimple sections_stmt, stmt, cont;
  edge_iterator ei;
  edge e;
  struct omp_region *inner;
  unsigned i, casei;
4672 bool exit_reachable = region->cont != NULL;
4674 gcc_assert (exit_reachable == (region->exit != NULL));
4675 entry_bb = region->entry;
4676 l0_bb = single_succ (entry_bb);
4677 l1_bb = region->cont;
4678 l2_bb = region->exit;
  if (exit_reachable)
    {
      if (single_pred_p (l2_bb) && single_pred (l2_bb) == l0_bb)
	l2 = gimple_block_label (l2_bb);
      else
	{
	  /* This can happen if there are reductions.  */
	  len = EDGE_COUNT (l0_bb->succs);
	  gcc_assert (len > 0);
	  e = EDGE_SUCC (l0_bb, len - 1);
	  si = gsi_last_bb (e->dest);
	  l2 = NULL_TREE;
	  if (gsi_end_p (si)
	      || gimple_code (gsi_stmt (si)) != GIMPLE_OMP_SECTION)
	    l2 = gimple_block_label (e->dest);
	  else
	    FOR_EACH_EDGE (e, ei, l0_bb->succs)
	      {
		si = gsi_last_bb (e->dest);
		if (gsi_end_p (si)
		    || gimple_code (gsi_stmt (si)) != GIMPLE_OMP_SECTION)
		  {
		    l2 = gimple_block_label (e->dest);
		    break;
		  }
	      }
	}
      default_bb = create_empty_bb (l1_bb->prev_bb);
    }
  else
    {
      default_bb = create_empty_bb (l0_bb);
      l2 = gimple_block_label (default_bb);
    }
4714 /* We will build a switch() with enough cases for all the
4715 GIMPLE_OMP_SECTION regions, a '0' case to handle the end of more work
4716 and a default case to abort if something goes wrong. */
4717 len = EDGE_COUNT (l0_bb->succs);
  /* Use VEC_quick_push on label_vec throughout, since we know the size
     in advance.  */
4721 label_vec = VEC_alloc (tree, heap, len);
4723 /* The call to GOMP_sections_start goes in ENTRY_BB, replacing the
4724 GIMPLE_OMP_SECTIONS statement. */
4725 si = gsi_last_bb (entry_bb);
4726 sections_stmt = gsi_stmt (si);
4727 gcc_assert (gimple_code (sections_stmt) == GIMPLE_OMP_SECTIONS);
4728 vin = gimple_omp_sections_control (sections_stmt);
4729 if (!is_combined_parallel (region))
    {
      /* If we are not inside a combined parallel+sections region,
4732 call GOMP_sections_start. */
4733 t = build_int_cst (unsigned_type_node,
4734 exit_reachable ? len - 1 : len);
4735 u = built_in_decls[BUILT_IN_GOMP_SECTIONS_START];
4736 stmt = gimple_build_call (u, 1, t);
    }
  else
    {
      /* Otherwise, call GOMP_sections_next.  */
4741 u = built_in_decls[BUILT_IN_GOMP_SECTIONS_NEXT];
4742 stmt = gimple_build_call (u, 0);
    }
  gimple_call_set_lhs (stmt, vin);
4745 gsi_insert_after (&si, stmt, GSI_SAME_STMT);
4746 gsi_remove (&si, true);
  /* The switch() statement replacing GIMPLE_OMP_SECTIONS_SWITCH goes in
     L0_BB.  */
4750 switch_si = gsi_last_bb (l0_bb);
4751 gcc_assert (gimple_code (gsi_stmt (switch_si)) == GIMPLE_OMP_SECTIONS_SWITCH);
  if (exit_reachable)
    {
      cont = last_stmt (l1_bb);
      gcc_assert (gimple_code (cont) == GIMPLE_OMP_CONTINUE);
      vmain = gimple_omp_continue_control_use (cont);
      vnext = gimple_omp_continue_control_def (cont);
    }
  else
    {
      vmain = vin;
      vnext = NULL_TREE;
    }

  i = 0;
  if (exit_reachable)
    {
      t = build3 (CASE_LABEL_EXPR, void_type_node,
		  build_int_cst (unsigned_type_node, 0), NULL, l2);
      VEC_quick_push (tree, label_vec, t);
      i++;
    }
4774 /* Convert each GIMPLE_OMP_SECTION into a CASE_LABEL_EXPR. */
4775 for (inner = region->inner, casei = 1;
       inner;
       inner = inner->next, i++, casei++)
    {
4779 basic_block s_entry_bb, s_exit_bb;
4781 /* Skip optional reduction region. */
      if (inner->type == GIMPLE_OMP_ATOMIC_LOAD)
	{
	  --i;
	  --casei;
	  continue;
	}
4789 s_entry_bb = inner->entry;
4790 s_exit_bb = inner->exit;
4792 t = gimple_block_label (s_entry_bb);
4793 u = build_int_cst (unsigned_type_node, casei);
4794 u = build3 (CASE_LABEL_EXPR, void_type_node, u, NULL, t);
4795 VEC_quick_push (tree, label_vec, u);
4797 si = gsi_last_bb (s_entry_bb);
4798 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_SECTION);
4799 gcc_assert (i < len || gimple_omp_section_last_p (gsi_stmt (si)));
4800 gsi_remove (&si, true);
4801 single_succ_edge (s_entry_bb)->flags = EDGE_FALLTHRU;
      if (s_exit_bb == NULL)
	continue;
4806 si = gsi_last_bb (s_exit_bb);
4807 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_RETURN);
4808 gsi_remove (&si, true);
      single_succ_edge (s_exit_bb)->flags = EDGE_FALLTHRU;
    }
4813 /* Error handling code goes in DEFAULT_BB. */
4814 t = gimple_block_label (default_bb);
4815 u = build3 (CASE_LABEL_EXPR, void_type_node, NULL, NULL, t);
4816 make_edge (l0_bb, default_bb, 0);
4818 stmt = gimple_build_switch_vec (vmain, u, label_vec);
4819 gsi_insert_after (&switch_si, stmt, GSI_SAME_STMT);
4820 gsi_remove (&switch_si, true);
4821 VEC_free (tree, heap, label_vec);
4823 si = gsi_start_bb (default_bb);
4824 stmt = gimple_build_call (built_in_decls[BUILT_IN_TRAP], 0);
4825 gsi_insert_after (&si, stmt, GSI_CONTINUE_LINKING);
  if (exit_reachable)
    {
      /* Code to get the next section goes in L1_BB.  */
4830 si = gsi_last_bb (l1_bb);
4831 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_CONTINUE);
4833 stmt = gimple_build_call (built_in_decls[BUILT_IN_GOMP_SECTIONS_NEXT], 0);
4834 gimple_call_set_lhs (stmt, vnext);
4835 gsi_insert_after (&si, stmt, GSI_SAME_STMT);
4836 gsi_remove (&si, true);
4838 single_succ_edge (l1_bb)->flags = EDGE_FALLTHRU;
4840 /* Cleanup function replaces GIMPLE_OMP_RETURN in EXIT_BB. */
4841 si = gsi_last_bb (l2_bb);
4842 if (gimple_omp_return_nowait_p (gsi_stmt (si)))
4843 t = built_in_decls[BUILT_IN_GOMP_SECTIONS_END_NOWAIT];
      else
	t = built_in_decls[BUILT_IN_GOMP_SECTIONS_END];
4846 stmt = gimple_build_call (t, 0);
4847 gsi_insert_after (&si, stmt, GSI_SAME_STMT);
      gsi_remove (&si, true);
    }
4851 set_immediate_dominator (CDI_DOMINATORS, default_bb, l0_bb);
}

/* Expand code for an OpenMP single directive.  We've already expanded
   much of the code, here we simply place the GOMP_barrier call.  */

static void
4859 expand_omp_single (struct omp_region *region)
{
  basic_block entry_bb, exit_bb;
4862 gimple_stmt_iterator si;
4863 bool need_barrier = false;
4865 entry_bb = region->entry;
4866 exit_bb = region->exit;
4868 si = gsi_last_bb (entry_bb);
4869 /* The terminal barrier at the end of a GOMP_single_copy sequence cannot
4870 be removed. We need to ensure that the thread that entered the single
4871 does not exit before the data is copied out by the other threads. */
4872 if (find_omp_clause (gimple_omp_single_clauses (gsi_stmt (si)),
4873 OMP_CLAUSE_COPYPRIVATE))
4874 need_barrier = true;
4875 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_SINGLE);
4876 gsi_remove (&si, true);
4877 single_succ_edge (entry_bb)->flags = EDGE_FALLTHRU;
4879 si = gsi_last_bb (exit_bb);
4880 if (!gimple_omp_return_nowait_p (gsi_stmt (si)) || need_barrier)
4881 force_gimple_operand_gsi (&si, build_omp_barrier (), false, NULL_TREE,
4882 false, GSI_SAME_STMT);
4883 gsi_remove (&si, true);
4884 single_succ_edge (exit_bb)->flags = EDGE_FALLTHRU;
}

/* Generic expansion for OpenMP synchronization directives: master,
   ordered and critical.  All we need to do here is remove the entry
   and exit markers for REGION.  */

static void
4893 expand_omp_synch (struct omp_region *region)
{
  basic_block entry_bb, exit_bb;
4896 gimple_stmt_iterator si;
4898 entry_bb = region->entry;
4899 exit_bb = region->exit;
4901 si = gsi_last_bb (entry_bb);
4902 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_SINGLE
4903 || gimple_code (gsi_stmt (si)) == GIMPLE_OMP_MASTER
4904 || gimple_code (gsi_stmt (si)) == GIMPLE_OMP_ORDERED
4905 || gimple_code (gsi_stmt (si)) == GIMPLE_OMP_CRITICAL);
4906 gsi_remove (&si, true);
4907 single_succ_edge (entry_bb)->flags = EDGE_FALLTHRU;
  if (exit_bb)
    {
      si = gsi_last_bb (exit_bb);
      gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_RETURN);
      gsi_remove (&si, true);
      single_succ_edge (exit_bb)->flags = EDGE_FALLTHRU;
    }
}

/* A subroutine of expand_omp_atomic.  Attempt to implement the atomic
   operation as a __sync_fetch_and_op builtin.  INDEX is log2 of the
   size of the data type, and thus usable to find the index of the
   builtin decl.  Returns false if the expression is not of the proper
   form.  */
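/* Editorial examples (not from the original): with int x, the bodies

     #pragma omp atomic
     x += y;		-- matches, lowered to __sync_fetch_and_add

     #pragma omp atomic
     x = y | x;		-- commutative, also matches (x is rhs2)

   are handled here, while  x = y - x;  is not of the proper form (the
   loaded value sits on the right of a non-commutative operator) and
   falls through to the compare-and-swap expansion below.  */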
static bool
expand_omp_atomic_fetch_op (basic_block load_bb,
4925 tree addr, tree loaded_val,
4926 tree stored_val, int index)
{
  enum built_in_function base;
  tree decl, itype, call;
  direct_optab optab;
  tree rhs;
  basic_block store_bb = single_succ (load_bb);
  gimple_stmt_iterator gsi;
  gimple stmt;
  location_t loc;
  /* We expect to find the following sequences:

   load_bb:
       GIMPLE_OMP_ATOMIC_LOAD (tmp, mem)

   store_bb:
       val = tmp OP something; (or: something OP tmp)
       GIMPLE_OMP_ATOMIC_STORE (val)

  ??? FIXME: Allow a more flexible sequence.
  Perhaps use data flow to pick the statements.

  */
4951 gsi = gsi_after_labels (store_bb);
4952 stmt = gsi_stmt (gsi);
4953 loc = gimple_location (stmt);
  if (!is_gimple_assign (stmt))
    return false;
  gsi_next (&gsi);
  if (gimple_code (gsi_stmt (gsi)) != GIMPLE_OMP_ATOMIC_STORE)
    return false;

  if (!operand_equal_p (gimple_assign_lhs (stmt), stored_val, 0))
    return false;
4963 /* Check for one of the supported fetch-op operations. */
  switch (gimple_assign_rhs_code (stmt))
    {
    case PLUS_EXPR:
    case POINTER_PLUS_EXPR:
      base = BUILT_IN_FETCH_AND_ADD_N;
      optab = sync_add_optab;
      break;
    case MINUS_EXPR:
      base = BUILT_IN_FETCH_AND_SUB_N;
      /* Fetch-and-sub is expanded as negate plus fetch-and-add, so the
	 add optab is the right availability check (editorial note).  */
      optab = sync_add_optab;
      break;
    case BIT_AND_EXPR:
      base = BUILT_IN_FETCH_AND_AND_N;
      optab = sync_and_optab;
      break;
    case BIT_IOR_EXPR:
      base = BUILT_IN_FETCH_AND_OR_N;
      optab = sync_ior_optab;
      break;
    case BIT_XOR_EXPR:
      base = BUILT_IN_FETCH_AND_XOR_N;
      optab = sync_xor_optab;
      break;
    default:
      return false;
    }
4990 /* Make sure the expression is of the proper form. */
4991 if (operand_equal_p (gimple_assign_rhs1 (stmt), loaded_val, 0))
4992 rhs = gimple_assign_rhs2 (stmt);
4993 else if (commutative_tree_code (gimple_assign_rhs_code (stmt))
4994 && operand_equal_p (gimple_assign_rhs2 (stmt), loaded_val, 0))
    rhs = gimple_assign_rhs1 (stmt);
  else
    return false;
4999 decl = built_in_decls[base + index + 1];
5000 itype = TREE_TYPE (TREE_TYPE (decl));
  if (direct_optab_handler (optab, TYPE_MODE (itype)) == CODE_FOR_nothing)
    return false;
5005 gsi = gsi_last_bb (load_bb);
5006 gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_ATOMIC_LOAD);
  call = build_call_expr_loc (loc, decl, 2, addr,
			      fold_convert_loc (loc, itype, rhs));
5010 call = fold_convert_loc (loc, void_type_node, call);
5011 force_gimple_operand_gsi (&gsi, call, true, NULL_TREE, true, GSI_SAME_STMT);
5012 gsi_remove (&gsi, true);
5014 gsi = gsi_last_bb (store_bb);
5015 gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_ATOMIC_STORE);
5016 gsi_remove (&gsi, true);
5017 gsi = gsi_last_bb (store_bb);
5018 gsi_remove (&gsi, true);
5020 if (gimple_in_ssa_p (cfun))
5021 update_ssa (TODO_update_ssa_no_phi);
  return true;
}

/* A subroutine of expand_omp_atomic.  Implement the atomic operation as:

      oldval = *addr;
      repeat:
	newval = rhs;	-- with oldval replacing *addr in rhs
	oldval = __sync_val_compare_and_swap (addr, oldval, newval);
	if (oldval != newval)
	  goto repeat;

   INDEX is log2 of the size of the data type, and thus usable to find
   the index of the builtin decl.  */
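/* Editorial sketch (not from the original): the same loop hand-written
   for a 4-byte float, making explicit the view-converts the code below
   builds; __sync_val_compare_and_swap is the real GCC builtin:

     static void
     atomic_add_float (float *addr, float incr)
     {
       union { float f; unsigned int i; } cur, newv;
       unsigned int old;

       cur.f = *addr;				-- initial load
       for (;;)
	 {
	   old = cur.i;
	   newv.f = cur.f + incr;		-- newval = rhs
	   cur.i = __sync_val_compare_and_swap ((unsigned int *) addr,
						old, newv.i);
	   if (cur.i == old)			-- bitwise compare, see below
	     break;
	 }
     }
*/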
static bool
expand_omp_atomic_pipeline (basic_block load_bb, basic_block store_bb,
5040 tree addr, tree loaded_val, tree stored_val,
			    int index)
{
  tree loadedi, storedi, initial, new_storedi, old_vali;
  tree type, itype, cmpxchg, iaddr;
  gimple_stmt_iterator si;
  basic_block loop_header = single_succ (load_bb);
  gimple phi, stmt;
  edge e;
5050 cmpxchg = built_in_decls[BUILT_IN_VAL_COMPARE_AND_SWAP_N + index + 1];
5051 type = TYPE_MAIN_VARIANT (TREE_TYPE (TREE_TYPE (addr)));
5052 itype = TREE_TYPE (TREE_TYPE (cmpxchg));
5054 if (direct_optab_handler (sync_compare_and_swap_optab, TYPE_MODE (itype))
      == CODE_FOR_nothing)
    return false;
5058 /* Load the initial value, replacing the GIMPLE_OMP_ATOMIC_LOAD. */
5059 si = gsi_last_bb (load_bb);
5060 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_ATOMIC_LOAD);
5062 /* For floating-point values, we'll need to view-convert them to integers
5063 so that we can perform the atomic compare and swap. Simplify the
5064 following code by always setting up the "i"ntegral variables. */
  if (!INTEGRAL_TYPE_P (type) && !POINTER_TYPE_P (type))
    {
      tree iaddr_val;

      iaddr = create_tmp_var (build_pointer_type_for_mode (itype, ptr_mode,
							   true), NULL);
      iaddr_val
	= force_gimple_operand_gsi (&si,
				    fold_convert (TREE_TYPE (iaddr), addr),
				    false, NULL_TREE, true, GSI_SAME_STMT);
      stmt = gimple_build_assign (iaddr, iaddr_val);
      gsi_insert_before (&si, stmt, GSI_SAME_STMT);
      loadedi = create_tmp_var (itype, NULL);
      if (gimple_in_ssa_p (cfun))
	{
	  add_referenced_var (iaddr);
	  add_referenced_var (loadedi);
	  loadedi = make_ssa_name (loadedi, NULL);
	}
    }
  else
    {
      iaddr = addr;
      loadedi = loaded_val;
    }
  initial
    = force_gimple_operand_gsi (&si,
				build2 (MEM_REF, TREE_TYPE (TREE_TYPE (iaddr)),
					iaddr,
					build_int_cst (TREE_TYPE (iaddr), 0)),
				true, NULL_TREE, true, GSI_SAME_STMT);
5098 /* Move the value to the LOADEDI temporary. */
  if (gimple_in_ssa_p (cfun))
    {
      gcc_assert (gimple_seq_empty_p (phi_nodes (loop_header)));
      phi = create_phi_node (loadedi, loop_header);
      SSA_NAME_DEF_STMT (loadedi) = phi;
      SET_USE (PHI_ARG_DEF_PTR_FROM_EDGE (phi, single_succ_edge (load_bb)),
	       initial);
    }
  else
    gsi_insert_before (&si,
		       gimple_build_assign (loadedi, initial),
		       GSI_SAME_STMT);
  if (loadedi != loaded_val)
    {
      gimple_stmt_iterator gsi2;
      tree x;

      x = build1 (VIEW_CONVERT_EXPR, type, loadedi);
      gsi2 = gsi_start_bb (loop_header);
      if (gimple_in_ssa_p (cfun))
	{
	  x = force_gimple_operand_gsi (&gsi2, x, true, NULL_TREE,
					true, GSI_SAME_STMT);
	  stmt = gimple_build_assign (loaded_val, x);
	  gsi_insert_before (&gsi2, stmt, GSI_SAME_STMT);
	}
      else
	{
	  x = build2 (MODIFY_EXPR, TREE_TYPE (loaded_val), loaded_val, x);
	  force_gimple_operand_gsi (&gsi2, x, true, NULL_TREE,
				    true, GSI_SAME_STMT);
	}
    }
  gsi_remove (&si, true);
5135 si = gsi_last_bb (store_bb);
5136 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_ATOMIC_STORE);
  if (iaddr == addr)
    storedi = stored_val;
  else
    storedi =
      force_gimple_operand_gsi (&si,
				build1 (VIEW_CONVERT_EXPR, itype,
					stored_val), true, NULL_TREE, true,
				GSI_SAME_STMT);
5147 /* Build the compare&swap statement. */
5148 new_storedi = build_call_expr (cmpxchg, 3, iaddr, loadedi, storedi);
  new_storedi = force_gimple_operand_gsi (&si,
					  fold_convert (TREE_TYPE (loadedi),
							new_storedi),
					  true, NULL_TREE,
					  true, GSI_SAME_STMT);
5155 if (gimple_in_ssa_p (cfun))
    old_vali = loadedi;
  else
    {
      old_vali = create_tmp_var (TREE_TYPE (loadedi), NULL);
5160 if (gimple_in_ssa_p (cfun))
5161 add_referenced_var (old_vali);
5162 stmt = gimple_build_assign (old_vali, loadedi);
5163 gsi_insert_before (&si, stmt, GSI_SAME_STMT);
5165 stmt = gimple_build_assign (loadedi, new_storedi);
      gsi_insert_before (&si, stmt, GSI_SAME_STMT);
    }
5169 /* Note that we always perform the comparison as an integer, even for
5170 floating point. This allows the atomic operation to properly
5171 succeed even with NaNs and -0.0. */
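  /* For instance, 0.0 == -0.0 is true as a floating comparison although
     the bit patterns differ, and NaN != NaN is always true; either case
     could make a floating-point test here misbehave, whereas the bitwise
     integer comparison cannot (editorial example).  */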
5172 stmt = gimple_build_cond_empty
5173 (build2 (NE_EXPR, boolean_type_node,
5174 new_storedi, old_vali));
5175 gsi_insert_before (&si, stmt, GSI_SAME_STMT);
5178 e = single_succ_edge (store_bb);
5179 e->flags &= ~EDGE_FALLTHRU;
5180 e->flags |= EDGE_FALSE_VALUE;
5182 e = make_edge (store_bb, loop_header, EDGE_TRUE_VALUE);
5184 /* Copy the new value to loadedi (we already did that before the condition
5185 if we are not in SSA). */
5186 if (gimple_in_ssa_p (cfun))
    {
      phi = gimple_seq_first_stmt (phi_nodes (loop_header));
      SET_USE (PHI_ARG_DEF_PTR_FROM_EDGE (phi, e), new_storedi);
    }
5192 /* Remove GIMPLE_OMP_ATOMIC_STORE. */
5193 gsi_remove (&si, true);
5195 if (gimple_in_ssa_p (cfun))
5196 update_ssa (TODO_update_ssa_no_phi);
  return true;
}

/* A subroutine of expand_omp_atomic.  Implement the atomic operation as:

		      GOMP_atomic_start ();
		      *addr = rhs;
		      GOMP_atomic_end ();

   The result is not globally atomic, but works so long as all parallel
   references are within #pragma omp atomic directives.  According to
   responses received from omp@openmp.org, this appears to be within spec.
   Which makes sense, since that's how several other compilers handle
   this situation as well.
   LOADED_VAL and ADDR are the operands of GIMPLE_OMP_ATOMIC_LOAD we're
   expanding.  STORED_VAL is the operand of the matching
   GIMPLE_OMP_ATOMIC_STORE.

   We replace
   GIMPLE_OMP_ATOMIC_LOAD (loaded_val, addr) with
   loaded_val = *addr;

   and replace
   GIMPLE_OMP_ATOMIC_STORE (stored_val) with
   *addr = stored_val;  */
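/* Editorial example (not from the original): for a type whose size has
   no matching __sync primitive (an x87 long double, for instance),

     #pragma omp atomic
     x *= y;

   ends up as

     GOMP_atomic_start ();
     x = x * y;
     GOMP_atomic_end ();

   serialized against every other such region in the program.  */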
static void
expand_omp_atomic_mutex (basic_block load_bb, basic_block store_bb,
5227 tree addr, tree loaded_val, tree stored_val)
{
  gimple_stmt_iterator si;
  gimple stmt;
  tree t;
5233 si = gsi_last_bb (load_bb);
5234 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_ATOMIC_LOAD);
5236 t = built_in_decls[BUILT_IN_GOMP_ATOMIC_START];
5237 t = build_call_expr (t, 0);
5238 force_gimple_operand_gsi (&si, t, true, NULL_TREE, true, GSI_SAME_STMT);
5240 stmt = gimple_build_assign (loaded_val, build_simple_mem_ref (addr));
5241 gsi_insert_before (&si, stmt, GSI_SAME_STMT);
5242 gsi_remove (&si, true);
5244 si = gsi_last_bb (store_bb);
5245 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_ATOMIC_STORE);
  stmt = gimple_build_assign (build_simple_mem_ref (unshare_expr (addr)),
			      stored_val);
  gsi_insert_before (&si, stmt, GSI_SAME_STMT);
5251 t = built_in_decls[BUILT_IN_GOMP_ATOMIC_END];
5252 t = build_call_expr (t, 0);
5253 force_gimple_operand_gsi (&si, t, true, NULL_TREE, true, GSI_SAME_STMT);
5254 gsi_remove (&si, true);
5256 if (gimple_in_ssa_p (cfun))
5257 update_ssa (TODO_update_ssa_no_phi);
}

/* Expand a GIMPLE_OMP_ATOMIC statement.  We try to expand
   using expand_omp_atomic_fetch_op.  If it failed, we try to
   call expand_omp_atomic_pipeline, and if it fails too, the
   ultimate fallback is wrapping the operation in a mutex
   (expand_omp_atomic_mutex).  REGION is the atomic region built
   by build_omp_regions_1 ().  */

static void
5269 expand_omp_atomic (struct omp_region *region)
{
  basic_block load_bb = region->entry, store_bb = region->exit;
5272 gimple load = last_stmt (load_bb), store = last_stmt (store_bb);
5273 tree loaded_val = gimple_omp_atomic_load_lhs (load);
5274 tree addr = gimple_omp_atomic_load_rhs (load);
5275 tree stored_val = gimple_omp_atomic_store_val (store);
5276 tree type = TYPE_MAIN_VARIANT (TREE_TYPE (TREE_TYPE (addr)));
5277 HOST_WIDE_INT index;
5279 /* Make sure the type is one of the supported sizes. */
5280 index = tree_low_cst (TYPE_SIZE_UNIT (type), 1);
5281 index = exact_log2 (index);
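  /* index is log2 of the byte size, so 0..4 selects among the 1-, 2-,
     4-, 8- and 16-byte variants of the __sync builtins.  */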
  if (index >= 0 && index <= 4)
    {
      unsigned int align = TYPE_ALIGN_UNIT (type);

      /* __sync builtins require strict data alignment.  */
      if (exact_log2 (align) >= index)
	{
	  /* When possible, use specialized atomic update functions.  */
	  if ((INTEGRAL_TYPE_P (type) || POINTER_TYPE_P (type))
	      && store_bb == single_succ (load_bb))
	    {
	      if (expand_omp_atomic_fetch_op (load_bb, addr,
					      loaded_val, stored_val, index))
		return;
	    }

	  /* If we don't have specialized __sync builtins, try and implement
	     as a compare and swap loop.  */
	  if (expand_omp_atomic_pipeline (load_bb, store_bb, addr,
					  loaded_val, stored_val, index))
	    return;
	}
    }
5306 /* The ultimate fallback is wrapping the operation in a mutex. */
5307 expand_omp_atomic_mutex (load_bb, store_bb, addr, loaded_val, stored_val);
}

/* Expand the parallel region tree rooted at REGION.  Expansion
   proceeds in depth-first order.  Innermost regions are expanded
   first.  This way, parallel regions that require a new function to
   be created (e.g., GIMPLE_OMP_PARALLEL) can be expanded without
   having any internal dependencies in their body.  */

static void
5318 expand_omp (struct omp_region *region)
{
  while (region)
    {
      location_t saved_location;
      /* First, determine whether this is a combined parallel+workshare
	 region.  */
      if (region->type == GIMPLE_OMP_PARALLEL)
	determine_parallel_type (region);

      if (region->inner)
	expand_omp (region->inner);
5332 saved_location = input_location;
5333 if (gimple_has_location (last_stmt (region->entry)))
5334 input_location = gimple_location (last_stmt (region->entry));
      switch (region->type)
	{
	case GIMPLE_OMP_PARALLEL:
	case GIMPLE_OMP_TASK:
	  expand_omp_taskreg (region);
	  break;

	case GIMPLE_OMP_FOR:
	  expand_omp_for (region);
	  break;

	case GIMPLE_OMP_SECTIONS:
	  expand_omp_sections (region);
	  break;

	case GIMPLE_OMP_SECTION:
	  /* Individual omp sections are handled together with their
	     parent GIMPLE_OMP_SECTIONS region.  */
	  break;

	case GIMPLE_OMP_SINGLE:
	  expand_omp_single (region);
	  break;

	case GIMPLE_OMP_MASTER:
	case GIMPLE_OMP_ORDERED:
	case GIMPLE_OMP_CRITICAL:
	  expand_omp_synch (region);
	  break;

	case GIMPLE_OMP_ATOMIC_LOAD:
	  expand_omp_atomic (region);
	  break;

	default:
	  gcc_unreachable ();
	}
5374 input_location = saved_location;
5375 region = region->next;
    }
}

/* Helper for build_omp_regions.  Scan the dominator tree starting at
   block BB.  PARENT is the region that contains BB.  If SINGLE_TREE is
   true, the function ends once a single tree is built (otherwise, a
   whole forest of OMP constructs may be built).  */

static void
5386 build_omp_regions_1 (basic_block bb, struct omp_region *parent,
		     bool single_tree)
{
  gimple_stmt_iterator gsi;
  gimple stmt;
  basic_block son;
5393 gsi = gsi_last_bb (bb);
5394 if (!gsi_end_p (gsi) && is_gimple_omp (gsi_stmt (gsi)))
    {
      struct omp_region *region;
5397 enum gimple_code code;
5399 stmt = gsi_stmt (gsi);
5400 code = gimple_code (stmt);
5401 if (code == GIMPLE_OMP_RETURN)
	{
	  /* STMT is the return point out of region PARENT.  Mark it
5404 as the exit point and make PARENT the immediately
5405 enclosing region. */
5406 gcc_assert (parent);
	  parent->exit = bb;
	  parent = parent->outer;
	}
5411 else if (code == GIMPLE_OMP_ATOMIC_STORE)
	{
	  /* GIMPLE_OMP_ATOMIC_STORE is analogous to
	     GIMPLE_OMP_RETURN, but matches with
	     GIMPLE_OMP_ATOMIC_LOAD.  */
5416 gcc_assert (parent);
5417 gcc_assert (parent->type == GIMPLE_OMP_ATOMIC_LOAD);
	  parent->exit = bb;
	  parent = parent->outer;
	}
5423 else if (code == GIMPLE_OMP_CONTINUE)
	{
	  gcc_assert (parent);
	  parent->cont = bb;
	}
5428 else if (code == GIMPLE_OMP_SECTIONS_SWITCH)
	{
	  /* GIMPLE_OMP_SECTIONS_SWITCH is part of
	     GIMPLE_OMP_SECTIONS, and we do nothing for it.  */
	}
      else
	{
	  /* Otherwise, this directive becomes the parent for a new
	     region.  */
	  region = new_omp_region (bb, code, parent);
	  parent = region;
	}
    }
  if (single_tree && !parent)
    return;
5446 for (son = first_dom_son (CDI_DOMINATORS, bb);
       son;
       son = next_dom_son (CDI_DOMINATORS, son))
5449 build_omp_regions_1 (son, parent, single_tree);
}

/* Builds the tree of OMP regions rooted at ROOT, storing it to
   root_omp_region.  */

static void
5456 build_omp_regions_root (basic_block root)
{
  gcc_assert (root_omp_region == NULL);
5459 build_omp_regions_1 (root, NULL, true);
5460 gcc_assert (root_omp_region != NULL);
}

/* Expands omp construct (and its subconstructs) starting in HEAD.  */

void
5466 omp_expand_local (basic_block head)
{
  build_omp_regions_root (head);
5469 if (dump_file && (dump_flags & TDF_DETAILS))
    {
      fprintf (dump_file, "\nOMP region tree\n\n");
5472 dump_omp_region (dump_file, root_omp_region, 0);
5473 fprintf (dump_file, "\n");
    }

  remove_exit_barriers (root_omp_region);
5477 expand_omp (root_omp_region);
5479 free_omp_regions ();
}

/* Scan the CFG and build a tree of OMP regions.  The root of the tree
   is stored in root_omp_region.  */

static void
5486 build_omp_regions (void)
{
  gcc_assert (root_omp_region == NULL);
5489 calculate_dominance_info (CDI_DOMINATORS);
5490 build_omp_regions_1 (ENTRY_BLOCK_PTR, NULL, false);
}

/* Main entry point for expanding OMP-GIMPLE into runtime calls.  */

static unsigned int
5496 execute_expand_omp (void)
{
  build_omp_regions ();
  if (!root_omp_region)
    return 0;

  if (dump_file)
    {
5505 fprintf (dump_file, "\nOMP region tree\n\n");
5506 dump_omp_region (dump_file, root_omp_region, 0);
5507 fprintf (dump_file, "\n");
    }

  remove_exit_barriers (root_omp_region);
5512 expand_omp (root_omp_region);
5514 cleanup_tree_cfg ();
5516 free_omp_regions ();
  return 0;
}

/* OMP expansion -- the default pass, run before creation of SSA form.  */

static bool
5524 gate_expand_omp (void)
{
  return (flag_openmp != 0 && !seen_error ());
}
5529 struct gimple_opt_pass pass_expand_omp =
5533 "ompexp", /* name */
5534 gate_expand_omp, /* gate */
5535 execute_expand_omp, /* execute */
  NULL,					/* sub */
  NULL,					/* next */
  0,					/* static_pass_number */
5539 TV_NONE, /* tv_id */
5540 PROP_gimple_any, /* properties_required */
5541 0, /* properties_provided */
5542 0, /* properties_destroyed */
5543 0, /* todo_flags_start */
  TODO_dump_func			/* todo_flags_finish */
 }
};
5548 /* Routines to lower OpenMP directives into OMP-GIMPLE. */
5550 /* Lower the OpenMP sections directive in the current statement in GSI_P.
   CTX is the enclosing OMP context for the current statement.  */

static void
5554 lower_omp_sections (gimple_stmt_iterator *gsi_p, omp_context *ctx)
{
  tree block, control;
  gimple_stmt_iterator tgsi;
  unsigned i, len;
5559 gimple stmt, new_stmt, bind, t;
5560 gimple_seq ilist, dlist, olist, new_body, body;
5561 struct gimplify_ctx gctx;
5563 stmt = gsi_stmt (*gsi_p);
5565 push_gimplify_context (&gctx);
  dlist = NULL;
  ilist = NULL;
  lower_rec_input_clauses (gimple_omp_sections_clauses (stmt),
5570 &ilist, &dlist, ctx);
5572 tgsi = gsi_start (gimple_omp_body (stmt));
  for (len = 0; !gsi_end_p (tgsi); len++, gsi_next (&tgsi))
    continue;
  tgsi = gsi_start (gimple_omp_body (stmt));
  body = NULL;
  for (i = 0; i < len; i++, gsi_next (&tgsi))
    {
      omp_context *sctx;
      gimple sec_start;
5583 sec_start = gsi_stmt (tgsi);
      sctx = maybe_lookup_ctx (sec_start);
      gcc_assert (sctx);
5587 gimple_seq_add_stmt (&body, sec_start);
5589 lower_omp (gimple_omp_body (sec_start), sctx);
5590 gimple_seq_add_seq (&body, gimple_omp_body (sec_start));
5591 gimple_omp_set_body (sec_start, NULL);
      if (i == len - 1)
	{
	  gimple_seq l = NULL;
	  lower_lastprivate_clauses (gimple_omp_sections_clauses (stmt), NULL,
				     &l, ctx);
5598 gimple_seq_add_seq (&body, l);
	  gimple_omp_section_set_last (sec_start);
	}

      gimple_seq_add_stmt (&body, gimple_build_omp_return (false));
    }
5605 block = make_node (BLOCK);
5606 bind = gimple_build_bind (NULL, body, block);
  olist = NULL;
  lower_reduction_clauses (gimple_omp_sections_clauses (stmt), &olist, ctx);
5611 block = make_node (BLOCK);
5612 new_stmt = gimple_build_bind (NULL, NULL, block);
5614 pop_gimplify_context (new_stmt);
5615 gimple_bind_append_vars (new_stmt, ctx->block_vars);
5616 BLOCK_VARS (block) = gimple_bind_vars (bind);
5617 if (BLOCK_VARS (block))
5618 TREE_USED (block) = 1;
  new_body = NULL;
  gimple_seq_add_seq (&new_body, ilist);
5622 gimple_seq_add_stmt (&new_body, stmt);
5623 gimple_seq_add_stmt (&new_body, gimple_build_omp_sections_switch ());
5624 gimple_seq_add_stmt (&new_body, bind);
5626 control = create_tmp_var (unsigned_type_node, ".section");
5627 t = gimple_build_omp_continue (control, control);
5628 gimple_omp_sections_set_control (stmt, control);
5629 gimple_seq_add_stmt (&new_body, t);
5631 gimple_seq_add_seq (&new_body, olist);
5632 gimple_seq_add_seq (&new_body, dlist);
5634 new_body = maybe_catch_exception (new_body);
5636 t = gimple_build_omp_return
5637 (!!find_omp_clause (gimple_omp_sections_clauses (stmt),
5638 OMP_CLAUSE_NOWAIT));
5639 gimple_seq_add_stmt (&new_body, t);
5641 gimple_bind_set_body (new_stmt, new_body);
5642 gimple_omp_set_body (stmt, NULL);
5644 gsi_replace (gsi_p, new_stmt, true);
}

/* A subroutine of lower_omp_single.  Expand the simple form of
   a GIMPLE_OMP_SINGLE, without a copyprivate clause:

     if (GOMP_single_start ())
       BODY;
     [ GOMP_barrier (); ]    -> unless 'nowait' is present.
5655 FIXME. It may be better to delay expanding the logic of this until
5656 pass_expand_omp. The expanded logic may make the job more difficult
   for a synchronization analysis pass.  */
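/* Editorial usage example (not from the original):

     #pragma omp single nowait
       printf ("hello once\n");

   lowers through this routine: one thread sees GOMP_single_start ()
   return true and runs the printf; with 'nowait' the trailing
   GOMP_barrier () is omitted.  */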
static void
lower_omp_single_simple (gimple single_stmt, gimple_seq *pre_p)
{
  location_t loc = gimple_location (single_stmt);
5663 tree tlabel = create_artificial_label (loc);
  tree flabel = create_artificial_label (loc);
  gimple call, cond;
  tree lhs, decl;
5668 decl = built_in_decls[BUILT_IN_GOMP_SINGLE_START];
5669 lhs = create_tmp_var (TREE_TYPE (TREE_TYPE (decl)), NULL);
5670 call = gimple_build_call (decl, 0);
5671 gimple_call_set_lhs (call, lhs);
5672 gimple_seq_add_stmt (pre_p, call);
5674 cond = gimple_build_cond (EQ_EXPR, lhs,
			    fold_convert_loc (loc, TREE_TYPE (lhs),
					      boolean_true_node),
			    tlabel, flabel);
5678 gimple_seq_add_stmt (pre_p, cond);
5679 gimple_seq_add_stmt (pre_p, gimple_build_label (tlabel));
5680 gimple_seq_add_seq (pre_p, gimple_omp_body (single_stmt));
  gimple_seq_add_stmt (pre_p, gimple_build_label (flabel));
}
/* A subroutine of lower_omp_single.  Expand the simple form of
   a GIMPLE_OMP_SINGLE, with a copyprivate clause:

	#pragma omp single copyprivate (a, b, c)

   Create a new structure to hold copies of 'a', 'b' and 'c' and emit:

      {
	if ((copyout_p = GOMP_single_copy_start ()) == NULL)
	  {
	    BODY;
	    copyout.a = a;
	    copyout.b = b;
	    copyout.c = c;
	    GOMP_single_copy_end (&copyout);
	  }
	else
	  {
	    a = copyout_p->a;
	    b = copyout_p->b;
	    c = copyout_p->c;
	  }
	GOMP_barrier ();
      }

  FIXME.  It may be better to delay expanding the logic of this until
  pass_expand_omp.  The expanded logic may make the job more difficult
  for a synchronization analysis pass.  */
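/* Editorial usage example (not from the original):

     int a;
     #pragma omp single copyprivate (a)
       a = read_input ();	-- read_input is a hypothetical helper

   One thread runs the body and broadcasts its 'a' through the copyout
   structure; every other thread copies it back in before passing the
   final barrier.  */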
static void
lower_omp_single_copy (gimple single_stmt, gimple_seq *pre_p, omp_context *ctx)
{
  tree ptr_type, t, l0, l1, l2;
5718 gimple_seq copyin_seq;
5719 location_t loc = gimple_location (single_stmt);
5721 ctx->sender_decl = create_tmp_var (ctx->record_type, ".omp_copy_o");
5723 ptr_type = build_pointer_type (ctx->record_type);
5724 ctx->receiver_decl = create_tmp_var (ptr_type, ".omp_copy_i");
5726 l0 = create_artificial_label (loc);
5727 l1 = create_artificial_label (loc);
5728 l2 = create_artificial_label (loc);
5730 t = build_call_expr_loc (loc, built_in_decls[BUILT_IN_GOMP_SINGLE_COPY_START], 0);
5731 t = fold_convert_loc (loc, ptr_type, t);
5732 gimplify_assign (ctx->receiver_decl, t, pre_p);
5734 t = build2 (EQ_EXPR, boolean_type_node, ctx->receiver_decl,
5735 build_int_cst (ptr_type, 0));
5736 t = build3 (COND_EXPR, void_type_node, t,
5737 build_and_jump (&l0), build_and_jump (&l1));
5738 gimplify_and_add (t, pre_p);
5740 gimple_seq_add_stmt (pre_p, gimple_build_label (l0));
5742 gimple_seq_add_seq (pre_p, gimple_omp_body (single_stmt));
  copyin_seq = NULL;
  lower_copyprivate_clauses (gimple_omp_single_clauses (single_stmt), pre_p,
			     &copyin_seq, ctx);
5748 t = build_fold_addr_expr_loc (loc, ctx->sender_decl);
  t = build_call_expr_loc (loc, built_in_decls[BUILT_IN_GOMP_SINGLE_COPY_END],
			   1, t);
5751 gimplify_and_add (t, pre_p);
5753 t = build_and_jump (&l2);
5754 gimplify_and_add (t, pre_p);
5756 gimple_seq_add_stmt (pre_p, gimple_build_label (l1));
5758 gimple_seq_add_seq (pre_p, copyin_seq);
  gimple_seq_add_stmt (pre_p, gimple_build_label (l2));
}
/* Expand code for an OpenMP single directive.  */

static void
5767 lower_omp_single (gimple_stmt_iterator *gsi_p, omp_context *ctx)
{
  tree block;
  gimple t, bind, single_stmt = gsi_stmt (*gsi_p);
5771 gimple_seq bind_body, dlist;
5772 struct gimplify_ctx gctx;
5774 push_gimplify_context (&gctx);
  bind_body = NULL;
  lower_rec_input_clauses (gimple_omp_single_clauses (single_stmt),
5778 &bind_body, &dlist, ctx);
5779 lower_omp (gimple_omp_body (single_stmt), ctx);
5781 gimple_seq_add_stmt (&bind_body, single_stmt);
5783 if (ctx->record_type)
5784 lower_omp_single_copy (single_stmt, &bind_body, ctx);
  else
    lower_omp_single_simple (single_stmt, &bind_body);
5788 gimple_omp_set_body (single_stmt, NULL);
5790 gimple_seq_add_seq (&bind_body, dlist);
5792 bind_body = maybe_catch_exception (bind_body);
5794 t = gimple_build_omp_return
5795 (!!find_omp_clause (gimple_omp_single_clauses (single_stmt),
5796 OMP_CLAUSE_NOWAIT));
5797 gimple_seq_add_stmt (&bind_body, t);
5799 block = make_node (BLOCK);
5800 bind = gimple_build_bind (NULL, bind_body, block);
5802 pop_gimplify_context (bind);
5804 gimple_bind_append_vars (bind, ctx->block_vars);
5805 BLOCK_VARS (block) = ctx->block_vars;
5806 gsi_replace (gsi_p, bind, true);
5807 if (BLOCK_VARS (block))
5808 TREE_USED (block) = 1;
}

/* Expand code for an OpenMP master directive.  */

static void
5815 lower_omp_master (gimple_stmt_iterator *gsi_p, omp_context *ctx)
{
  tree block, lab = NULL, x;
5818 gimple stmt = gsi_stmt (*gsi_p), bind;
5819 location_t loc = gimple_location (stmt);
  gimple_seq tseq;
  struct gimplify_ctx gctx;
5823 push_gimplify_context (&gctx);
5825 block = make_node (BLOCK);
  bind = gimple_build_bind (NULL, gimple_seq_alloc_with_stmt (stmt),
			    block);
5829 x = build_call_expr_loc (loc, built_in_decls[BUILT_IN_OMP_GET_THREAD_NUM], 0);
5830 x = build2 (EQ_EXPR, boolean_type_node, x, integer_zero_node);
5831 x = build3 (COND_EXPR, void_type_node, x, NULL, build_and_jump (&lab));
  tseq = NULL;
  gimplify_and_add (x, &tseq);
5834 gimple_bind_add_seq (bind, tseq);
5836 lower_omp (gimple_omp_body (stmt), ctx);
5837 gimple_omp_set_body (stmt, maybe_catch_exception (gimple_omp_body (stmt)));
5838 gimple_bind_add_seq (bind, gimple_omp_body (stmt));
5839 gimple_omp_set_body (stmt, NULL);
5841 gimple_bind_add_stmt (bind, gimple_build_label (lab));
5843 gimple_bind_add_stmt (bind, gimple_build_omp_return (true));
5845 pop_gimplify_context (bind);
5847 gimple_bind_append_vars (bind, ctx->block_vars);
5848 BLOCK_VARS (block) = ctx->block_vars;
5849 gsi_replace (gsi_p, bind, true);
}

/* Expand code for an OpenMP ordered directive.  */

static void
5856 lower_omp_ordered (gimple_stmt_iterator *gsi_p, omp_context *ctx)
{
  tree block;
  gimple stmt = gsi_stmt (*gsi_p), bind, x;
5860 struct gimplify_ctx gctx;
5862 push_gimplify_context (&gctx);
5864 block = make_node (BLOCK);
  bind = gimple_build_bind (NULL, gimple_seq_alloc_with_stmt (stmt),
			    block);
5868 x = gimple_build_call (built_in_decls[BUILT_IN_GOMP_ORDERED_START], 0);
5869 gimple_bind_add_stmt (bind, x);
5871 lower_omp (gimple_omp_body (stmt), ctx);
5872 gimple_omp_set_body (stmt, maybe_catch_exception (gimple_omp_body (stmt)));
5873 gimple_bind_add_seq (bind, gimple_omp_body (stmt));
5874 gimple_omp_set_body (stmt, NULL);
5876 x = gimple_build_call (built_in_decls[BUILT_IN_GOMP_ORDERED_END], 0);
5877 gimple_bind_add_stmt (bind, x);
5879 gimple_bind_add_stmt (bind, gimple_build_omp_return (true));
5881 pop_gimplify_context (bind);
5883 gimple_bind_append_vars (bind, ctx->block_vars);
5884 BLOCK_VARS (block) = gimple_bind_vars (bind);
5885 gsi_replace (gsi_p, bind, true);
}

/* Gimplify a GIMPLE_OMP_CRITICAL statement.  This is a relatively simple
   substitution of a couple of function calls.  But in the NAMED case,
   it requires that languages coordinate a symbol name.  It is therefore
   best put here in common code.  */
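/* Editorial example (not from the original): "#pragma omp critical (io)"
   maps, in every translation unit that uses it, onto the common symbol
   ".gomp_critical_user_io" built below, so identically named critical
   regions across files share one mutex.  */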
5894 static GTY((param1_is (tree), param2_is (tree)))
splay_tree critical_name_mutexes;

static void
5898 lower_omp_critical (gimple_stmt_iterator *gsi_p, omp_context *ctx)
{
  tree block;
  tree name, lock, unlock;
5902 gimple stmt = gsi_stmt (*gsi_p), bind;
5903 location_t loc = gimple_location (stmt);
  gimple_seq tbody;
  struct gimplify_ctx gctx;
5907 name = gimple_omp_critical_name (stmt);
  if (name)
    {
      tree decl;
      splay_tree_node n;

      if (!critical_name_mutexes)
5914 critical_name_mutexes
5915 = splay_tree_new_ggc (splay_tree_compare_pointers,
5916 ggc_alloc_splay_tree_tree_node_tree_node_splay_tree_s,
5917 ggc_alloc_splay_tree_tree_node_tree_node_splay_tree_node_s);
5919 n = splay_tree_lookup (critical_name_mutexes, (splay_tree_key) name);
      if (n == NULL)
	{
	  char *new_str;

	  decl = create_tmp_var_raw (ptr_type_node, NULL);
5926 new_str = ACONCAT ((".gomp_critical_user_",
5927 IDENTIFIER_POINTER (name), NULL));
5928 DECL_NAME (decl) = get_identifier (new_str);
5929 TREE_PUBLIC (decl) = 1;
5930 TREE_STATIC (decl) = 1;
5931 DECL_COMMON (decl) = 1;
5932 DECL_ARTIFICIAL (decl) = 1;
5933 DECL_IGNORED_P (decl) = 1;
5934 varpool_finalize_decl (decl);
5936 splay_tree_insert (critical_name_mutexes, (splay_tree_key) name,
5937 (splay_tree_value) decl);
	}
      else
	decl = (tree) n->value;
5942 lock = built_in_decls[BUILT_IN_GOMP_CRITICAL_NAME_START];
5943 lock = build_call_expr_loc (loc, lock, 1, build_fold_addr_expr_loc (loc, decl));
5945 unlock = built_in_decls[BUILT_IN_GOMP_CRITICAL_NAME_END];
5946 unlock = build_call_expr_loc (loc, unlock, 1,
5947 build_fold_addr_expr_loc (loc, decl));
    }
  else
    {
      lock = built_in_decls[BUILT_IN_GOMP_CRITICAL_START];
5952 lock = build_call_expr_loc (loc, lock, 0);
5954 unlock = built_in_decls[BUILT_IN_GOMP_CRITICAL_END];
      unlock = build_call_expr_loc (loc, unlock, 0);
    }
5958 push_gimplify_context (&gctx);
5960 block = make_node (BLOCK);
5961 bind = gimple_build_bind (NULL, gimple_seq_alloc_with_stmt (stmt), block);
5963 tbody = gimple_bind_body (bind);
5964 gimplify_and_add (lock, &tbody);
5965 gimple_bind_set_body (bind, tbody);
5967 lower_omp (gimple_omp_body (stmt), ctx);
5968 gimple_omp_set_body (stmt, maybe_catch_exception (gimple_omp_body (stmt)));
5969 gimple_bind_add_seq (bind, gimple_omp_body (stmt));
5970 gimple_omp_set_body (stmt, NULL);
5972 tbody = gimple_bind_body (bind);
5973 gimplify_and_add (unlock, &tbody);
5974 gimple_bind_set_body (bind, tbody);
5976 gimple_bind_add_stmt (bind, gimple_build_omp_return (true));
5978 pop_gimplify_context (bind);
5979 gimple_bind_append_vars (bind, ctx->block_vars);
5980 BLOCK_VARS (block) = gimple_bind_vars (bind);
5981 gsi_replace (gsi_p, bind, true);
}

/* A subroutine of lower_omp_for.  Generate code to emit the predicate
   for a lastprivate clause.  Given a loop control predicate of (V
   cond N2), we gate the clause on (!(V cond N2)).  The lowered form
   is appended to *DLIST, iterator initialization is appended to
   *BODY_P.  */

static void
5988 is appended to *DLIST, iterator initialization is appended to
5992 lower_omp_for_lastprivate (struct omp_for_data *fd, gimple_seq *body_p,
5993 gimple_seq *dlist, struct omp_context *ctx)
{
  tree clauses, cond, vinit;
  enum tree_code cond_code;
  gimple_seq stmts;
5999 cond_code = fd->loop.cond_code;
6000 cond_code = cond_code == LT_EXPR ? GE_EXPR : LE_EXPR;
6002 /* When possible, use a strict equality expression. This can let VRP
6003 type optimizations deduce the value and remove a copy. */
6004 if (host_integerp (fd->loop.step, 0))
    {
      HOST_WIDE_INT step = TREE_INT_CST_LOW (fd->loop.step);
6007 if (step == 1 || step == -1)
	cond_code = EQ_EXPR;
    }
6011 cond = build2 (cond_code, boolean_type_node, fd->loop.v, fd->loop.n2);
6013 clauses = gimple_omp_for_clauses (fd->for_stmt);
  stmts = NULL;
  lower_lastprivate_clauses (clauses, cond, &stmts, ctx);
6016 if (!gimple_seq_empty_p (stmts))
    {
      gimple_seq_add_seq (&stmts, *dlist);
      *dlist = stmts;
6021 /* Optimize: v = 0; is usually cheaper than v = some_other_constant. */
6022 vinit = fd->loop.n1;
6023 if (cond_code == EQ_EXPR
6024 && host_integerp (fd->loop.n2, 0)
6025 && ! integer_zerop (fd->loop.n2))
6026 vinit = build_int_cst (TREE_TYPE (fd->loop.v), 0);
6028 /* Initialize the iterator variable, so that threads that don't execute
6029 any iterations don't execute the lastprivate clauses by accident. */
      gimplify_assign (fd->loop.v, vinit, body_p);
    }
}
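/* Editorial example (not from the original): for
   "for (i = 0; i < n; i++)" the gate becomes (i == n) once the step-1
   EQ_EXPR optimization applies, and i is pre-initialized so a thread
   assigned no iterations evaluates the gate against a defined value
   (when n is a nonzero constant, against 0, making the gate trivially
   false for that thread).  */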
/* Lower code for an OpenMP loop directive.  */

static void
6038 lower_omp_for (gimple_stmt_iterator *gsi_p, omp_context *ctx)
{
  tree *rhs_p, block;
  struct omp_for_data fd;
6042 gimple stmt = gsi_stmt (*gsi_p), new_stmt;
6043 gimple_seq omp_for_body, body, dlist;
  size_t i;
  struct gimplify_ctx gctx;
6047 push_gimplify_context (&gctx);
6049 lower_omp (gimple_omp_for_pre_body (stmt), ctx);
6050 lower_omp (gimple_omp_body (stmt), ctx);
6052 block = make_node (BLOCK);
6053 new_stmt = gimple_build_bind (NULL, NULL, block);
  /* Move declaration of temporaries in the loop body before we make
     it a BIND_EXPR.  */
6057 omp_for_body = gimple_omp_body (stmt);
6058 if (!gimple_seq_empty_p (omp_for_body)
6059 && gimple_code (gimple_seq_first_stmt (omp_for_body)) == GIMPLE_BIND)
6061 tree vars = gimple_bind_vars (gimple_seq_first_stmt (omp_for_body));
      gimple_bind_append_vars (new_stmt, vars);
    }
6065 /* The pre-body and input clauses go before the lowered GIMPLE_OMP_FOR. */
  dlist = NULL;
  body = NULL;
  lower_rec_input_clauses (gimple_omp_for_clauses (stmt), &body, &dlist, ctx);
6069 gimple_seq_add_seq (&body, gimple_omp_for_pre_body (stmt));
6071 /* Lower the header expressions. At this point, we can assume that
6072 the header is of the form:
6074 #pragma omp for (V = VAL1; V {<|>|<=|>=} VAL2; V = V [+-] VAL3)
6076 We just need to make sure that VAL1, VAL2 and VAL3 are lowered
6077 using the .omp_data_s mapping, if needed. */
6078 for (i = 0; i < gimple_omp_for_collapse (stmt); i++)
    {
      rhs_p = gimple_omp_for_initial_ptr (stmt, i);
6081 if (!is_gimple_min_invariant (*rhs_p))
6082 *rhs_p = get_formal_tmp_var (*rhs_p, &body);
6084 rhs_p = gimple_omp_for_final_ptr (stmt, i);
6085 if (!is_gimple_min_invariant (*rhs_p))
6086 *rhs_p = get_formal_tmp_var (*rhs_p, &body);
6088 rhs_p = &TREE_OPERAND (gimple_omp_for_incr (stmt, i), 1);
6089 if (!is_gimple_min_invariant (*rhs_p))
	*rhs_p = get_formal_tmp_var (*rhs_p, &body);
    }
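  /* Editorial example: for "#pragma omp for" with condition "V < a + b",
     the bound a + b is evaluated once here into a formal temporary, so
     the lowered GIMPLE_OMP_FOR header only refers to invariants.  */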
6093 /* Once lowered, extract the bounds and clauses. */
6094 extract_omp_for_data (stmt, &fd, NULL);
6096 lower_omp_for_lastprivate (&fd, &body, &dlist, ctx);
6098 gimple_seq_add_stmt (&body, stmt);
6099 gimple_seq_add_seq (&body, gimple_omp_body (stmt));
  gimple_seq_add_stmt (&body, gimple_build_omp_continue (fd.loop.v,
							 fd.loop.v));
6104 /* After the loop, add exit clauses. */
6105 lower_reduction_clauses (gimple_omp_for_clauses (stmt), &body, ctx);
6106 gimple_seq_add_seq (&body, dlist);
6108 body = maybe_catch_exception (body);
6110 /* Region exit marker goes at the end of the loop body. */
6111 gimple_seq_add_stmt (&body, gimple_build_omp_return (fd.have_nowait));
6113 pop_gimplify_context (new_stmt);
6115 gimple_bind_append_vars (new_stmt, ctx->block_vars);
6116 BLOCK_VARS (block) = gimple_bind_vars (new_stmt);
6117 if (BLOCK_VARS (block))
6118 TREE_USED (block) = 1;
6120 gimple_bind_set_body (new_stmt, body);
6121 gimple_omp_set_body (stmt, NULL);
6122 gimple_omp_for_set_pre_body (stmt, NULL);
6123 gsi_replace (gsi_p, new_stmt, true);
}

/* Callback for walk_stmts.  Check if the current statement only contains
   GIMPLE_OMP_FOR or GIMPLE_OMP_SECTIONS.  */

static tree
6130 check_combined_parallel (gimple_stmt_iterator *gsi_p,
6131 bool *handled_ops_p,
6132 struct walk_stmt_info *wi)
{
  int *info = (int *) wi->info;
6135 gimple stmt = gsi_stmt (*gsi_p);
6137 *handled_ops_p = true;
6138 switch (gimple_code (stmt))
    {
    WALK_SUBSTMTS;

    case GIMPLE_OMP_FOR:
6143 case GIMPLE_OMP_SECTIONS:
      *info = *info == 0 ? 1 : -1;
      break;
    default:
      *info = -1;
      break;
    }

  return NULL_TREE;
}
struct omp_taskcopy_context
{
6155 /* This field must be at the beginning, as we do "inheritance": Some
6156 callback functions for tree-inline.c (e.g., omp_copy_decl)
6157 receive a copy_body_data pointer that is up-casted to an
     omp_context pointer.  */
  copy_body_data cb;
  omp_context *ctx;
};

static tree
6164 task_copyfn_copy_decl (tree var, copy_body_data *cb)
{
  struct omp_taskcopy_context *tcctx = (struct omp_taskcopy_context *) cb;
6168 if (splay_tree_lookup (tcctx->ctx->sfield_map, (splay_tree_key) var))
    return create_tmp_var (TREE_TYPE (var), NULL);
  else
    return var;
}

static tree
6175 task_copyfn_remap_type (struct omp_taskcopy_context *tcctx, tree orig_type)
{
  tree name, new_fields = NULL, type, f;
6179 type = lang_hooks.types.make_type (RECORD_TYPE);
6180 name = DECL_NAME (TYPE_NAME (orig_type));
6181 name = build_decl (gimple_location (tcctx->ctx->stmt),
6182 TYPE_DECL, name, type);
6183 TYPE_NAME (type) = name;
6185 for (f = TYPE_FIELDS (orig_type); f ; f = TREE_CHAIN (f))
    {
      tree new_f = copy_node (f);
6188 DECL_CONTEXT (new_f) = type;
6189 TREE_TYPE (new_f) = remap_type (TREE_TYPE (f), &tcctx->cb);
6190 TREE_CHAIN (new_f) = new_fields;
6191 walk_tree (&DECL_SIZE (new_f), copy_tree_body_r, &tcctx->cb, NULL);
6192 walk_tree (&DECL_SIZE_UNIT (new_f), copy_tree_body_r, &tcctx->cb, NULL);
      walk_tree (&DECL_FIELD_OFFSET (new_f), copy_tree_body_r,
		 &tcctx->cb, NULL);
      *pointer_map_insert (tcctx->cb.decl_map, f) = new_f;
    }
6198 TYPE_FIELDS (type) = nreverse (new_fields);
  layout_type (type);
  return type;
}

/* Create task copyfn.  */

static void
6206 create_task_copyfn (gimple task_stmt, omp_context *ctx)
{
  struct function *child_cfun;
6209 tree child_fn, t, c, src, dst, f, sf, arg, sarg, decl;
6210 tree record_type, srecord_type, bind, list;
6211 bool record_needs_remap = false, srecord_needs_remap = false;
  splay_tree_node n;
  struct omp_taskcopy_context tcctx;
6214 struct gimplify_ctx gctx;
6215 location_t loc = gimple_location (task_stmt);
6217 child_fn = gimple_omp_task_copy_fn (task_stmt);
6218 child_cfun = DECL_STRUCT_FUNCTION (child_fn);
6219 gcc_assert (child_cfun->cfg == NULL);
6220 child_cfun->dont_save_pending_sizes_p = 1;
6221 DECL_SAVED_TREE (child_fn) = alloc_stmt_list ();
6223 /* Reset DECL_CONTEXT on function arguments. */
6224 for (t = DECL_ARGUMENTS (child_fn); t; t = TREE_CHAIN (t))
6225 DECL_CONTEXT (t) = child_fn;
6227 /* Populate the function. */
6228 push_gimplify_context (&gctx);
6229 current_function_decl = child_fn;
6231 bind = build3 (BIND_EXPR, void_type_node, NULL, NULL, NULL);
6232 TREE_SIDE_EFFECTS (bind) = 1;
6234 DECL_SAVED_TREE (child_fn) = bind;
6235 DECL_SOURCE_LOCATION (child_fn) = gimple_location (task_stmt);
6237 /* Remap src and dst argument types if needed. */
6238 record_type = ctx->record_type;
6239 srecord_type = ctx->srecord_type;
6240 for (f = TYPE_FIELDS (record_type); f ; f = TREE_CHAIN (f))
6241 if (variably_modified_type_p (TREE_TYPE (f), ctx->cb.src_fn))
      {
	record_needs_remap = true;
	break;
      }
6246 for (f = TYPE_FIELDS (srecord_type); f ; f = TREE_CHAIN (f))
6247 if (variably_modified_type_p (TREE_TYPE (f), ctx->cb.src_fn))
      {
	srecord_needs_remap = true;
	break;
      }
6253 if (record_needs_remap || srecord_needs_remap)
    {
      memset (&tcctx, '\0', sizeof (tcctx));
6256 tcctx.cb.src_fn = ctx->cb.src_fn;
6257 tcctx.cb.dst_fn = child_fn;
6258 tcctx.cb.src_node = cgraph_node (tcctx.cb.src_fn);
6259 tcctx.cb.dst_node = tcctx.cb.src_node;
6260 tcctx.cb.src_cfun = ctx->cb.src_cfun;
6261 tcctx.cb.copy_decl = task_copyfn_copy_decl;
6262 tcctx.cb.eh_lp_nr = 0;
6263 tcctx.cb.transform_call_graph_edges = CB_CGE_MOVE;
      tcctx.cb.decl_map = pointer_map_create ();
      tcctx.ctx = ctx;
6267 if (record_needs_remap)
6268 record_type = task_copyfn_remap_type (&tcctx, record_type);
6269 if (srecord_needs_remap)
6270 srecord_type = task_copyfn_remap_type (&tcctx, srecord_type);
    }
  else
    tcctx.cb.decl_map = NULL;
6275 push_cfun (child_cfun);
6277 arg = DECL_ARGUMENTS (child_fn);
6278 TREE_TYPE (arg) = build_pointer_type (record_type);
6279 sarg = TREE_CHAIN (arg);
6280 TREE_TYPE (sarg) = build_pointer_type (srecord_type);
6282 /* First pass: initialize temporaries used in record_type and srecord_type
6283 sizes and field offsets. */
6284 if (tcctx.cb.decl_map)
6285 for (c = gimple_omp_task_clauses (task_stmt); c; c = OMP_CLAUSE_CHAIN (c))
6286 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_FIRSTPRIVATE)
      {
	tree *p;

	decl = OMP_CLAUSE_DECL (c);
6291 p = (tree *) pointer_map_contains (tcctx.cb.decl_map, decl);
	if (p == NULL)
	  continue;
	n = splay_tree_lookup (ctx->sfield_map, (splay_tree_key) decl);
6295 sf = (tree) n->value;
6296 sf = *(tree *) pointer_map_contains (tcctx.cb.decl_map, sf);
6297 src = build_simple_mem_ref_loc (loc, sarg);
6298 src = build3 (COMPONENT_REF, TREE_TYPE (sf), src, sf, NULL);
6299 t = build2 (MODIFY_EXPR, TREE_TYPE (*p), *p, src);
	append_to_statement_list (t, &list);
      }
6303 /* Second pass: copy shared var pointers and copy construct non-VLA
6304 firstprivate vars. */
6305 for (c = gimple_omp_task_clauses (task_stmt); c; c = OMP_CLAUSE_CHAIN (c))
6306 switch (OMP_CLAUSE_CODE (c))
      {
      case OMP_CLAUSE_SHARED:
6309 decl = OMP_CLAUSE_DECL (c);
6310 n = splay_tree_lookup (ctx->field_map, (splay_tree_key) decl);
	if (n == NULL)
	  break;
	f = (tree) n->value;
6314 if (tcctx.cb.decl_map)
6315 f = *(tree *) pointer_map_contains (tcctx.cb.decl_map, f);
6316 n = splay_tree_lookup (ctx->sfield_map, (splay_tree_key) decl);
6317 sf = (tree) n->value;
6318 if (tcctx.cb.decl_map)
6319 sf = *(tree *) pointer_map_contains (tcctx.cb.decl_map, sf);
6320 src = build_simple_mem_ref_loc (loc, sarg);
6321 src = build3 (COMPONENT_REF, TREE_TYPE (sf), src, sf, NULL);
6322 dst = build_simple_mem_ref_loc (loc, arg);
6323 dst = build3 (COMPONENT_REF, TREE_TYPE (f), dst, f, NULL);
6324 t = build2 (MODIFY_EXPR, TREE_TYPE (dst), dst, src);
6325 append_to_statement_list (t, &list);
      case OMP_CLAUSE_FIRSTPRIVATE:
	decl = OMP_CLAUSE_DECL (c);
	if (is_variable_sized (decl))
	  break;
	n = splay_tree_lookup (ctx->field_map, (splay_tree_key) decl);
	if (n == NULL)
	  break;
	f = (tree) n->value;
	if (tcctx.cb.decl_map)
	  f = *(tree *) pointer_map_contains (tcctx.cb.decl_map, f);
	n = splay_tree_lookup (ctx->sfield_map, (splay_tree_key) decl);
	if (n != NULL)
	  {
	    sf = (tree) n->value;
	    if (tcctx.cb.decl_map)
	      sf = *(tree *) pointer_map_contains (tcctx.cb.decl_map, sf);
	    src = build_simple_mem_ref_loc (loc, sarg);
	    src = build3 (COMPONENT_REF, TREE_TYPE (sf), src, sf, NULL);
	    if (use_pointer_for_field (decl, NULL) || is_reference (decl))
	      src = build_simple_mem_ref_loc (loc, src);
	  }
	else
	  src = decl;
	dst = build_simple_mem_ref_loc (loc, arg);
	dst = build3 (COMPONENT_REF, TREE_TYPE (f), dst, f, NULL);
	t = lang_hooks.decls.omp_clause_copy_ctor (c, dst, src);
	append_to_statement_list (t, &list);
	break;
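
      /* Unlike the plain assignment used for shared pointers above,
	 omp_clause_copy_ctor lets the front end supply the proper copy
	 semantics, e.g. invoking the copy constructor for a C++ class
	 type that appears in a firstprivate clause.  */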
      case OMP_CLAUSE_PRIVATE:
	if (! OMP_CLAUSE_PRIVATE_OUTER_REF (c))
	  break;
	decl = OMP_CLAUSE_DECL (c);
	n = splay_tree_lookup (ctx->field_map, (splay_tree_key) decl);
	f = (tree) n->value;
	if (tcctx.cb.decl_map)
	  f = *(tree *) pointer_map_contains (tcctx.cb.decl_map, f);
	n = splay_tree_lookup (ctx->sfield_map, (splay_tree_key) decl);
	if (n != NULL)
	  {
	    sf = (tree) n->value;
	    if (tcctx.cb.decl_map)
	      sf = *(tree *) pointer_map_contains (tcctx.cb.decl_map, sf);
	    src = build_simple_mem_ref_loc (loc, sarg);
	    src = build3 (COMPONENT_REF, TREE_TYPE (sf), src, sf, NULL);
	    if (use_pointer_for_field (decl, NULL))
	      src = build_simple_mem_ref_loc (loc, src);
	  }
	else
	  src = decl;
	dst = build_simple_mem_ref_loc (loc, arg);
	dst = build3 (COMPONENT_REF, TREE_TYPE (f), dst, f, NULL);
	t = build2 (MODIFY_EXPR, TREE_TYPE (dst), dst, src);
	append_to_statement_list (t, &list);
	break;
      default:
	break;
      }
  /* Last pass: handle VLA firstprivates.  */
  if (tcctx.cb.decl_map)
    for (c = gimple_omp_task_clauses (task_stmt); c; c = OMP_CLAUSE_CHAIN (c))
      if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_FIRSTPRIVATE)
	{
	  tree ind, ptr, df;

	  decl = OMP_CLAUSE_DECL (c);
	  if (!is_variable_sized (decl))
	    continue;
	  n = splay_tree_lookup (ctx->field_map, (splay_tree_key) decl);
	  if (n == NULL)
	    continue;
	  f = (tree) n->value;
	  f = *(tree *) pointer_map_contains (tcctx.cb.decl_map, f);
	  gcc_assert (DECL_HAS_VALUE_EXPR_P (decl));
	  ind = DECL_VALUE_EXPR (decl);
	  gcc_assert (TREE_CODE (ind) == INDIRECT_REF);
	  gcc_assert (DECL_P (TREE_OPERAND (ind, 0)));
	  n = splay_tree_lookup (ctx->sfield_map,
				 (splay_tree_key) TREE_OPERAND (ind, 0));
	  sf = (tree) n->value;
	  sf = *(tree *) pointer_map_contains (tcctx.cb.decl_map, sf);
	  src = build_simple_mem_ref_loc (loc, sarg);
	  src = build3 (COMPONENT_REF, TREE_TYPE (sf), src, sf, NULL);
	  src = build_simple_mem_ref_loc (loc, src);
	  dst = build_simple_mem_ref_loc (loc, arg);
	  dst = build3 (COMPONENT_REF, TREE_TYPE (f), dst, f, NULL);
	  t = lang_hooks.decls.omp_clause_copy_ctor (c, dst, src);
	  append_to_statement_list (t, &list);
	  n = splay_tree_lookup (ctx->field_map,
				 (splay_tree_key) TREE_OPERAND (ind, 0));
	  df = (tree) n->value;
	  df = *(tree *) pointer_map_contains (tcctx.cb.decl_map, df);
	  ptr = build_simple_mem_ref_loc (loc, arg);
	  ptr = build3 (COMPONENT_REF, TREE_TYPE (df), ptr, df, NULL);
	  t = build2 (MODIFY_EXPR, TREE_TYPE (ptr), ptr,
		      build_fold_addr_expr_loc (loc, dst));
	  append_to_statement_list (t, &list);
	}
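
  /* For illustration, a task along the lines of

	 void foo (int n)
	 {
	   int vla[n];
	 #pragma omp task firstprivate (vla)
	   use (vla);
	 }

     takes this path: the VLA's contents are copy-constructed into the
     destination record, and the pointer field that stands in for VLA
     inside the task body is then redirected to that fresh copy.  */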
  t = build1 (RETURN_EXPR, void_type_node, NULL);
  append_to_statement_list (t, &list);

  if (tcctx.cb.decl_map)
    pointer_map_destroy (tcctx.cb.decl_map);
  pop_gimplify_context (NULL);
  BIND_EXPR_BODY (bind) = list;
  pop_cfun ();
  current_function_decl = ctx->cb.src_fn;
}
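
/* The function built above becomes the CPYFN argument later passed to
   GOMP_task; when the runtime defers the task, it calls this function to
   move the firstprivate data into the task's own record rather than doing
   a plain block copy.  */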
/* Lower the OpenMP parallel or task directive in the current statement
   in GSI_P.  CTX holds context information for the directive.  */

static void
lower_omp_taskreg (gimple_stmt_iterator *gsi_p, omp_context *ctx)
{
  tree clauses;
  tree child_fn, t;
  gimple stmt = gsi_stmt (*gsi_p);
  gimple par_bind, bind;
  gimple_seq par_body, olist, ilist, par_olist, par_ilist, new_body;
  struct gimplify_ctx gctx;
  location_t loc = gimple_location (stmt);

  clauses = gimple_omp_taskreg_clauses (stmt);
  par_bind = gimple_seq_first_stmt (gimple_omp_body (stmt));
  par_body = gimple_bind_body (par_bind);
  child_fn = ctx->cb.dst_fn;
  if (gimple_code (stmt) == GIMPLE_OMP_PARALLEL
      && !gimple_omp_parallel_combined_p (stmt))
    {
      struct walk_stmt_info wi;
      int ws_num = 0;

      memset (&wi, 0, sizeof (wi));
      wi.info = &ws_num;
      wi.val_only = true;
      walk_gimple_seq (par_body, check_combined_parallel, NULL, &wi);
      if (ws_num == 1)
	gimple_omp_parallel_set_combined_p (stmt, true);
    }
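
  /* ctx->srecord_type is only set for task constructs whose firstprivate
     data requires an explicit copy function, so build that helper before
     lowering the body.  */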
  if (ctx->srecord_type)
    create_task_copyfn (stmt, ctx);
  push_gimplify_context (&gctx);

  par_olist = NULL;
  par_ilist = NULL;
  lower_rec_input_clauses (clauses, &par_ilist, &par_olist, ctx);
  lower_omp (par_body, ctx);
  if (gimple_code (stmt) == GIMPLE_OMP_PARALLEL)
    lower_reduction_clauses (clauses, &par_olist, ctx);
  /* Declare all the variables created by mapping and the variables
     declared in the scope of the parallel body.  */
  record_vars_into (ctx->block_vars, child_fn);
  record_vars_into (gimple_bind_vars (par_bind), child_fn);
  if (ctx->record_type)
    {
      ctx->sender_decl
	= create_tmp_var (ctx->srecord_type ? ctx->srecord_type
			  : ctx->record_type, ".omp_data_o");
      TREE_ADDRESSABLE (ctx->sender_decl) = 1;
      gimple_omp_taskreg_set_data_arg (stmt, ctx->sender_decl);
    }

  olist = NULL;
  ilist = NULL;
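
  /* Sketch of the hand-off (naming as created above): the parent fills
     .omp_data_o with the addresses of shared variables and the values of
     firstprivate ones, and its address becomes the outlined call's data
     argument; the receiver assignment emitted further below makes the
     child's receiver_decl point at that same object.  */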
  lower_send_clauses (clauses, &ilist, &olist, ctx);
  lower_send_shared_vars (&ilist, &olist, ctx);
  /* Once all the expansions are done, sequence all the different
     fragments inside gimple_omp_body.  */

  new_body = NULL;

  if (ctx->record_type)
    {
      t = build_fold_addr_expr_loc (loc, ctx->sender_decl);
      /* fixup_child_record_type might have changed receiver_decl's type.  */
      t = fold_convert_loc (loc, TREE_TYPE (ctx->receiver_decl), t);
      gimple_seq_add_stmt (&new_body,
			   gimple_build_assign (ctx->receiver_decl, t));
    }
  gimple_seq_add_seq (&new_body, par_ilist);
  gimple_seq_add_seq (&new_body, par_body);
  gimple_seq_add_seq (&new_body, par_olist);
  new_body = maybe_catch_exception (new_body);
  gimple_seq_add_stmt (&new_body, gimple_build_omp_return (false));
  gimple_omp_set_body (stmt, new_body);
  bind = gimple_build_bind (NULL, NULL, gimple_bind_block (par_bind));
  gimple_bind_add_stmt (bind, stmt);
  if (ilist || olist)
    {
      gimple_seq_add_stmt (&ilist, bind);
      gimple_seq_add_seq (&ilist, olist);
      bind = gimple_build_bind (NULL, ilist, NULL);
    }

  gsi_replace (gsi_p, bind, true);

  pop_gimplify_context (NULL);
}
/* Callback for lower_omp_1.  Return non-NULL if *tp needs to be
   regimplified.  If DATA is non-NULL, lower_omp_1 is outside
   of OpenMP context, but with task_shared_vars set.  */

static tree
lower_omp_regimplify_p (tree *tp, int *walk_subtrees, void *data)
{
  tree t = *tp;

  /* Any variable with DECL_VALUE_EXPR needs to be regimplified.  */
  if (TREE_CODE (t) == VAR_DECL && data == NULL && DECL_HAS_VALUE_EXPR_P (t))
    return t;

  if (task_shared_vars
      && DECL_P (t)
      && bitmap_bit_p (task_shared_vars, DECL_UID (t)))
    return t;

  /* If a global variable has been privatized, TREE_CONSTANT on
     ADDR_EXPR might be wrong.  */
  if (data == NULL && TREE_CODE (t) == ADDR_EXPR)
    recompute_tree_invariant_for_addr_expr (t);

  *walk_subtrees = !TYPE_P (t) && !DECL_P (t);
  return NULL_TREE;
}
/* Lower the statement at *GSI_P in OpenMP context CTX, recursing into any
   nested statement sequences.  */

static void
lower_omp_1 (gimple_stmt_iterator *gsi_p, omp_context *ctx)
{
  gimple stmt = gsi_stmt (*gsi_p);
  struct walk_stmt_info wi;

  if (gimple_has_location (stmt))
    input_location = gimple_location (stmt);

  if (task_shared_vars)
    memset (&wi, '\0', sizeof (wi));

  /* If we have issued syntax errors, avoid doing any heavy lifting.
     Just replace the OpenMP directives with a NOP to avoid
     confusing RTL expansion.  */
  if (seen_error () && is_gimple_omp (stmt))
    {
      gsi_replace (gsi_p, gimple_build_nop (), true);
      return;
    }
  switch (gimple_code (stmt))
    {
    case GIMPLE_COND:
      if ((ctx || task_shared_vars)
	  && (walk_tree (gimple_cond_lhs_ptr (stmt), lower_omp_regimplify_p,
			 ctx ? NULL : &wi, NULL)
	      || walk_tree (gimple_cond_rhs_ptr (stmt), lower_omp_regimplify_p,
			    ctx ? NULL : &wi, NULL)))
	gimple_regimplify_operands (stmt, gsi_p);
      break;
    case GIMPLE_CATCH:
      lower_omp (gimple_catch_handler (stmt), ctx);
      break;
    case GIMPLE_EH_FILTER:
      lower_omp (gimple_eh_filter_failure (stmt), ctx);
      break;
    case GIMPLE_TRY:
      lower_omp (gimple_try_eval (stmt), ctx);
      lower_omp (gimple_try_cleanup (stmt), ctx);
      break;
    case GIMPLE_BIND:
      lower_omp (gimple_bind_body (stmt), ctx);
      break;
    case GIMPLE_OMP_PARALLEL:
    case GIMPLE_OMP_TASK:
      ctx = maybe_lookup_ctx (stmt);
      lower_omp_taskreg (gsi_p, ctx);
      break;
    case GIMPLE_OMP_FOR:
      ctx = maybe_lookup_ctx (stmt);
      gcc_assert (ctx);
      lower_omp_for (gsi_p, ctx);
      break;
    case GIMPLE_OMP_SECTIONS:
      ctx = maybe_lookup_ctx (stmt);
      gcc_assert (ctx);
      lower_omp_sections (gsi_p, ctx);
      break;
    case GIMPLE_OMP_SINGLE:
      ctx = maybe_lookup_ctx (stmt);
      gcc_assert (ctx);
      lower_omp_single (gsi_p, ctx);
      break;
    case GIMPLE_OMP_MASTER:
      ctx = maybe_lookup_ctx (stmt);
      gcc_assert (ctx);
      lower_omp_master (gsi_p, ctx);
      break;
    case GIMPLE_OMP_ORDERED:
      ctx = maybe_lookup_ctx (stmt);
      gcc_assert (ctx);
      lower_omp_ordered (gsi_p, ctx);
      break;
    case GIMPLE_OMP_CRITICAL:
      ctx = maybe_lookup_ctx (stmt);
      gcc_assert (ctx);
      lower_omp_critical (gsi_p, ctx);
      break;
    case GIMPLE_OMP_ATOMIC_LOAD:
      if ((ctx || task_shared_vars)
	  && walk_tree (gimple_omp_atomic_load_rhs_ptr (stmt),
			lower_omp_regimplify_p, ctx ? NULL : &wi, NULL))
	gimple_regimplify_operands (stmt, gsi_p);
      break;
    default:
      if ((ctx || task_shared_vars)
	  && walk_gimple_op (stmt, lower_omp_regimplify_p,
			     ctx ? NULL : &wi))
	gimple_regimplify_operands (stmt, gsi_p);
      break;
    }
}
/* Lower each statement of BODY in context CTX.  */

static void
lower_omp (gimple_seq body, omp_context *ctx)
{
  location_t saved_location = input_location;
  gimple_stmt_iterator gsi;

  for (gsi = gsi_start (body); !gsi_end_p (gsi); gsi_next (&gsi))
    lower_omp_1 (&gsi, ctx);
  input_location = saved_location;
}
/* Main entry point.  */

static unsigned int
execute_lower_omp (void)
{
  gimple_seq body;

  /* This pass always runs, to provide PROP_gimple_lomp.
     But there is nothing to do unless -fopenmp is given.  */
  if (flag_openmp == 0)
    return 0;

  all_contexts = splay_tree_new (splay_tree_compare_pointers, 0,
				 delete_omp_context);

  body = gimple_body (current_function_decl);
  scan_omp (body, NULL);
  gcc_assert (taskreg_nesting_level == 0);
  if (all_contexts->root)
    {
      struct gimplify_ctx gctx;

      if (task_shared_vars)
	push_gimplify_context (&gctx);
      lower_omp (body, NULL);
      if (task_shared_vars)
	pop_gimplify_context (NULL);
    }

  if (all_contexts)
    {
      splay_tree_delete (all_contexts);
      all_contexts = NULL;
    }
  BITMAP_FREE (task_shared_vars);
  return 0;
}
struct gimple_opt_pass pass_lower_omp =
{
 {
  GIMPLE_PASS,
  "omplower",				/* name */
  NULL,					/* gate */
  execute_lower_omp,			/* execute */
  NULL,					/* sub */
  NULL,					/* next */
  0,					/* static_pass_number */
  TV_NONE,				/* tv_id */
  PROP_gimple_any,			/* properties_required */
  PROP_gimple_lomp,			/* properties_provided */
  0,					/* properties_destroyed */
  0,					/* todo_flags_start */
  TODO_dump_func			/* todo_flags_finish */
 }
};
/* The following is a utility to diagnose OpenMP structured block violations.
   It is not part of the "omplower" pass, as that's invoked too late.  It
   should be invoked by the respective front ends after gimplification.  */
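
/* For example, given

       #pragma omp parallel
       {
	 goto fail;
       }
     fail:;

   the branch leaves the structured block, and diagnose_sb_2 below reports
   "invalid branch to/from an OpenMP structured block" via diagnose_sb_0.  */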
static splay_tree all_labels;
/* Check for mismatched contexts and generate an error if needed.  Return
   true if an error is detected.  */

static bool
diagnose_sb_0 (gimple_stmt_iterator *gsi_p,
	       gimple branch_ctx, gimple label_ctx)
{
  if (label_ctx == branch_ctx)
    return false;
  /*
     Previously we kept track of the label's entire context in diagnose_sb_[12]
     so we could traverse it and issue a correct "exit" or "enter" error
     message upon a structured block violation.

     We built the context by building a list with tree_cons'ing, but there is
     no easy counterpart in gimple tuples.  It seems like far too much work
     for issuing exit/enter error messages.  If someone really misses the
     distinct error message... patches welcome.
   */

#if 0
  /* Try to avoid confusing the user by producing an error message
     with correct "exit" or "enter" verbiage.  We prefer "exit"
     unless we can show that LABEL_CTX is nested within BRANCH_CTX.  */
  if (branch_ctx == NULL)
    exit_p = false;
  else
    {
      while (label_ctx)
	{
	  if (TREE_VALUE (label_ctx) == branch_ctx)
	    {
	      exit_p = false;
	      break;
	    }
	  label_ctx = TREE_CHAIN (label_ctx);
	}
    }

  if (exit_p)
    error ("invalid exit from OpenMP structured block");
  else
    error ("invalid entry to OpenMP structured block");
#endif
  /* If it's obvious we have an invalid entry, be specific about the error.  */
  if (branch_ctx == NULL)
    error ("invalid entry to OpenMP structured block");
  else
    /* Otherwise, be vague and lazy, but efficient.  */
    error ("invalid branch to/from an OpenMP structured block");

  gsi_replace (gsi_p, gimple_build_nop (), false);
  return true;
}
/* Pass 1: Create a minimal tree of OpenMP structured blocks, and record
   where each label is found.  */

static tree
diagnose_sb_1 (gimple_stmt_iterator *gsi_p, bool *handled_ops_p,
	       struct walk_stmt_info *wi)
{
  gimple context = (gimple) wi->info;
  gimple inner_context;
  gimple stmt = gsi_stmt (*gsi_p);

  *handled_ops_p = true;

  switch (gimple_code (stmt))
    {
    WALK_SUBSTMTS;

    case GIMPLE_OMP_PARALLEL:
    case GIMPLE_OMP_TASK:
    case GIMPLE_OMP_SECTIONS:
    case GIMPLE_OMP_SINGLE:
    case GIMPLE_OMP_SECTION:
    case GIMPLE_OMP_MASTER:
    case GIMPLE_OMP_ORDERED:
    case GIMPLE_OMP_CRITICAL:
      /* The minimal context here is just the current OMP construct.  */
      inner_context = stmt;
      wi->info = inner_context;
      walk_gimple_seq (gimple_omp_body (stmt), diagnose_sb_1, NULL, wi);
      wi->info = context;
      break;

    case GIMPLE_OMP_FOR:
      inner_context = stmt;
      wi->info = inner_context;
      /* gimple_omp_for_{index,initial,final} are all DECLs; no need to
	 walk them.  */
      walk_gimple_seq (gimple_omp_for_pre_body (stmt),
		       diagnose_sb_1, NULL, wi);
      walk_gimple_seq (gimple_omp_body (stmt), diagnose_sb_1, NULL, wi);
      wi->info = context;
      break;

    case GIMPLE_LABEL:
      splay_tree_insert (all_labels, (splay_tree_key) gimple_label_label (stmt),
			 (splay_tree_value) context);
      break;

    default:
      break;
    }

  return NULL_TREE;
}
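
/* After pass 1, all_labels maps each LABEL_DECL to the innermost OpenMP
   construct that contains it (no entry when the label is outside any
   construct); pass 2 compares that against the context of every branch.  */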
/* Pass 2: Check each branch and see if its context differs from that of
   the destination label's context.  */

static tree
diagnose_sb_2 (gimple_stmt_iterator *gsi_p, bool *handled_ops_p,
	       struct walk_stmt_info *wi)
{
  gimple context = (gimple) wi->info;
  splay_tree_node n;
  gimple stmt = gsi_stmt (*gsi_p);

  *handled_ops_p = true;

  switch (gimple_code (stmt))
    {
    WALK_SUBSTMTS;

    case GIMPLE_OMP_PARALLEL:
    case GIMPLE_OMP_TASK:
    case GIMPLE_OMP_SECTIONS:
    case GIMPLE_OMP_SINGLE:
    case GIMPLE_OMP_SECTION:
    case GIMPLE_OMP_MASTER:
    case GIMPLE_OMP_ORDERED:
    case GIMPLE_OMP_CRITICAL:
      wi->info = stmt;
      walk_gimple_seq (gimple_omp_body (stmt), diagnose_sb_2, NULL, wi);
      wi->info = context;
      break;

    case GIMPLE_OMP_FOR:
      wi->info = stmt;
      /* gimple_omp_for_{index,initial,final} are all DECLs; no need to
	 walk them.  */
      walk_gimple_seq (gimple_omp_for_pre_body (stmt),
		       diagnose_sb_2, NULL, wi);
      walk_gimple_seq (gimple_omp_body (stmt), diagnose_sb_2, NULL, wi);
      wi->info = context;
      break;

    case GIMPLE_COND:
      {
	tree lab = gimple_cond_true_label (stmt);
	if (lab)
	  {
	    n = splay_tree_lookup (all_labels,
				   (splay_tree_key) lab);
	    diagnose_sb_0 (gsi_p, context,
			   n ? (gimple) n->value : NULL);
	  }
	lab = gimple_cond_false_label (stmt);
	if (lab)
	  {
	    n = splay_tree_lookup (all_labels,
				   (splay_tree_key) lab);
	    diagnose_sb_0 (gsi_p, context,
			   n ? (gimple) n->value : NULL);
	  }
      }
      break;

    case GIMPLE_GOTO:
      {
	tree lab = gimple_goto_dest (stmt);
	if (TREE_CODE (lab) != LABEL_DECL)
	  break;

	n = splay_tree_lookup (all_labels, (splay_tree_key) lab);
	diagnose_sb_0 (gsi_p, context, n ? (gimple) n->value : NULL);
      }
      break;

    case GIMPLE_SWITCH:
      {
	unsigned int i;
	for (i = 0; i < gimple_switch_num_labels (stmt); ++i)
	  {
	    tree lab = CASE_LABEL (gimple_switch_label (stmt, i));
	    n = splay_tree_lookup (all_labels, (splay_tree_key) lab);
	    if (n && diagnose_sb_0 (gsi_p, context, (gimple) n->value))
	      break;
	  }
      }
      break;

    case GIMPLE_RETURN:
      diagnose_sb_0 (gsi_p, context, NULL);
      break;

    default:
      break;
    }

  return NULL_TREE;
}
static unsigned int
diagnose_omp_structured_block_errors (void)
{
  struct walk_stmt_info wi;
  gimple_seq body = gimple_body (current_function_decl);

  all_labels = splay_tree_new (splay_tree_compare_pointers, 0, 0);

  memset (&wi, 0, sizeof (wi));
  walk_gimple_seq (body, diagnose_sb_1, NULL, &wi);

  memset (&wi, 0, sizeof (wi));
  wi.want_locations = true;
  walk_gimple_seq (body, diagnose_sb_2, NULL, &wi);

  splay_tree_delete (all_labels);
  all_labels = NULL;

  return 0;
}
static bool
gate_diagnose_omp_blocks (void)
{
  return flag_openmp != 0;
}
struct gimple_opt_pass pass_diagnose_omp_blocks =
{
 {
  GIMPLE_PASS,
  "*diagnose_omp_blocks",		/* name */
  gate_diagnose_omp_blocks,		/* gate */
  diagnose_omp_structured_block_errors,	/* execute */
  NULL,					/* sub */
  NULL,					/* next */
  0,					/* static_pass_number */
  TV_NONE,				/* tv_id */
  PROP_gimple_any,			/* properties_required */
  0,					/* properties_provided */
  0,					/* properties_destroyed */
  0,					/* todo_flags_start */
  0,					/* todo_flags_finish */
 }
};
#include "gt-omp-low.h"