From 01914336a927902b9a4e726e41018b5e1223fcb6 Mon Sep 17 00:00:00 2001
From: Martin Jambor
Date: Wed, 14 Dec 2016 23:36:45 +0100
Subject: [PATCH] Coding style fixes

2016-12-14  Martin Jambor

	* omp-offload.c: Fix coding style.
	* omp-expand.c: Likewise.
	* omp-general.c: Likewise.
	* omp-grid.c: Likewise.
	* omp-low.c: Fix coding style of parts touched by the
	previous splitting patch.

From-SVN: r243674
---
 gcc/ChangeLog     |  9 ++++++
 gcc/omp-expand.c  | 95 ++++++++++++++++++++++++++++---------------------------
 gcc/omp-general.c | 10 +++---
 gcc/omp-grid.c    | 41 +++++++++++++-----------
 gcc/omp-low.c     | 43 ++++++++++++-------------
 gcc/omp-offload.c | 22 ++++++-------
 6 files changed, 117 insertions(+), 103 deletions(-)

diff --git a/gcc/ChangeLog b/gcc/ChangeLog
index 0afefdb..b3a6131 100644
--- a/gcc/ChangeLog
+++ b/gcc/ChangeLog
@@ -1,5 +1,14 @@
 2016-12-14  Martin Jambor
 
+	* omp-offload.c: Fix coding style.
+	* omp-expand.c: Likewise.
+	* omp-general.c: Likewise.
+	* omp-grid.c: Likewise.
+	* omp-low.c: Fix coding style of parts touched by the
+	previous splitting patch.
+
+2016-12-14  Martin Jambor
+
 	* omp-general.h: New file.
 	* omp-general.c: New file.
 	* omp-expand.h: Likewise.
diff --git a/gcc/omp-expand.c b/gcc/omp-expand.c
index a953c8b..1f1055c 100644
--- a/gcc/omp-expand.c
+++ b/gcc/omp-expand.c
@@ -137,7 +137,7 @@ is_combined_parallel (struct omp_region *region)
    Is lowered into:
-   # BLOCK 2 (PAR_ENTRY_BB)
    .omp_data_o.i = i;
    #pragma omp parallel [child fn: bar.omp_fn.0 ( ..., D.1598)
@@ -1202,7 +1202,7 @@ expand_omp_taskreg (struct omp_region *region)
 	  if (TREE_CODE (arg) == ADDR_EXPR
 	      && TREE_OPERAND (arg, 0)
-		== gimple_omp_taskreg_data_arg (entry_stmt))
 	    {
 	      parcopy_stmt = stmt;
 	      break;
@@ -1219,7 +1219,7 @@ expand_omp_taskreg (struct omp_region *region)
 	gsi_remove (&gsi, true);
       else
 	{
-	  /* ?? Is setting the subcode really necessary ?? */
 	  gimple_omp_set_subcode (parcopy_stmt, TREE_CODE (arg));
 	  gimple_assign_set_rhs1 (parcopy_stmt, arg);
 	}
@@ -1317,7 +1317,7 @@ expand_omp_taskreg (struct omp_region *region)
 	  set_immediate_dominator (CDI_DOMINATORS, dest_bb, new_bb);
 	}
       /* When the OMP expansion process cannot guarantee an up-to-date
-	 loop tree arrange for the child function to fixup loops. */
       if (loops_state_satisfies_p (LOOPS_NEED_FIXUP))
 	child_cfun->x_current_loops->state |= LOOPS_NEED_FIXUP;
@@ -1401,7 +1401,7 @@ expand_omp_taskreg (struct omp_region *region)
 struct oacc_collapse
 {
-  tree base;  /* Base value. */
   tree iters;  /* Number of steps. */
   tree step;  /* step size. */
 };
@@ -1449,7 +1449,7 @@ expand_oacc_collapse_init (const struct omp_for_data *fd,
       e = force_gimple_operand_gsi (gsi, e, true, NULL_TREE, true, GSI_SAME_STMT);
-      /* Convert the step, avoiding possible unsigned->signed overflow. */
       negating = !up && TYPE_UNSIGNED (TREE_TYPE (s));
       if (negating)
 	s = fold_build1 (NEGATE_EXPR, TREE_TYPE (s), s);
@@ -1459,7 +1459,7 @@ expand_oacc_collapse_init (const struct omp_for_data *fd,
       s = force_gimple_operand_gsi (gsi, s, true, NULL_TREE, true, GSI_SAME_STMT);
-      /* Determine the range, avoiding possible unsigned->signed overflow.
*/ negating = !up && TYPE_UNSIGNED (iter_type); expr = fold_build2 (MINUS_EXPR, plus_type, fold_convert (plus_type, negating ? b : e), @@ -2749,7 +2749,7 @@ expand_omp_for_generic (struct omp_region *region, t = fold_build2 (NE_EXPR, boolean_type_node, t, build_int_cst (TREE_TYPE (t), 0)); t = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE, - true, GSI_SAME_STMT); + true, GSI_SAME_STMT); if (arr && !TREE_STATIC (arr)) { tree clobber = build_constructor (TREE_TYPE (arr), NULL); @@ -3696,7 +3696,7 @@ find_phi_with_arg_on_edge (tree arg, edge e) if the loop is not entered L0: s0 = (trip * nthreads + threadid) * CHUNK; - e0 = min(s0 + CHUNK, n); + e0 = min (s0 + CHUNK, n); if (s0 < n) goto L1; else goto L4; L1: V = s0 * STEP + N1; @@ -4136,7 +4136,8 @@ expand_omp_for_static_chunk (struct omp_region *region, find_edge (cont_bb, trip_update_bb)->flags = se ? EDGE_FALSE_VALUE : EDGE_FALLTHRU; - redirect_edge_and_branch (single_succ_edge (trip_update_bb), iter_part_bb); + redirect_edge_and_branch (single_succ_edge (trip_update_bb), + iter_part_bb); } if (gimple_in_ssa_p (cfun)) @@ -4319,9 +4320,9 @@ expand_cilk_for (struct omp_region *region, struct omp_for_data *fd) where we should put low and high (reasoning given in header comment). */ - tree child_fndecl - = gimple_omp_parallel_child_fn ( - as_a (last_stmt (region->outer->entry))); + gomp_parallel *par_stmt + = as_a (last_stmt (region->outer->entry)); + tree child_fndecl = gimple_omp_parallel_child_fn (par_stmt); tree t, low_val = NULL_TREE, high_val = NULL_TREE; for (t = DECL_ARGUMENTS (child_fndecl); t; t = TREE_CHAIN (t)) { @@ -4802,7 +4803,7 @@ expand_omp_simd (struct omp_region *region, struct omp_for_data *fd) the loop. */ if ((flag_tree_loop_vectorize || (!global_options_set.x_flag_tree_loop_vectorize - && !global_options_set.x_flag_tree_vectorize)) + && !global_options_set.x_flag_tree_vectorize)) && flag_tree_loop_optimize && loop->safelen > 1) { @@ -5373,7 +5374,7 @@ expand_oacc_for (struct omp_region *region, struct omp_for_data *fd) b = force_gimple_operand_gsi (&gsi, b, true, NULL_TREE, true, GSI_SAME_STMT); e = force_gimple_operand_gsi (&gsi, e, true, NULL_TREE, true, GSI_SAME_STMT); - /* Convert the step, avoiding possible unsigned->signed overflow. */ + /* Convert the step, avoiding possible unsigned->signed overflow. */ negating = !up && TYPE_UNSIGNED (TREE_TYPE (s)); if (negating) s = fold_build1 (NEGATE_EXPR, TREE_TYPE (s), s); @@ -5387,7 +5388,7 @@ expand_oacc_for (struct omp_region *region, struct omp_for_data *fd) expr = fold_convert (diff_type, chunk_size); chunk_size = force_gimple_operand_gsi (&gsi, expr, true, NULL_TREE, true, GSI_SAME_STMT); - /* Determine the range, avoiding possible unsigned->signed overflow. */ + /* Determine the range, avoiding possible unsigned->signed overflow. */ negating = !up && TYPE_UNSIGNED (iter_type); expr = fold_build2 (MINUS_EXPR, plus_type, fold_convert (plus_type, negating ? b : e), @@ -5432,7 +5433,7 @@ expand_oacc_for (struct omp_region *region, struct omp_for_data *fd) /* Remove the GIMPLE_OMP_FOR. */ gsi_remove (&gsi, true); - /* Fixup edges from head_bb */ + /* Fixup edges from head_bb. */ be = BRANCH_EDGE (head_bb); fte = FALLTHRU_EDGE (head_bb); be->flags |= EDGE_FALSE_VALUE; @@ -5522,7 +5523,7 @@ expand_oacc_for (struct omp_region *region, struct omp_for_data *fd) /* Remove the GIMPLE_OMP_CONTINUE. */ gsi_remove (&gsi, true); - /* Fixup edges from cont_bb */ + /* Fixup edges from cont_bb. 
*/ be = BRANCH_EDGE (cont_bb); fte = FALLTHRU_EDGE (cont_bb); be->flags |= EDGE_TRUE_VALUE; @@ -5532,7 +5533,7 @@ expand_oacc_for (struct omp_region *region, struct omp_for_data *fd) { /* Split the beginning of exit_bb to make bottom_bb. We need to insert a nop at the start, because splitting is - after a stmt, not before. */ + after a stmt, not before. */ gsi = gsi_start_bb (exit_bb); stmt = gimple_build_nop (); gsi_insert_before (&gsi, stmt, GSI_SAME_STMT); @@ -5552,7 +5553,7 @@ expand_oacc_for (struct omp_region *region, struct omp_for_data *fd) gsi_insert_after (&gsi, gimple_build_cond_empty (expr), GSI_CONTINUE_LINKING); - /* Fixup edges from bottom_bb. */ + /* Fixup edges from bottom_bb. */ split->flags ^= EDGE_FALLTHRU | EDGE_FALSE_VALUE; make_edge (bottom_bb, head_bb, EDGE_TRUE_VALUE); } @@ -5577,7 +5578,7 @@ expand_oacc_for (struct omp_region *region, struct omp_for_data *fd) gsi_insert_before (&gsi, ass, GSI_SAME_STMT); } - /* Remove the OMP_RETURN. */ + /* Remove the OMP_RETURN. */ gsi_remove (&gsi, true); if (cont_bb) @@ -5779,7 +5780,7 @@ expand_omp_sections (struct omp_region *region) si = gsi_last_bb (e->dest); l2 = NULL_TREE; if (gsi_end_p (si) - || gimple_code (gsi_stmt (si)) != GIMPLE_OMP_SECTION) + || gimple_code (gsi_stmt (si)) != GIMPLE_OMP_SECTION) l2 = gimple_block_label (e->dest); else FOR_EACH_EDGE (e, ei, l0_bb->succs) @@ -6277,7 +6278,7 @@ expand_omp_atomic_fetch_op (basic_block load_bb, oldval = *addr; repeat: - newval = rhs; // with oldval replacing *addr in rhs + newval = rhs; // with oldval replacing *addr in rhs oldval = __sync_val_compare_and_swap (addr, oldval, newval); if (oldval != newval) goto repeat; @@ -6398,11 +6399,11 @@ expand_omp_atomic_pipeline (basic_block load_bb, basic_block store_bb, if (iaddr == addr) storedi = stored_val; else - storedi = - force_gimple_operand_gsi (&si, - build1 (VIEW_CONVERT_EXPR, itype, - stored_val), true, NULL_TREE, true, - GSI_SAME_STMT); + storedi + = force_gimple_operand_gsi (&si, + build1 (VIEW_CONVERT_EXPR, itype, + stored_val), true, NULL_TREE, true, + GSI_SAME_STMT); /* Build the compare&swap statement. */ new_storedi = build_call_expr (cmpxchg, 3, iaddr, loadedi, storedi); @@ -6427,9 +6428,8 @@ expand_omp_atomic_pipeline (basic_block load_bb, basic_block store_bb, /* Note that we always perform the comparison as an integer, even for floating point. This allows the atomic operation to properly succeed even with NaNs and -0.0. */ - stmt = gimple_build_cond_empty - (build2 (NE_EXPR, boolean_type_node, - new_storedi, old_vali)); + tree ne = build2 (NE_EXPR, boolean_type_node, new_storedi, old_vali); + stmt = gimple_build_cond_empty (ne); gsi_insert_before (&si, stmt, GSI_SAME_STMT); /* Update cfg. */ @@ -6463,9 +6463,9 @@ expand_omp_atomic_pipeline (basic_block load_bb, basic_block store_bb, /* A subroutine of expand_omp_atomic. Implement the atomic operation as: - GOMP_atomic_start (); - *addr = rhs; - GOMP_atomic_end (); + GOMP_atomic_start (); + *addr = rhs; + GOMP_atomic_end (); The result is not globally atomic, but works so long as all parallel references are within #pragma omp atomic directives. According to @@ -6522,7 +6522,7 @@ expand_omp_atomic_mutex (basic_block load_bb, basic_block store_bb, } /* Expand an GIMPLE_OMP_ATOMIC statement. We try to expand - using expand_omp_atomic_fetch_op. If it failed, we try to + using expand_omp_atomic_fetch_op. 
If it failed, we try to call expand_omp_atomic_pipeline, and if it fails too, the ultimate fallback is wrapping the operation in a mutex (expand_omp_atomic_mutex). REGION is the atomic region built @@ -6618,7 +6618,9 @@ mark_loops_in_oacc_kernels_region (basic_block region_entry, if (nr_outer_loops != 1) return; - for (struct loop *loop = single_outer->inner; loop != NULL; loop = loop->inner) + for (struct loop *loop = single_outer->inner; + loop != NULL; + loop = loop->inner) if (loop->next) return; @@ -6800,7 +6802,7 @@ push_target_argument_according_to_value (gimple_stmt_iterator *gsi, int device, } } -/* Create an array of arguments that is then passed to GOMP_target. */ +/* Create an array of arguments that is then passed to GOMP_target. */ static tree get_target_arguments (gimple_stmt_iterator *gsi, gomp_target *tgt_stmt) @@ -6828,8 +6830,8 @@ get_target_arguments (gimple_stmt_iterator *gsi, gomp_target *tgt_stmt) if (omp_find_clause (gimple_omp_target_clauses (tgt_stmt), OMP_CLAUSE__GRIDDIM_)) { - t = get_target_argument_identifier (GOMP_DEVICE_HSA, true, - GOMP_TARGET_ARG_HSA_KERNEL_ATTRIBUTES); + int id = GOMP_TARGET_ARG_HSA_KERNEL_ATTRIBUTES; + t = get_target_argument_identifier (GOMP_DEVICE_HSA, true, id); args.quick_push (t); args.quick_push (grid_get_kernel_launch_attributes (gsi, tgt_stmt)); } @@ -7378,7 +7380,7 @@ expand_omp_target (struct omp_region *region) /* Expand KFOR loop as a HSA grifidied kernel, i.e. as a body only with iteration variable derived from the thread number. INTRA_GROUP means this is an expansion of a loop iterating over work-items within a separate - iteration over groups. */ + iteration over groups. */ static void grid_expand_omp_for_loop (struct omp_region *kfor, bool intra_group) @@ -7390,7 +7392,7 @@ grid_expand_omp_for_loop (struct omp_region *kfor, bool intra_group) size_t collapse = gimple_omp_for_collapse (for_stmt); struct omp_for_data_loop *loops = XALLOCAVEC (struct omp_for_data_loop, - gimple_omp_for_collapse (for_stmt)); + gimple_omp_for_collapse (for_stmt)); struct omp_for_data fd; remove_edge (BRANCH_EDGE (kfor->entry)); @@ -7448,7 +7450,7 @@ grid_expand_omp_for_loop (struct omp_region *kfor, bool intra_group) gassign *assign_stmt = gimple_build_assign (startvar, t); gsi_insert_before (&gsi, assign_stmt, GSI_SAME_STMT); } - /* Remove the omp for statement */ + /* Remove the omp for statement. */ gsi = gsi_last_bb (kfor->entry); gsi_remove (&gsi, true); @@ -7500,7 +7502,7 @@ grid_remap_kernel_arg_accesses (tree *tp, int *walk_subtrees, void *data) } /* If TARGET region contains a kernel body for loop, remove its region from the - TARGET and expand it in HSA gridified kernel fashion. */ + TARGET and expand it in HSA gridified kernel fashion. */ static void grid_expand_target_grid_body (struct omp_region *target) @@ -7534,7 +7536,8 @@ grid_expand_target_grid_body (struct omp_region *target) gcc_assert (omp_find_clause (gimple_omp_target_clauses (tgt_stmt), OMP_CLAUSE__GRIDDIM_)); - tree inside_block = gimple_block (first_stmt (single_succ (gpukernel->entry))); + tree inside_block + = gimple_block (first_stmt (single_succ (gpukernel->entry))); *pp = gpukernel->next; for (pp = &gpukernel->inner; *pp; pp = &(*pp)->next) if ((*pp)->type == GIMPLE_OMP_FOR) @@ -7596,7 +7599,7 @@ grid_expand_target_grid_body (struct omp_region *target) grid_expand_omp_for_loop (kfor, false); - /* Remove the omp for statement */ + /* Remove the omp for statement. 
*/ gimple_stmt_iterator gsi = gsi_last_bb (gpukernel->entry); gsi_remove (&gsi, true); /* Replace the GIMPLE_OMP_RETURN at the end of the kernel region with a real @@ -7685,7 +7688,7 @@ expand_omp (struct omp_region *region) gimple *inner_stmt = NULL; /* First, determine whether this is a combined parallel+workshare - region. */ + region. */ if (region->type == GIMPLE_OMP_PARALLEL) determine_parallel_type (region); else if (region->type == GIMPLE_OMP_TARGET) diff --git a/gcc/omp-general.c b/gcc/omp-general.c index 0cad8a5..cac9bed 100644 --- a/gcc/omp-general.c +++ b/gcc/omp-general.c @@ -418,7 +418,7 @@ omp_max_vf (void) || !flag_tree_loop_optimize || (!flag_tree_loop_vectorize && (global_options_set.x_flag_tree_loop_vectorize - || global_options_set.x_flag_tree_vectorize))) + || global_options_set.x_flag_tree_vectorize))) return 1; int vf = 1; @@ -442,7 +442,7 @@ omp_max_simt_vf (void) if (!optimize) return 0; if (ENABLE_OFFLOADING) - for (const char *c = getenv ("OFFLOAD_TARGET_NAMES"); c; ) + for (const char *c = getenv ("OFFLOAD_TARGET_NAMES"); c;) { if (!strncmp (c, "nvptx", strlen ("nvptx"))) return 32; @@ -481,7 +481,7 @@ oacc_launch_pack (unsigned code, tree device, unsigned op) represented as a list of INTEGER_CST. Those that are runtime exprs are represented as an INTEGER_CST of zero. - TOOO. Normally the attribute will just contain a single such list. If + TODO: Normally the attribute will just contain a single such list. If however it contains a list of lists, this will represent the use of device_type. Each member of the outer list is an assoc list of dimensions, keyed by the device type. The first entry will be the @@ -566,8 +566,8 @@ tree oacc_build_routine_dims (tree clauses) { /* Must match GOMP_DIM ordering. */ - static const omp_clause_code ids[] = - {OMP_CLAUSE_GANG, OMP_CLAUSE_WORKER, OMP_CLAUSE_VECTOR, OMP_CLAUSE_SEQ}; + static const omp_clause_code ids[] + = {OMP_CLAUSE_GANG, OMP_CLAUSE_WORKER, OMP_CLAUSE_VECTOR, OMP_CLAUSE_SEQ}; int ix; int level = -1; diff --git a/gcc/omp-grid.c b/gcc/omp-grid.c index 81f6ea5..2b469f2 100644 --- a/gcc/omp-grid.c +++ b/gcc/omp-grid.c @@ -48,7 +48,7 @@ omp_grid_lastprivate_predicate (struct omp_for_data *fd) /* When dealing with a gridified loop, we need to check up to three collapsed iteration variables but they are not actually captured in this fd. Fortunately, we can easily rely on HSA builtins to get this - information. */ + information. */ tree id, size; if (gimple_omp_for_kind (fd->for_stmt) == GF_OMP_FOR_KIND_GRID_LOOP @@ -81,7 +81,7 @@ omp_grid_lastprivate_predicate (struct omp_for_data *fd) } /* Structure describing the basic properties of the loop we ara analyzing - whether it can be gridified and when it is gridified. */ + whether it can be gridified and when it is gridified. */ struct grid_prop { @@ -162,8 +162,9 @@ grid_find_single_omp_among_assignments_1 (gimple_seq seq, grid_prop *grid, continue; if (gbind *bind = dyn_cast (stmt)) { - if (!grid_find_single_omp_among_assignments_1 (gimple_bind_body (bind), - grid, name, ret)) + gimple_seq bind_body = gimple_bind_body (bind); + if (!grid_find_single_omp_among_assignments_1 (bind_body, grid, name, + ret)) return false; } else if (is_gimple_omp (stmt)) @@ -325,7 +326,7 @@ grid_parallel_clauses_gridifiable (gomp_parallel *par, location_t tloc) /* Examine clauses and the body of omp loop statement GFOR and if something prevents gridification, issue a missed-optimization diagnostics and return - false, otherwise return true. 
GRID describes hitherto discovered properties + false, otherwise return true. GRID describes hitherto discovered properties of the loop that is evaluated for possible gridification. */ static bool @@ -414,7 +415,7 @@ grid_inner_loop_gridifiable_p (gomp_for *gfor, grid_prop *grid) /* Given distribute omp construct represented by DIST, which in the original source forms a compound construct with a looping construct, return true if it - can be turned into a gridified HSA kernel. Otherwise return false. GRID + can be turned into a gridified HSA kernel. Otherwise return false. GRID describes hitherto discovered properties of the loop that is evaluated for possible gridification. */ @@ -455,7 +456,7 @@ grid_dist_follows_simple_pattern (gomp_for *dist, grid_prop *grid) /* Given an omp loop statement GFOR, return true if it can participate in tiling gridification, i.e. in one where the distribute and parallel for loops do not form a compound statement. GRID describes hitherto discovered - properties of the loop that is evaluated for possible gridification. */ + properties of the loop that is evaluated for possible gridification. */ static bool grid_gfor_follows_tiling_pattern (gomp_for *gfor, grid_prop *grid) @@ -599,7 +600,7 @@ grid_handle_call_in_distribute (gimple_stmt_iterator *gsi) /* Given a sequence of statements within a distribute omp construct or a parallel construct, which in the original source does not form a compound construct with a looping construct, return true if it does not prevent us - from turning it into a gridified HSA kernel. Otherwise return false. GRID + from turning it into a gridified HSA kernel. Otherwise return false. GRID describes hitherto discovered properties of the loop that is evaluated for possible gridification. IN_PARALLEL must be true if seq is within a parallel construct and flase if it is only within a distribute @@ -910,7 +911,7 @@ grid_mark_variable_segment (tree var, enum grid_var_segment segment) their uses. Fortunately, we do not have to do this because if they are not addressable, it means they are not used in atomic or parallel statements and so relaxed GPU consistency rules mean we can just keep them - private. */ + private. 
*/ if (!TREE_ADDRESSABLE (var)) return; @@ -961,7 +962,9 @@ grid_copy_leading_local_assignments (gimple_seq src, gimple_stmt_iterator *dst, (gimple_bind_body (bind), dst, tgt_bind, var_segment, wi); if (var_segment != GRID_SEGMENT_PRIVATE) - for (tree var = gimple_bind_vars (bind); var; var = DECL_CHAIN (var)) + for (tree var = gimple_bind_vars (bind); + var; + var = DECL_CHAIN (var)) grid_mark_variable_segment (var, var_segment); if (r) return r; @@ -1191,7 +1194,8 @@ grid_process_kernel_body_copy (grid_prop *grid, gimple_seq seq, gcc_assert (teams); gimple_omp_teams_set_grid_phony (teams, true); stmt = grid_copy_leading_local_assignments (gimple_omp_body (teams), dst, - tgt_bind, GRID_SEGMENT_GLOBAL, wi); + tgt_bind, GRID_SEGMENT_GLOBAL, + wi); gcc_checking_assert (stmt); gomp_for *dist = dyn_cast (stmt); gcc_assert (dist); @@ -1278,7 +1282,8 @@ grid_attempt_target_gridification (gomp_target *target, gomp_for *inner_loop = grid_process_kernel_body_copy (&grid, kernel_seq, gsi, tgt_bind, &wi); - gbind *old_bind = as_a (gimple_seq_first (gimple_omp_body (target))); + gbind *old_bind + = as_a (gimple_seq_first (gimple_omp_body (target))); gbind *new_bind = as_a (gimple_seq_first (kernel_seq)); tree new_block = gimple_bind_block (new_bind); tree enc_block = BLOCK_SUPERCONTEXT (gimple_bind_block (old_bind)); @@ -1324,11 +1329,11 @@ grid_attempt_target_gridification (gomp_target *target, else t = fold_build2 (TRUNC_DIV_EXPR, itype, t, step); if (grid.tiling) - { - if (cond_code == GT_EXPR) - step = fold_build1 (NEGATE_EXPR, itype, step); - t = fold_build2 (MULT_EXPR, itype, t, step); - } + { + if (cond_code == GT_EXPR) + step = fold_build1 (NEGATE_EXPR, itype, step); + t = fold_build2 (MULT_EXPR, itype, t, step); + } tree gs = fold_convert (uint32_type_node, t); gimple_seq tmpseq = NULL; @@ -1360,7 +1365,7 @@ grid_attempt_target_gridification (gomp_target *target, return; } -/* Walker function doing all the work for create_target_kernels. */ +/* Walker function doing all the work for create_target_kernels. */ static tree grid_gridify_all_targets_stmt (gimple_stmt_iterator *gsi, diff --git a/gcc/omp-low.c b/gcc/omp-low.c index 4fb59eb40..e69b2b2 100644 --- a/gcc/omp-low.c +++ b/gcc/omp-low.c @@ -2516,7 +2516,7 @@ check_omp_nesting_restrictions (gimple *stmt, omp_context *ctx) if (gimple_omp_for_kind (stmt) == GF_OMP_FOR_KIND_OACC_LOOP) { bool ok = false; - + if (ctx) switch (gimple_code (ctx->stmt)) { @@ -3431,7 +3431,7 @@ omp_clause_aligned_alignment (tree clause) && GET_MODE_SIZE (vmode) < vs && GET_MODE_2XWIDER_MODE (vmode) != VOIDmode) vmode = GET_MODE_2XWIDER_MODE (vmode); - + tree type = lang_hooks.types.type_for_mode (mode, 1); if (type == NULL_TREE || TYPE_MODE (type) != mode) continue; @@ -4851,7 +4851,7 @@ lower_oacc_reductions (location_t loc, tree clauses, tree level, bool inner, var = orig; incoming = outgoing = var; - + if (!inner) { /* See if an outer construct also reduces this variable. */ @@ -4879,7 +4879,7 @@ lower_oacc_reductions (location_t loc, tree clauses, tree level, bool inner, default: goto do_lookup; } - + outer = probe; for (; cls; cls = OMP_CLAUSE_CHAIN (cls)) if (OMP_CLAUSE_CODE (cls) == OMP_CLAUSE_REDUCTION @@ -4927,14 +4927,14 @@ lower_oacc_reductions (location_t loc, tree clauses, tree level, bool inner, } incoming = outgoing = (t ? 
t : orig); } - + has_outer_reduction:; } if (!ref_to_res) ref_to_res = integer_zero_node; - if (omp_is_reference (orig)) + if (omp_is_reference (orig)) { tree type = TREE_TYPE (var); const char *id = IDENTIFIER_POINTER (DECL_NAME (var)); @@ -5859,9 +5859,9 @@ lower_omp_sections (gimple_stmt_iterator *gsi_p, omp_context *ctx) new_body = maybe_catch_exception (new_body); - t = gimple_build_omp_return - (!!omp_find_clause (gimple_omp_sections_clauses (stmt), - OMP_CLAUSE_NOWAIT)); + bool nowait = omp_find_clause (gimple_omp_sections_clauses (stmt), + OMP_CLAUSE_NOWAIT) != NULL_TREE; + t = gimple_build_omp_return (nowait); gimple_seq_add_stmt (&new_body, t); maybe_add_implicit_barrier_cancel (ctx, &new_body); @@ -5993,7 +5993,6 @@ static void lower_omp_single (gimple_stmt_iterator *gsi_p, omp_context *ctx) { tree block; - gimple *t; gomp_single *single_stmt = as_a (gsi_stmt (*gsi_p)); gbind *bind; gimple_seq bind_body, bind_body_tail = NULL, dlist; @@ -6022,10 +6021,10 @@ lower_omp_single (gimple_stmt_iterator *gsi_p, omp_context *ctx) bind_body = maybe_catch_exception (bind_body); - t = gimple_build_omp_return - (!!omp_find_clause (gimple_omp_single_clauses (single_stmt), - OMP_CLAUSE_NOWAIT)); - gimple_seq_add_stmt (&bind_body_tail, t); + bool nowait = omp_find_clause (gimple_omp_single_clauses (single_stmt), + OMP_CLAUSE_NOWAIT) != NULL_TREE; + gimple *g = gimple_build_omp_return (nowait); + gimple_seq_add_stmt (&bind_body_tail, g); maybe_add_implicit_barrier_cancel (ctx, &bind_body_tail); if (ctx->record_type) { @@ -6534,7 +6533,8 @@ lower_omp_critical (gimple_stmt_iterator *gsi_p, omp_context *ctx) } lock = builtin_decl_explicit (BUILT_IN_GOMP_CRITICAL_NAME_START); - lock = build_call_expr_loc (loc, lock, 1, build_fold_addr_expr_loc (loc, decl)); + lock = build_call_expr_loc (loc, lock, 1, + build_fold_addr_expr_loc (loc, decl)); unlock = builtin_decl_explicit (BUILT_IN_GOMP_CRITICAL_NAME_END); unlock = build_call_expr_loc (loc, unlock, 1, @@ -6811,10 +6811,10 @@ lower_omp_for (gimple_stmt_iterator *gsi_p, omp_context *ctx) gimple_omp_for_clauses (stmt), &oacc_head, &oacc_tail, ctx); - /* Add OpenACC partitioning and reduction markers just before the loop */ + /* Add OpenACC partitioning and reduction markers just before the loop. */ if (oacc_head) gimple_seq_add_seq (&body, oacc_head); - + lower_omp_for_lastprivate (&fd, &body, &dlist, ctx); if (gimple_omp_for_kind (stmt) == GF_OMP_FOR_KIND_FOR) @@ -8667,8 +8667,7 @@ lower_omp_1 (gimple_stmt_iterator *gsi_p, omp_context *ctx) lower_omp (gimple_try_cleanup_ptr (stmt), ctx); break; case GIMPLE_TRANSACTION: - lower_omp (gimple_transaction_body_ptr ( - as_a (stmt)), + lower_omp (gimple_transaction_body_ptr (as_a (stmt)), ctx); break; case GIMPLE_BIND: @@ -8961,16 +8960,14 @@ diagnose_sb_0 (gimple_stmt_iterator *gsi_p, kind = "OpenMP"; } - /* - Previously we kept track of the label's entire context in diagnose_sb_[12] + /* Previously we kept track of the label's entire context in diagnose_sb_[12] so we could traverse it and issue a correct "exit" or "enter" error message upon a structured block violation. We built the context by building a list with tree_cons'ing, but there is no easy counterpart in gimple tuples. It seems like far too much work for issuing exit/enter error messages. If someone really misses the - distinct error message... patches welcome. - */ + distinct error message... patches welcome. 
*/ #if 0 /* Try to avoid confusing the user by producing and error message diff --git a/gcc/omp-offload.c b/gcc/omp-offload.c index fabdf2d..8c2c6eb 100644 --- a/gcc/omp-offload.c +++ b/gcc/omp-offload.c @@ -61,8 +61,8 @@ struct oacc_loop gcall *marker; /* Initial head marker. */ - gcall *heads[GOMP_DIM_MAX]; /* Head marker functions. */ - gcall *tails[GOMP_DIM_MAX]; /* Tail marker functions. */ + gcall *heads[GOMP_DIM_MAX]; /* Head marker functions. */ + gcall *tails[GOMP_DIM_MAX]; /* Tail marker functions. */ tree routine; /* Pseudo-loop enclosing a routine. */ @@ -273,7 +273,7 @@ oacc_thread_numbers (bool pos, int mask, gimple_seq *seq) operate on adjacent iterations. At the worker and gang level, each gang/warp executes a set of contiguous iterations. Chunking can override this such that each iteration engine executes a - contiguous chunk, and then moves on to stride to the next chunk. */ + contiguous chunk, and then moves on to stride to the next chunk. */ static void oacc_xform_loop (gcall *call) @@ -521,7 +521,7 @@ oacc_parse_default_dims (const char *dims) /* Validate and update the dimensions for offloaded FN. ATTRS is the raw attribute. DIMS is an array of dimensions, which is filled in. LEVEL is the partitioning level of a routine, or -1 for an offload - region itself. USED is the mask of partitioned execution in the + region itself. USED is the mask of partitioned execution in the function. */ static void @@ -638,7 +638,7 @@ new_oacc_loop (oacc_loop *parent, gcall *marker) loop->marker = marker; /* TODO: This is where device_type flattening would occur for the loop - flags. */ + flags. */ loop->flags = TREE_INT_CST_LOW (gimple_call_arg (marker, 3)); @@ -880,7 +880,7 @@ oacc_loop_sibling_nreverse (oacc_loop *loop) do { if (loop->child) - loop->child = oacc_loop_sibling_nreverse (loop->child); + loop->child = oacc_loop_sibling_nreverse (loop->child); oacc_loop *next = loop->sibling; loop->sibling = last; @@ -1066,8 +1066,8 @@ oacc_loop_fixed_partitions (oacc_loop *loop, unsigned outer_mask) loop->flags &= ~OLF_AUTO; if (seq_par) { - loop->flags &= - ~((GOMP_DIM_MASK (GOMP_DIM_MAX) - 1) << OLF_DIM_BASE); + loop->flags + &= ~((GOMP_DIM_MASK (GOMP_DIM_MAX) - 1) << OLF_DIM_BASE); this_mask = 0; } } @@ -1183,14 +1183,14 @@ oacc_loop_auto_partitions (oacc_loop *loop, unsigned outer_mask) /* Allocate the loop at the innermost available level. */ unsigned this_mask = 0; - /* Determine the outermost partitioning used within this loop. */ + /* Determine the outermost partitioning used within this loop. */ this_mask = loop->inner | GOMP_DIM_MASK (GOMP_DIM_MAX); this_mask = least_bit_hwi (this_mask); /* Pick the partitioning just inside that one. */ this_mask >>= 1; - /* And avoid picking one use by an outer loop. */ + /* And avoid picking one use by an outer loop. */ this_mask &= ~outer_mask; if (!this_mask && noisy) @@ -1476,7 +1476,7 @@ default_goacc_validate_dims (tree ARG_UNUSED (decl), int *dims, return changed; } -/* Default dimension bound is unknown on accelerator and 1 on host. */ +/* Default dimension bound is unknown on accelerator and 1 on host. */ int default_goacc_dim_limit (int ARG_UNUSED (axis)) -- 2.7.4
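
The style rules applied throughout the hunks above are the usual GNU/GCC
conventions: a space before the opening parenthesis of a call, long
expressions wrapped so that "=" or the operator begins the continuation
line, and sentence-ending comments closed with ".  */".  The snippet below
is an illustrative sketch only; it is not part of the patch and all
identifiers in it (chunk_end_old_style, chunk_end_gnu_style) are made up.
It shows the same tiny helper written first against and then with those
conventions, as a self-contained program:

  #include <stdio.h>

  /* Before: no space before the parameter list, assignment broken after
     '=', single space before the comment terminator. */
  static int chunk_end_old_style(int s0, int chunk, int n) {
    int e0 =
      (s0 + chunk < n) ? s0 + chunk : n;
    return e0;
  }

  /* After: space before the parameter list, '=' starts the continuation
     line, two spaces before the closing comment marker.  */
  static int
  chunk_end_gnu_style (int s0, int chunk, int n)
  {
    int e0
      = (s0 + chunk < n) ? s0 + chunk : n;
    return e0;
  }

  int
  main (void)
  {
    /* Both helpers compute the same value; only the formatting differs.  */
    printf ("%d %d\n", chunk_end_old_style (0, 4, 10),
	    chunk_end_gnu_style (0, 4, 10));
    return 0;
  }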