gcc/
* basic-block.h (FOR_EACH_BB): Eliminate macro.
* asan.c (transform_statements, execute_sanopt): Eliminate
use of FOR_EACH_BB in favor of FOR_EACH_BB_FN, to make use of cfun
explicit.
* auto-inc-dec.c (rest_of_handle_auto_inc_dec): Likewise.
* bb-reorder.c (find_rarely_executed_basic_blocks_and_crossing_edges,
set_edge_can_fallthru_flag, fix_up_fall_thru_edges,
fix_crossing_unconditional_branches, add_reg_crossing_jump_notes,
insert_section_boundary_note, rest_of_handle_reorder_blocks,
duplicate_computed_gotos): Likewise.
* cfg.c (clear_edges, compact_blocks, brief_dump_cfg): Likewise.
* cfganal.c (find_unreachable_blocks, add_noreturn_fake_exit_edges,
compute_dominance_frontiers_1, single_pred_before_succ_order): Likewise.
* cfgbuild.c (find_many_sub_basic_blocks): Likewise.
* cfgcleanup.c (try_optimize_cfg, delete_dead_jumptables): Likewise.
* cfgexpand.c (add_scope_conflicts, discover_nonconstant_array_refs):
Likewise.
* cfgloop.c (flow_loops_cfg_dump, get_loop_body, record_loop_exits,
verify_loop_structure): Likewise.
* cfgloopanal.c (mark_loop_exit_edges): Likewise.
* cfgrtl.c (compute_bb_for_insn, find_partition_fixes,
verify_hot_cold_block_grouping, purge_all_dead_edges,
fixup_abnormal_edges, record_effective_endpoints,
outof_cfg_layout_mode, fixup_reorder_chain, force_one_exit_fallthru,
break_superblocks): Likewise.
* cgraphbuild.c (build_cgraph_edges, rebuild_cgraph_edges,
cgraph_rebuild_references): Likewise.
* combine-stack-adj.c (combine_stack_adjustments): Likewise.
* combine.c (delete_noop_moves, create_log_links,
combine_instructions): Likewise.
* config/arm/arm.c (thumb1_reorg, thumb2_reorg): Likewise.
* config/bfin/bfin.c (bfin_gen_bundles, reorder_var_tracking_notes):
Likewise.
* config/c6x/c6x.c (c6x_gen_bundles, conditionalize_after_sched,
c6x_reorg): Likewise.
* config/epiphany/resolve-sw-modes.c (resolve_sw_modes): Likewise.
* config/frv/frv.c (frv_optimize_membar): Likewise.
* config/i386/i386.c (ix86_finalize_stack_realign_flags): Likewise.
* config/ia64/ia64.c (ia64_reorg): Likewise.
* config/mips/mips.c (mips_annotate_pic_calls): Likewise.
* config/picochip/picochip.c (reorder_var_tracking_notes): Likewise.
* config/rs6000/rs6000.c (rs6000_alloc_sdmode_stack_slot): Likewise.
* config/s390/s390.c (s390_regs_ever_clobbered): Likewise.
* config/sh/sh_treg_combine.cc (sh_treg_combine::execute): Likewise.
* config/spu/spu.c (spu_machine_dependent_reorg): Likewise.
* config/tilegx/tilegx.c (tilegx_gen_bundles,
reorder_var_tracking_notes): Likewise.
* config/tilepro/tilepro.c (tilepro_gen_bundles,
reorder_var_tracking_notes): Likewise.
* coverage.c (coverage_compute_cfg_checksum): Likewise.
* cprop.c (compute_hash_table_work, compute_cprop_data,
local_cprop_pass, find_implicit_sets): Likewise.
* cse.c (cse_condition_code_reg): Likewise.
* dce.c (prescan_insns_for_dce): Likewise.
* df-core.c (df_compact_blocks): Likewise.
* df-problems.c (df_word_lr_alloc): Likewise.
* df-scan.c (df_scan_start_dump, df_scan_blocks, df_insn_rescan_all,
df_update_entry_exit_and_calls): Likewise.
* dominance.c (calculate_dominance_info, verify_dominators,
debug_dominance_info): Likewise.
* dse.c (dse_step5_nospill): Likewise.
* except.c (finish_eh_generation): Likewise.
* final.c (compute_alignments): Likewise.
* function.c (thread_prologue_and_epilogue_insns,
rest_of_match_asm_constraints): Likewise.
* gcse.c (compute_hash_table_work, prune_expressions,
compute_pre_data, compute_code_hoist_vbeinout, hoist_code,
calculate_bb_reg_pressure, compute_ld_motion_mems): Likewise.
* gimple-iterator.c (gsi_commit_edge_inserts): Likewise.
* gimple-ssa-isolate-paths.c (find_implicit_erroneous_behaviour,
find_explicit_erroneous_behaviour): Likewise.
* graphite-sese-to-poly.c (rewrite_reductions_out_of_ssa,
rewrite_cross_bb_scalar_deps_out_of_ssa): Likewise.
* haifa-sched.c (haifa_sched_init): Likewise.
* hw-doloop.c (discover_loops, set_bb_indices, reorder_loops):
Likewise.
* ifcvt.c (if_convert): Likewise.
* init-regs.c (initialize_uninitialized_regs): Likewise.
* ipa-prop.c (ipcp_transform_function): Likewise.
* ipa-pure-const.c (analyze_function): Likewise.
* ipa-split.c (find_split_points, execute_split_functions): Likewise.
* ira-build.c (form_loop_tree): Likewise.
* ira-costs.c (find_costs_and_classes): Likewise.
* ira-emit.c (emit_moves, add_ranges_and_copies, ira_emit): Likewise.
* ira.c (decrease_live_ranges_number, compute_regs_asm_clobbered,
mark_elimination, update_equiv_regs, find_moveable_pseudos,
split_live_ranges_for_shrink_wrap, allocate_initial_values): Likewise.
* jump.c (mark_all_labels): Likewise.
* lcm.c (compute_laterin, compute_insert_delete, compute_available,
compute_nearerout, compute_rev_insert_delete): Likewise.
* loop-init.c (fix_loop_structure): Likewise.
* loop-invariant.c (calculate_loop_reg_pressure): Likewise.
* lower-subreg.c (decompose_multiword_subregs,
decompose_multiword_subregs): Likewise.
* lra-assigns.c (assign_by_spills): Likewise.
* lra-coalesce.c (lra_coalesce): Likewise.
* lra-constraints.c (lra_inheritance, remove_inheritance_pseudos):
Likewise.
* lra-eliminations.c (lra_init_elimination): Likewise.
* lra-spills.c (assign_spill_hard_regs, spill_pseudos,
lra_final_code_change): Likewise.
* lra.c (remove_scratches, check_rtl, has_nonexceptional_receiver,
update_inc_notes): Likewise.
* mcf.c (adjust_cfg_counts): Likewise.
* mode-switching.c (optimize_mode_switching): Likewise.
* modulo-sched.c (rest_of_handle_sms): Likewise.
* omp-low.c (optimize_omp_library_calls, expand_omp_taskreg,
expand_omp_target): Likewise.
* postreload-gcse.c (alloc_mem, compute_hash_table): Likewise.
* postreload.c (reload_cse_regs_1): Likewise.
* predict.c (strip_predict_hints, tree_bb_level_predictions,
tree_estimate_probability, expensive_function_p,
estimate_bb_frequencies, compute_function_frequency): Likewise.
* profile.c (is_inconsistent, compute_branch_probabilities,
branch_prob): Likewise.
* ree.c (find_removable_extensions): Likewise.
* reg-stack.c (compensate_edges, convert_regs, reg_to_stack): Likewise.
* regcprop.c (copyprop_hardreg_forward): Likewise.
* reginfo.c (init_subregs_of_mode): Likewise.
* regrename.c (regrename_analyze): Likewise.
* regstat.c (regstat_compute_ri, regstat_compute_calls_crossed):
Likewise.
* reload1.c (has_nonexceptional_receiver, reload,
calculate_elim_costs_all_insns): Likewise.
* resource.c (init_resource_info, free_resource_info): Likewise.
* sched-ebb.c (schedule_ebbs): Likewise.
* sched-rgn.c (is_cfg_nonregular, find_single_block_region,
haifa_find_rgns, sched_rgn_local_init): Likewise.
* sel-sched-dump.c (sel_dump_cfg_2): Likewise.
* sel-sched-ir.c (init_lv_sets, free_lv_sets,
make_regions_from_the_rest): Likewise.
* sese.c (build_sese_loop_nests, sese_build_liveouts): Likewise.
* stack-ptr-mod.c (notice_stack_pointer_modification): Likewise.
* store-motion.c (compute_store_table, build_store_vectors,
one_store_motion_pass): Likewise.
* tracer.c (tail_duplicate): Likewise.
* trans-mem.c (compute_transaction_bits): Likewise.
* tree-call-cdce.c (tree_call_cdce): Likewise.
* tree-cfg.c (replace_loop_annotate, factor_computed_gotos,
fold_cond_expr_cond, make_edges, assign_discriminators,
make_abnormal_goto_edges, cleanup_dead_labels, group_case_labels,
dump_cfg_stats, gimple_verify_flow_info, print_loop,
execute_fixup_cfg): Likewise.
* tree-cfgcleanup.c (cleanup_tree_cfg_1, merge_phi_nodes): Likewise.
* tree-complex.c (init_dont_simulate_again, tree_lower_complex):
Likewise.
* tree-dfa.c (collect_dfa_stats, dump_enumerated_decls): Likewise.
* tree-eh.c (execute_lower_resx, execute_lower_eh_dispatch,
mark_reachable_handlers): Likewise.
* tree-emutls.c (lower_emutls_function_body): Likewise.
* tree-if-conv.c (main_tree_if_conversion): Likewise.
* tree-inline.c (optimize_inline_calls): Likewise.
* tree-into-ssa.c (rewrite_into_ssa, update_ssa): Likewise.
* tree-nrv.c (tree_nrv, execute_return_slot_opt): Likewise.
* tree-object-size.c (compute_object_sizes): Likewise.
* tree-outof-ssa.c (eliminate_useless_phis, rewrite_trees,
insert_backedge_copies, tree_profiling): Likewise.
* tree-scalar-evolution.c (scev_const_prop): Likewise.
* tree-sra.c (scan_function, sra_modify_function_body,
propagate_dereference_distances, ipa_sra_modify_function_body,
convert_callers): Likewise.
* tree-ssa-ccp.c (ccp_initialize, execute_fold_all_builtins): Likewise.
* tree-ssa-coalesce.c (build_ssa_conflict_graph,
create_outofssa_var_map, coalesce_partitions): Likewise.
* tree-ssa-copy.c (init_copy_prop): Likewise.
* tree-ssa-copyrename.c (rename_ssa_copies): Likewise.
* tree-ssa-dce.c (find_obviously_necessary_stmts,
eliminate_unnecessary_stmts): Likewise.
* tree-ssa-dom.c (free_all_edge_infos, tree_ssa_dominator_optimize):
Likewise.
* tree-ssa-forwprop.c (ssa_forward_propagate_and_combine): Likewise.
* tree-ssa-live.c (clear_unused_block_pointer, remove_unused_locals,
new_tree_live_info, calculate_live_on_exit, dump_live_info,
analyze_memory_references, fill_always_executed_in,
tree_ssa_lim_finalize): Likewise.
* tree-ssa-loop-manip.c (find_uses_to_rename, verify_loop_closed_ssa):
Likewise.
* tree-ssa-math-opts.c (execute_cse_reciprocals, execute_cse_sincos,
execute_optimize_bswap, execute_optimize_widening_mul): Likewise.
* tree-ssa-propagate.c (substitute_and_fold): Likewise.
* tree-ssa-structalias.c (compute_points_to_sets): Likewise.
* tree-ssa-tail-merge.c (find_same_succ, reset_cluster_vectors):
Likewise.
* tree-ssa-ter.c (find_replaceable_exprs): Likewise.
* tree-ssa-threadupdate.c (thread_through_all_blocks): Likewise.
* tree-ssa-uncprop.c (associate_equivalences_with_edges,
tree_ssa_uncprop): Likewise.
* tree-ssa-uninit.c (warn_uninitialized_vars,
execute_late_warn_uninitialized): Likewise.
* tree-ssa.c (verify_ssa, execute_update_addresses_taken): Likewise.
* tree-stdarg.c (check_all_va_list_escapes, execute_optimize_stdarg):
Likewise.
* tree-switch-conversion.c (do_switchconv): Likewise.
* tree-vect-generic.c (expand_vector_operations): Likewise.
* tree-vectorizer.c (adjust_simduid_builtins, note_simd_array_uses,
execute_vect_slp): Likewise.
* tree-vrp.c (check_all_array_refs, remove_range_assertions,
vrp_initialize, identify_jump_threads, instrument_memory_accesses):
Likewise.
* ubsan.c (ubsan_pass): Likewise.
* value-prof.c (verify_histograms, gimple_value_profile_transformations,
gimple_find_values_to_profile): Likewise.
* var-tracking.c (vt_find_locations, dump_dataflow_sets, vt_emit_notes,
vt_initialize, delete_debug_insns, vt_finalize): Likewise.
gcc/testsuite/
* g++.dg/plugin/selfassign.c (execute_warn_self_assign): Eliminate
use of FOR_EACH_BB in favor of FOR_EACH_BB_FN, to make use of cfun
explicit.
* gcc.dg/plugin/selfassign.c (execute_warn_self_assign): Likewise.
git-svn-id: svn+ssh://gcc.gnu.org/svn/gcc/trunk@205828 138bc75d-0d04-0410-961f-82ee72b054a4
2013-12-09 David Malcolm <dmalcolm@redhat.com>
+ * basic-block.h (FOR_EACH_BB): Eliminate macro.
+
+ * asan.c (transform_statements, execute_sanopt): Eliminate
+ use of FOR_EACH_BB in favor of FOR_EACH_BB_FN, to make use of cfun
+ explicit.
+ * auto-inc-dec.c (rest_of_handle_auto_inc_dec): Likewise.
+ * bb-reorder.c (find_rarely_executed_basic_blocks_and_crossing_edges,
+ set_edge_can_fallthru_flag, fix_up_fall_thru_edges,
+ fix_crossing_unconditional_branches, add_reg_crossing_jump_notes,
+ insert_section_boundary_note, rest_of_handle_reorder_blocks,
+ duplicate_computed_gotos): Likewise.
+ * cfg.c (clear_edges, compact_blocks, brief_dump_cfg): Likewise.
+ * cfganal.c (find_unreachable_blocks, add_noreturn_fake_exit_edges,
+ compute_dominance_frontiers_1, single_pred_before_succ_order): Likewise.
+ * cfgbuild.c (find_many_sub_basic_blocks): Likewise.
+ * cfgcleanup.c (try_optimize_cfg, delete_dead_jumptables): Likewise.
+ * cfgexpand.c (add_scope_conflicts, discover_nonconstant_array_refs):
+ Likewise.
+ * cfgloop.c (flow_loops_cfg_dump, get_loop_body, record_loop_exits,
+ verify_loop_structure): Likewise.
+ * cfgloopanal.c (mark_loop_exit_edges): Likewise.
+ * cfgrtl.c (compute_bb_for_insn, find_partition_fixes,
+ verify_hot_cold_block_grouping, purge_all_dead_edges,
+ fixup_abnormal_edges, record_effective_endpoints,
+ outof_cfg_layout_mode, fixup_reorder_chain, force_one_exit_fallthru,
+ break_superblocks): Likewise.
+ * cgraphbuild.c (build_cgraph_edges, rebuild_cgraph_edges,
+ cgraph_rebuild_references): Likewise.
+ * combine-stack-adj.c (combine_stack_adjustments): Likewise.
+ * combine.c (delete_noop_moves, create_log_links,
+ combine_instructions): Likewise.
+ * config/arm/arm.c (thumb1_reorg, thumb2_reorg): Likewise.
+ * config/bfin/bfin.c (bfin_gen_bundles, reorder_var_tracking_notes):
+ Likewise.
+ * config/c6x/c6x.c (c6x_gen_bundles, conditionalize_after_sched,
+ c6x_reorg): Likewise.
+ * config/epiphany/resolve-sw-modes.c (resolve_sw_modes): Likewise.
+ * config/frv/frv.c (frv_optimize_membar): Likewise.
+ * config/i386/i386.c (ix86_finalize_stack_realign_flags): Likewise.
+ * config/ia64/ia64.c (ia64_reorg): Likewise.
+ * config/mips/mips.c (mips_annotate_pic_calls): Likewise.
+ * config/picochip/picochip.c (reorder_var_tracking_notes): Likewise.
+ * config/rs6000/rs6000.c (rs6000_alloc_sdmode_stack_slot): Likewise.
+ * config/s390/s390.c (s390_regs_ever_clobbered): Likewise.
+ * config/sh/sh_treg_combine.cc (sh_treg_combine::execute): Likewise.
+ * config/spu/spu.c (spu_machine_dependent_reorg): Likewise.
+ * config/tilegx/tilegx.c (tilegx_gen_bundles,
+ reorder_var_tracking_notes): Likewise.
+ * config/tilepro/tilepro.c (tilepro_gen_bundles,
+ reorder_var_tracking_notes): Likewise.
+ * coverage.c (coverage_compute_cfg_checksum): Likewise.
+ * cprop.c (compute_hash_table_work, compute_cprop_data,
+ local_cprop_pass, find_implicit_sets): Likewise.
+ * cse.c (cse_condition_code_reg): Likewise.
+ * dce.c (prescan_insns_for_dce): Likewise.
+ * df-core.c (df_compact_blocks): Likewise.
+ * df-problems.c (df_word_lr_alloc): Likewise.
+ * df-scan.c (df_scan_start_dump, df_scan_blocks, df_insn_rescan_all,
+ df_update_entry_exit_and_calls): Likewise.
+ * dominance.c (calculate_dominance_info, verify_dominators,
+ debug_dominance_info): Likewise.
+ * dse.c (dse_step5_nospill): Likewise.
+ * except.c (finish_eh_generation): Likewise.
+ * final.c (compute_alignments): Likewise.
+ * function.c (thread_prologue_and_epilogue_insns,
+ rest_of_match_asm_constraints): Likewise.
+ * gcse.c (compute_hash_table_work, prune_expressions,
+ compute_pre_data, compute_code_hoist_vbeinout, hoist_code,
+ calculate_bb_reg_pressure, compute_ld_motion_mems): Likewise.
+ * gimple-iterator.c (gsi_commit_edge_inserts): Likewise.
+ * gimple-ssa-isolate-paths.c (find_implicit_erroneous_behaviour,
+ find_explicit_erroneous_behaviour): Likewise.
+ * graphite-sese-to-poly.c (rewrite_reductions_out_of_ssa,
+ rewrite_cross_bb_scalar_deps_out_of_ssa): Likewise.
+ * haifa-sched.c (haifa_sched_init): Likewise.
+ * hw-doloop.c (discover_loops, set_bb_indices, reorder_loops):
+ Likewise.
+ * ifcvt.c (if_convert): Likewise.
+ * init-regs.c (initialize_uninitialized_regs): Likewise.
+ * ipa-prop.c (ipcp_transform_function): Likewise.
+ * ipa-pure-const.c (analyze_function): Likewise.
+ * ipa-split.c (find_split_points, execute_split_functions): Likewise.
+ * ira-build.c (form_loop_tree): Likewise.
+ * ira-costs.c (find_costs_and_classes): Likewise.
+ * ira-emit.c (emit_moves, add_ranges_and_copies, ira_emit): Likewise.
+ * ira.c (decrease_live_ranges_number, compute_regs_asm_clobbered,
+ mark_elimination, update_equiv_regs, find_moveable_pseudos,
+ split_live_ranges_for_shrink_wrap, allocate_initial_values): Likewise.
+ * jump.c (mark_all_labels): Likewise.
+ * lcm.c (compute_laterin, compute_insert_delete, compute_available,
+ compute_nearerout, compute_rev_insert_delete): Likewise.
+ * loop-init.c (fix_loop_structure): Likewise.
+ * loop-invariant.c (calculate_loop_reg_pressure): Likewise.
+ * lower-subreg.c (decompose_multiword_subregs,
+ decompose_multiword_subregs): Likewise.
+ * lra-assigns.c (assign_by_spills): Likewise.
+ * lra-coalesce.c (lra_coalesce): Likewise.
+ * lra-constraints.c (lra_inheritance, remove_inheritance_pseudos):
+ Likewise.
+ * lra-eliminations.c (lra_init_elimination): Likewise.
+ * lra-spills.c (assign_spill_hard_regs, spill_pseudos,
+ lra_final_code_change): Likewise.
+ * lra.c (remove_scratches, check_rtl, has_nonexceptional_receiver,
+ update_inc_notes): Likewise.
+ * mcf.c (adjust_cfg_counts): Likewise.
+ * mode-switching.c (optimize_mode_switching): Likewise.
+ * modulo-sched.c (rest_of_handle_sms): Likewise.
+ * omp-low.c (optimize_omp_library_calls, expand_omp_taskreg,
+ expand_omp_target): Likewise.
+ * postreload-gcse.c (alloc_mem, compute_hash_table): Likewise.
+ * postreload.c (reload_cse_regs_1): Likewise.
+ * predict.c (strip_predict_hints, tree_bb_level_predictions,
+ tree_estimate_probability, expensive_function_p,
+ estimate_bb_frequencies, compute_function_frequency): Likewise.
+ * profile.c (is_inconsistent, compute_branch_probabilities,
+ branch_prob): Likewise.
+ * ree.c (find_removable_extensions): Likewise.
+ * reg-stack.c (compensate_edges, convert_regs, reg_to_stack): Likewise.
+ * regcprop.c (copyprop_hardreg_forward): Likewise.
+ * reginfo.c (init_subregs_of_mode): Likewise.
+ * regrename.c (regrename_analyze): Likewise.
+ * regstat.c (regstat_compute_ri, regstat_compute_calls_crossed):
+ Likewise.
+ * reload1.c (has_nonexceptional_receiver, reload,
+ calculate_elim_costs_all_insns): Likewise.
+ * resource.c (init_resource_info, free_resource_info): Likewise.
+ * sched-ebb.c (schedule_ebbs): Likewise.
+ * sched-rgn.c (is_cfg_nonregular, find_single_block_region,
+ haifa_find_rgns, sched_rgn_local_init): Likewise.
+ * sel-sched-dump.c (sel_dump_cfg_2): Likewise.
+ * sel-sched-ir.c (init_lv_sets, free_lv_sets,
+ make_regions_from_the_rest): Likewise.
+ * sese.c (build_sese_loop_nests, sese_build_liveouts): Likewise.
+ * stack-ptr-mod.c (notice_stack_pointer_modification): Likewise.
+ * store-motion.c (compute_store_table, build_store_vectors,
+ one_store_motion_pass): Likewise.
+ * tracer.c (tail_duplicate): Likewise.
+ * trans-mem.c (compute_transaction_bits): Likewise.
+ * tree-call-cdce.c (tree_call_cdce): Likewise.
+ * tree-cfg.c (replace_loop_annotate, factor_computed_gotos,
+ fold_cond_expr_cond, make_edges, assign_discriminators,
+ make_abnormal_goto_edges, cleanup_dead_labels, group_case_labels,
+ dump_cfg_stats, gimple_verify_flow_info, print_loop,
+ execute_fixup_cfg): Likewise.
+ * tree-cfgcleanup.c (cleanup_tree_cfg_1, merge_phi_nodes): Likewise.
+ * tree-complex.c (init_dont_simulate_again, tree_lower_complex):
+ Likewise.
+ * tree-dfa.c (collect_dfa_stats, dump_enumerated_decls): Likewise.
+ * tree-eh.c (execute_lower_resx, execute_lower_eh_dispatch,
+ mark_reachable_handlers): Likewise.
+ * tree-emutls.c (lower_emutls_function_body): Likewise.
+ * tree-if-conv.c (main_tree_if_conversion): Likewise.
+ * tree-inline.c (optimize_inline_calls): Likewise.
+ * tree-into-ssa.c (rewrite_into_ssa, update_ssa): Likewise.
+ * tree-nrv.c (tree_nrv, execute_return_slot_opt): Likewise.
+ * tree-object-size.c (compute_object_sizes): Likewise.
+ * tree-outof-ssa.c (eliminate_useless_phis, rewrite_trees,
+ insert_backedge_copies, tree_profiling): Likewise.
+ * tree-scalar-evolution.c (scev_const_prop): Likewise.
+ * tree-sra.c (scan_function, sra_modify_function_body,
+ propagate_dereference_distances, ipa_sra_modify_function_body,
+ convert_callers): Likewise.
+ * tree-ssa-ccp.c (ccp_initialize, execute_fold_all_builtins): Likewise.
+ * tree-ssa-coalesce.c (build_ssa_conflict_graph,
+ create_outofssa_var_map, coalesce_partitions): Likewise.
+ * tree-ssa-copy.c (init_copy_prop): Likewise.
+ * tree-ssa-copyrename.c (rename_ssa_copies): Likewise.
+ * tree-ssa-dce.c (find_obviously_necessary_stmts,
+ eliminate_unnecessary_stmts): Likewise.
+ * tree-ssa-dom.c (free_all_edge_infos, tree_ssa_dominator_optimize):
+ Likewise.
+ * tree-ssa-forwprop.c (ssa_forward_propagate_and_combine): Likewise.
+ * tree-ssa-live.c (clear_unused_block_pointer, remove_unused_locals,
+ new_tree_live_info, calculate_live_on_exit, dump_live_info,
+ analyze_memory_references, fill_always_executed_in,
+ tree_ssa_lim_finalize): Likewise.
+ * tree-ssa-loop-manip.c (find_uses_to_rename, verify_loop_closed_ssa):
+ Likewise.
+ * tree-ssa-math-opts.c (execute_cse_reciprocals, execute_cse_sincos,
+ execute_optimize_bswap, execute_optimize_widening_mul): Likewise.
+ * tree-ssa-propagate.c (substitute_and_fold): Likewise.
+ * tree-ssa-structalias.c (compute_points_to_sets): Likewise.
+ * tree-ssa-tail-merge.c (find_same_succ, reset_cluster_vectors):
+ Likewise.
+ * tree-ssa-ter.c (find_replaceable_exprs): Likewise.
+ * tree-ssa-threadupdate.c (thread_through_all_blocks): Likewise.
+ * tree-ssa-uncprop.c (associate_equivalences_with_edges,
+ tree_ssa_uncprop): Likewise.
+ * tree-ssa-uninit.c (warn_uninitialized_vars,
+ execute_late_warn_uninitialized): Likewise.
+ * tree-ssa.c (verify_ssa, execute_update_addresses_taken): Likewise.
+ * tree-stdarg.c (check_all_va_list_escapes, execute_optimize_stdarg):
+ Likewise.
+ * tree-switch-conversion.c (do_switchconv): Likewise.
+ * tree-vect-generic.c (expand_vector_operations): Likewise.
+ * tree-vectorizer.c (adjust_simduid_builtins, note_simd_array_uses,
+ execute_vect_slp): Likewise.
+ * tree-vrp.c (check_all_array_refs, remove_range_assertions,
+ vrp_initialize, identify_jump_threads, instrument_memory_accesses):
+ Likewise.
+ * ubsan.c (ubsan_pass): Likewise.
+ * value-prof.c (verify_histograms, gimple_value_profile_transformations,
+ gimple_find_values_to_profile): Likewise.
+ * var-tracking.c (vt_find_locations, dump_dataflow_sets, vt_emit_notes,
+ vt_initialize, delete_debug_insns, vt_finalize): Likewise.
+
+2013-12-09 David Malcolm <dmalcolm@redhat.com>
+
* basic-block.h (last_basic_block): Eliminate macro.
* asan.c (transform_statements): Eliminate use of last_basic_block
gimple_stmt_iterator i;
int saved_last_basic_block = last_basic_block_for_fn (cfun);
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
{
basic_block prev_bb = bb;
{
basic_block bb;
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
{
gimple_stmt_iterator gsi;
for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
reg_next_use = XCNEWVEC (rtx, max_reg);
reg_next_inc_use = XCNEWVEC (rtx, max_reg);
reg_next_def = XCNEWVEC (rtx, max_reg);
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
merge_in_block (max_reg, bb);
free (reg_next_use);
#define FOR_EACH_BB_FN(BB, FN) \
FOR_BB_BETWEEN (BB, (FN)->cfg->x_entry_block_ptr->next_bb, (FN)->cfg->x_exit_block_ptr, next_bb)
-#define FOR_EACH_BB(BB) FOR_EACH_BB_FN (BB, cfun)
-
#define FOR_EACH_BB_REVERSE_FN(BB, FN) \
FOR_BB_BETWEEN (BB, (FN)->cfg->x_exit_block_ptr->prev_bb, (FN)->cfg->x_entry_block_ptr, prev_bb)
vec<basic_block> bbs_in_hot_partition = vNULL;
/* Mark which partition (hot/cold) each basic block belongs in. */
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
{
bool cold_bb = false;
/* Mark every edge that crosses between sections. */
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
FOR_EACH_EDGE (e, ei, bb->succs)
{
unsigned int flags = e->flags;
{
basic_block bb;
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
{
edge e;
edge_iterator ei;
rtx old_jump;
rtx fall_thru_label;
- FOR_EACH_BB (cur_bb)
+ FOR_EACH_BB_FN (cur_bb, cfun)
{
fall_thru = NULL;
if (EDGE_COUNT (cur_bb->succs) > 0)
rtx old_label = NULL_RTX;
rtx new_label;
- FOR_EACH_BB (cur_bb)
+ FOR_EACH_BB_FN (cur_bb, cfun)
{
crossing_edge = NULL;
if (EDGE_COUNT (cur_bb->succs) > 0)
rtx cur_insn;
edge succ;
- FOR_EACH_BB (cur_bb)
+ FOR_EACH_BB_FN (cur_bb, cfun)
{
last_insn = BB_END (cur_bb);
edge e;
edge_iterator ei;
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
FOR_EACH_EDGE (e, ei, bb->succs)
if ((e->flags & EDGE_CROSSING)
&& JUMP_P (BB_END (e->src))
if (!crtl->has_bb_partition)
return;
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
{
if (!current_partition)
current_partition = BB_PARTITION (bb);
reorder_basic_blocks ();
cleanup_cfg (CLEANUP_EXPENSIVE);
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
if (bb->next_bb != EXIT_BLOCK_PTR_FOR_FN (cfun))
bb->aux = bb->next_bb;
cfg_layout_finalize ();
/* Look for blocks that end in a computed jump, and see if such blocks
are suitable for unfactoring. If a block is a candidate for unfactoring,
mark it in the candidates. */
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
{
rtx insn;
edge e;
goto done;
/* Duplicate computed gotos. */
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
{
if (bb->flags & BB_VISITED)
continue;
edge e;
edge_iterator ei;
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
{
FOR_EACH_EDGE (e, ei, bb->succs)
free_edge (e);
basic_block bb;
i = NUM_FIXED_BLOCKS;
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
{
SET_BASIC_BLOCK_FOR_FN (cfun, i, bb);
bb->index = i;
{
basic_block bb;
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
{
dump_bb_info (file, bb, 0,
flags & (TDF_COMMENT | TDF_DETAILS),
/* Clear all the reachability flags. */
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
bb->flags &= ~BB_REACHABLE;
/* Add our starting points to the worklist. Almost always there will
{
basic_block bb;
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
if (EDGE_COUNT (bb->succs) == 0)
make_single_succ_edge (bb, EXIT_BLOCK_PTR_FOR_FN (cfun), EDGE_FAKE);
}
edge p;
edge_iterator ei;
basic_block b;
- FOR_EACH_BB (b)
+ FOR_EACH_BB_FN (b, cfun)
{
if (EDGE_COUNT (b->preds) >= 2)
{
bitmap_clear (visited);
MARK_VISITED (ENTRY_BLOCK_PTR_FOR_FN (cfun));
- FOR_EACH_BB (x)
+ FOR_EACH_BB_FN (x, cfun)
{
if (VISITED_P (x))
continue;
{
basic_block bb, min, max;
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
SET_STATE (bb,
bitmap_bit_p (blocks, bb->index) ? BLOCK_TO_SPLIT : BLOCK_ORIGINAL);
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
if (STATE (bb) == BLOCK_TO_SPLIT)
find_bb_boundaries (bb);
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
if (STATE (bb) != BLOCK_ORIGINAL)
break;
compute_outgoing_frequencies (bb);
}
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
SET_STATE (bb, 0);
}
crossjumps_occured = false;
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
update_forwarder_flag (bb);
if (! targetm.cannot_modify_jumps_p ())
/* A dead jump table does not belong to any basic block. Scan insns
between two adjacent basic blocks. */
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
{
rtx insn, next;
}
}
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
add_scope_conflicts_1 (bb, work, true);
free (rpo);
basic_block bb;
gimple_stmt_iterator gsi;
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
{
gimple stmt = gsi_stmt (gsi);
if (!file)
return;
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
{
edge succ;
edge_iterator ei;
gcc_assert (loop->num_nodes == (unsigned) n_basic_blocks_for_fn (cfun));
body[tv++] = loop->header;
body[tv++] = EXIT_BLOCK_PTR_FOR_FN (cfun);
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
body[tv++] = bb;
}
else
loop_exit_hash, loop_exit_eq,
loop_exit_free);
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
{
FOR_EACH_EDGE (e, ei, bb->succs)
{
verify_dominators (CDI_DOMINATORS);
/* Check the headers. */
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
if (bb_loop_header_p (bb))
{
if (bb->loop_father->header == NULL)
{
/* Record old info. */
irreds = sbitmap_alloc (last_basic_block_for_fn (cfun));
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
{
edge_iterator ei;
if (bb->flags & BB_IRREDUCIBLE_LOOP)
mark_irreducible_loops ();
/* Compare. */
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
{
edge_iterator ei;
sizes = XCNEWVEC (unsigned, num);
memset (sizes, 0, sizeof (unsigned) * num);
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
{
edge_iterator ei;
if (bb->loop_father == current_loops->tree_root)
if (number_of_loops (cfun) <= 1)
return;
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
{
edge_iterator ei;
{
basic_block bb;
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
{
rtx end = BB_END (bb);
rtx insn;
/* Callers check this. */
gcc_checking_assert (crtl->has_bb_partition);
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
if ((BB_PARTITION (bb) == BB_COLD_PARTITION))
bbs_in_cold_partition.safe_push (bb);
|| current_ir_type () != IR_RTL_CFGRTL)
return err;
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
{
if (current_partition != BB_UNPARTITIONED
&& BB_PARTITION (bb) != current_partition)
int purged = false;
basic_block bb;
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
{
bool purged_here = purge_dead_edges (bb);
bool inserted = false;
basic_block bb;
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
{
edge e;
edge_iterator ei;
cfg_layout_function_header = NULL_RTX;
next_insn = get_insns ();
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
{
rtx end;
{
basic_block bb;
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
if (bb->next_bb != EXIT_BLOCK_PTR_FOR_FN (cfun))
bb->aux = bb->next_bb;
relink_block_chain (/*stay_in_cfglayout_mode=*/false);
/* Annoying special case - jump around dead jumptables left in the code. */
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
{
edge e = find_fallthru_edge (bb->succs);
/* Ensure goto_locus from edges has some instructions with that locus
in RTL. */
if (!optimize)
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
{
edge e;
edge_iterator ei;
/* Fix up the chain of blocks -- make FORWARDER immediately precede the
exit block. */
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
{
if (bb->aux == NULL && bb != forwarder)
{
superblocks = sbitmap_alloc (last_basic_block_for_fn (cfun));
bitmap_clear (superblocks);
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
if (bb->flags & BB_SUPERBLOCK)
{
bb->flags &= ~BB_SUPERBLOCK;
/* Create the callgraph edges and record the nodes referenced by the function.
body. */
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
{
for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
{
node->count = ENTRY_BLOCK_PTR_FOR_FN (cfun)->count;
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
{
for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
{
node->count = ENTRY_BLOCK_PTR_FOR_FN (cfun)->count;
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
{
for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
ipa_record_stmt_references (node, gsi_stmt (gsi));
{
basic_block bb;
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
combine_stack_adjustments_for_block (bb);
}
rtx insn, next;
basic_block bb;
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
{
for (insn = BB_HEAD (bb); insn != NEXT_INSN (BB_END (bb)); insn = next)
{
usage -- these are taken from original flow.c did. Don't ask me why it is
done this way; I don't know and if it works, I don't want to know. */
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
{
FOR_BB_INSNS_REVERSE (bb, insn)
{
last_bb = ENTRY_BLOCK_PTR_FOR_FN (cfun);
create_log_links ();
- FOR_EACH_BB (this_basic_block)
+ FOR_EACH_BB_FN (this_basic_block, cfun)
{
optimize_this_for_speed_p = optimize_bb_for_speed_p (this_basic_block);
last_call_luid = 0;
setup_incoming_promotions (first);
last_bb = ENTRY_BLOCK_PTR_FOR_FN (cfun);
- FOR_EACH_BB (this_basic_block)
+ FOR_EACH_BB_FN (this_basic_block, cfun)
{
rtx last_combined_insn = NULL_RTX;
optimize_this_for_speed_p = optimize_bb_for_speed_p (this_basic_block);
{
basic_block bb;
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
{
rtx dest, src;
rtx pat, op0, set = NULL;
compute_bb_for_insn ();
df_analyze ();
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
{
rtx insn;
bfin_gen_bundles (void)
{
basic_block bb;
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
{
rtx insn, next;
rtx slot[3];
reorder_var_tracking_notes (void)
{
basic_block bb;
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
{
rtx insn, next;
rtx queue = NULL_RTX;
basic_block bb;
rtx insn, next, last_call;
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
{
rtx insn, next;
/* The machine is eight insns wide. We can have up to six shadow
{
basic_block bb;
rtx insn;
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
FOR_BB_INSNS (bb, insn)
{
unsigned uid = INSN_UID (insn);
if (c6x_flag_schedule_insns2)
{
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
if ((bb->flags & BB_DISABLE_SCHEDULE) == 0)
assign_reservations (BB_HEAD (bb), BB_END (bb));
}
df_note_add_problem ();
df_analyze ();
}
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
FOR_BB_INSNS (bb, insn)
{
enum attr_fp_mode selected_mode;
first_io = XCNEWVEC (struct frv_io, last_basic_block_for_fn (cfun));
last_membar = XCNEWVEC (rtx, last_basic_block_for_fn (cfun));
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
frv_optimize_membar_local (bb, &first_io[bb->index],
&last_membar[bb->index]);
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
if (last_membar[bb->index] != 0)
frv_optimize_membar_global (bb, first_io, last_membar[bb->index]);
add_to_hard_reg_set (&set_up_by_prologue, Pmode, ARG_POINTER_REGNUM);
add_to_hard_reg_set (&set_up_by_prologue, Pmode,
HARD_FRAME_POINTER_REGNUM);
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
{
rtx insn;
FOR_BB_INSNS (bb, insn)
/* We can't let modulo-sched prevent us from scheduling any bbs,
since we need the final schedule to produce bundle information. */
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
bb->flags &= ~BB_DISABLE_SCHEDULE;
initiate_bundle_states ();
basic_block bb;
rtx insn;
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
FOR_BB_INSNS (bb, insn)
{
rtx call, reg, symbol, second_call;
{
basic_block bb;
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
{
rtx insn, next, last_insn = NULL_RTX;
rtx queue = NULL_RTX;
if (TARGET_NO_SDMODE_STACK)
return;
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
{
tree ret = walk_gimple_op (gsi_stmt (gsi), rs6000_check_sdmode, NULL);
if (!call_really_used_regs[i])
regs_ever_clobbered[i] = 1;
- FOR_EACH_BB (cur_bb)
+ FOR_EACH_BB_FN (cur_bb, cfun)
{
FOR_BB_INSNS (cur_bb, cur_insn)
{
// Look for basic blocks that end with a conditional branch and try to
// optimize them.
basic_block bb;
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
{
rtx i = BB_END (bb);
if (any_condjump_p (i) && onlyjump_p (i))
find_many_sub_basic_blocks (blocks);
/* We have to schedule to make sure alignment is ok. */
- FOR_EACH_BB (bb) bb->flags &= ~BB_DISABLE_SCHEDULE;
+ FOR_EACH_BB_FN (bb, cfun) bb->flags &= ~BB_DISABLE_SCHEDULE;
/* The hints need to be scheduled, so call it again. */
schedule_insns ();
tilegx_gen_bundles (void)
{
basic_block bb;
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
{
rtx insn, next;
rtx end = NEXT_INSN (BB_END (bb));
reorder_var_tracking_notes (void)
{
basic_block bb;
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
{
rtx insn, next;
rtx queue = NULL_RTX;
tilepro_gen_bundles (void)
{
basic_block bb;
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
{
rtx insn, next;
rtx end = NEXT_INSN (BB_END (bb));
reorder_var_tracking_notes (void)
{
basic_block bb;
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
{
rtx insn, next;
rtx queue = NULL_RTX;
basic_block bb;
unsigned chksum = n_basic_blocks_for_fn (cfun);
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
{
edge e;
edge_iterator ei;
/* Allocate vars to track sets of regs. */
reg_set_bitmap = ALLOC_REG_SET (NULL);
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
{
rtx insn;
aren't recorded for the local pass so they cannot be propagated within
their basic block by this pass and 2) the global pass would otherwise
propagate them only in the successors of their basic block. */
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
{
int index = implicit_set_indexes[bb->index];
if (index != -1)
unsigned i;
cselib_init (0);
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
{
FOR_BB_INSNS (bb, insn)
{
implicit_sets = XCNEWVEC (rtx, implicit_sets_size);
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
{
/* Check for more than one successor. */
if (EDGE_COUNT (bb->succs) <= 1)
else
cc_reg_2 = NULL_RTX;
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
{
rtx last_insn;
rtx cc_reg;
if (!df_in_progress && ACCUMULATE_OUTGOING_ARGS)
arg_stores = BITMAP_ALLOC (NULL);
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
{
FOR_BB_INSNS_REVERSE_SAFE (bb, insn, prev)
if (NONDEBUG_INSN_P (insn))
bitmap_set_bit (dflow->out_of_date_transfer_functions, EXIT_BLOCK);
i = NUM_FIXED_BLOCKS;
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
{
if (bitmap_bit_p (&tmp, bb->index))
bitmap_set_bit (dflow->out_of_date_transfer_functions, i);
place in the block_info vector. Null out the copied
item. The entry and exit blocks never move. */
i = NUM_FIXED_BLOCKS;
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
{
df_set_bb_info (dflow, i,
(char *)problem_temps
bitmap_copy (&tmp, df->blocks_to_analyze);
bitmap_clear (df->blocks_to_analyze);
i = NUM_FIXED_BLOCKS;
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
{
if (bitmap_bit_p (&tmp, bb->index))
bitmap_set_bit (df->blocks_to_analyze, i);
bitmap_clear (&tmp);
i = NUM_FIXED_BLOCKS;
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
{
SET_BASIC_BLOCK_FOR_FN (cfun, i, bb);
bb->index = i;
bitmap_obstack_initialize (&problem_data->word_lr_bitmaps);
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
bitmap_set_bit (df_word_lr->out_of_date_transfer_functions, bb->index);
bitmap_set_bit (df_word_lr->out_of_date_transfer_functions, ENTRY_BLOCK);
fprintf (file, "} ");
}
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
FOR_BB_INSNS (bb, insn)
if (INSN_P (insn))
{
df_set_bb_dirty (BASIC_BLOCK_FOR_FN (cfun, EXIT_BLOCK));
/* Regular blocks */
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
{
unsigned int bb_index = bb->index;
df_bb_refs_record (bb_index, true);
bitmap_clear (&df->insns_to_rescan);
bitmap_clear (&df->insns_to_notes_rescan);
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
{
rtx insn;
FOR_BB_INSNS (bb, insn)
/* The call insns need to be rescanned because there may be changes
in the set of registers clobbered across the call. */
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
{
rtx insn;
FOR_BB_INSNS (bb, insn)
calc_dfs_tree (&di, reverse);
calc_idoms (&di, reverse);
- FOR_EACH_BB (b)
+ FOR_EACH_BB_FN (b, cfun)
{
TBB d = di.dom[di.dfs_order[b->index]];
calc_dfs_tree (&di, reverse);
calc_idoms (&di, reverse);
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
{
imm_bb = get_immediate_dominator (dir, bb);
if (!imm_bb)
debug_dominance_info (enum cdi_direction dir)
{
basic_block bb, bb2;
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
if ((bb2 = get_immediate_dominator (dir, bb)))
fprintf (stderr, "%i %i\n", bb->index, bb2->index);
}
dse_step5_nospill (void)
{
basic_block bb;
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
{
bb_info_t bb_info = bb_table[bb->index];
insn_info_t insn_info = bb_info->last_insn;
commit_edge_insertions ();
/* Redirect all EH edges from the post_landing_pad to the landing pad. */
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
{
eh_landing_pad lp;
edge_iterator ei;
flow_loops_dump (dump_file, NULL, 1);
}
loop_optimizer_init (AVOID_CFG_MODIFICATIONS);
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
if (bb->frequency > freq_max)
freq_max = bb->frequency;
freq_threshold = freq_max / PARAM_VALUE (PARAM_ALIGN_THRESHOLD);
if (dump_file)
fprintf (dump_file, "freq_max: %i\n",freq_max);
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
{
rtx label = BB_HEAD (bb);
int fallthru_frequency = 0, branch_frequency = 0, has_fallthru = 0;
max_grow_size = get_uncond_jump_length ();
max_grow_size *= PARAM_VALUE (PARAM_MAX_GROW_COPY_BB_INSNS);
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
{
rtx insn;
unsigned size = 0;
needing a prologue. */
bitmap_clear (&bb_on_list);
bitmap_and_compl (&bb_antic_flags, &bb_flags, &bb_tail);
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
{
if (!bitmap_bit_p (&bb_antic_flags, bb->index))
continue;
/* Find exactly one edge that leads to a block in ANTIC from
a block that isn't. */
if (!bitmap_bit_p (&bb_antic_flags, entry_edge->dest->index))
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
{
if (!bitmap_bit_p (&bb_antic_flags, bb->index))
continue;
/* Find tail blocks reachable from both blocks needing a
prologue and blocks not needing a prologue. */
if (!bitmap_empty_p (&bb_tail))
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
{
bool some_pro, some_no_pro;
if (!bitmap_bit_p (&bb_tail, bb->index))
we take advantage of cfg_layout_finalize using
fixup_fallthru_exit_predecessor. */
cfg_layout_initialize (0);
- FOR_EACH_BB (cur_bb)
+ FOR_EACH_BB_FN (cur_bb, cfun)
if (cur_bb->index >= NUM_FIXED_BLOCKS
&& cur_bb->next_bb->index >= NUM_FIXED_BLOCKS)
cur_bb->aux = cur_bb->next_bb;
return 0;
df_set_flags (DF_DEFER_INSN_RESCAN);
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
{
FOR_BB_INSNS (bb, insn)
{
for (i = 0; i < max_reg_num (); ++i)
reg_avail_info[i].last_bb = NULL;
- FOR_EACH_BB (current_bb)
+ FOR_EACH_BB_FN (current_bb, cfun)
{
rtx insn;
unsigned int regno;
}
}
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
{
edge e;
edge_iterator ei;
~(TRANSP | COMP)
*/
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
{
bitmap_ior (ae_kill[bb->index], transp[bb->index], comp[bb->index]);
bitmap_not (ae_kill[bb->index], ae_kill[bb->index]);
{
fprintf (dump_file, "hoisting vbeinout computation: %d passes\n", passes);
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
{
fprintf (dump_file, "vbein (%d): ", bb->index);
dump_bitmap_file (dump_file, hoist_vbein[bb->index]);
to_bb_head = XCNEWVEC (int, get_max_uid ());
bb_size = XCNEWVEC (int, last_basic_block_for_fn (cfun));
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
{
rtx insn;
int to_head;
ira_setup_eliminable_regset ();
curr_regs_live = BITMAP_ALLOC (®_obstack);
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
{
curr_bb = bb;
BB_DATA (bb)->live_in = BITMAP_ALLOC (NULL);
return;
fprintf (dump_file, "\nRegister Pressure: \n");
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
{
fprintf (dump_file, " Basic block %d: \n", bb->index);
for (i = 0; (int) i < ira_pressure_classes_num; i++)
pre_ldst_mems = NULL;
pre_ldst_table.create (13);
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
{
FOR_BB_INSNS (bb, insn)
{
gsi_commit_one_edge_insert (single_succ_edge (ENTRY_BLOCK_PTR_FOR_FN (cfun)),
NULL);
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
FOR_EACH_EDGE (e, ei, bb->succs)
gsi_commit_one_edge_insert (e, NULL);
}
{
basic_block bb;
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
{
gimple_stmt_iterator si;
{
basic_block bb;
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
{
gimple_stmt_iterator si;
gimple_stmt_iterator psi;
sese region = SCOP_REGION (scop);
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
if (bb_in_sese_p (bb, region))
for (psi = gsi_start_phis (bb); !gsi_end_p (psi);)
{
/* Create an extra empty BB after the scop. */
split_edge (SESE_EXIT (region));
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
if (bb_in_sese_p (bb, region))
for (psi = gsi_start_bb (bb); !gsi_end_p (psi); gsi_next (&psi))
changed |= rewrite_cross_bb_scalar_deps (scop, &psi);
sched_init_bbs ();
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
bbs.quick_push (bb);
sched_init_luids (bbs);
sched_deps_init (true);
/* Find all the possible loop tails. This means searching for every
loop_end instruction. For each one found, create a hwloop_info
structure and add the head block to the work list. */
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
{
rtx tail = BB_END (bb);
rtx insn, reg;
intptr_t index;
index = 0;
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
bb->aux = (void *) index++;
}
loops = loops->next;
}
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
{
if (bb->next_bb != EXIT_BLOCK_PTR_FOR_FN (cfun))
bb->aux = bb->next_bb;
fprintf (dump_file, "\n\n========== Pass %d ==========\n", pass);
#endif
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
{
basic_block new_bb;
while (!df_get_bb_dirty (bb)
df_analyze ();
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
{
rtx insn;
bitmap lr = DF_LR_IN (bb);
descriptors.safe_grow_cleared (param_count);
ipa_populate_param_decls (node, descriptors);
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
{
struct ipa_agg_replacement_value *v;
push_cfun (DECL_STRUCT_FUNCTION (decl));
- FOR_EACH_BB (this_block)
+ FOR_EACH_BB_FN (this_block, cfun)
{
gimple_stmt_iterator gsi;
struct walk_stmt_info wi;
stack.pop ();
}
ENTRY_BLOCK_PTR_FOR_FN (cfun)->aux = NULL;
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
bb->aux = NULL;
stack.release ();
BITMAP_FREE (current.ssa_names_to_pass);
/* Compute local info about basic blocks and determine function size/time. */
bb_info_vec.safe_grow_cleared (last_basic_block_for_fn (cfun) + 1);
memset (&best_split_point, 0, sizeof (best_split_point));
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
{
int time = 0;
int size = 0;
/* We can not use loop/bb node access macros because of potential
checking and because the nodes are not initialized enough
yet. */
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
{
bb_node = &ira_bb_nodes[bb->index];
bb_node->bb = bb;
{
basic_block bb;
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
process_bb_for_costs (bb);
}
edge e;
rtx insns, tmp;
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
{
if (at_bb_start[bb->index] != NULL)
{
bitmap live_through;
live_through = ira_allocate_bitmap ();
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
{
/* It does not matter what loop_tree_node (of source or
destination block) to use for searching allocnos by their
ira_free_bitmap (renamed_regno_bitmap);
ira_free_bitmap (local_allocno_bitmap);
setup_entered_from_non_parent_p ();
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
{
at_bb_start[bb->index] = NULL;
at_bb_end[bb->index] = NULL;
memset (allocno_last_set_check, 0, sizeof (int) * max_reg_num ());
memset (hard_regno_last_set_check, 0, sizeof (hard_regno_last_set_check));
curr_tick = 0;
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
unify_moves (bb, true);
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
unify_moves (bb, false);
move_vec.create (ira_allocnos_num);
emit_moves ();
add_ranges_and_copies ();
/* Clean up: */
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
{
free_move_list (at_bb_start[bb->index]);
free_move_list (at_bb_end[bb->index]);
reload assumes initial insn codes defined. The insn codes can be
invalidated by CFG infrastructure for example in jump
redirection. */
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
FOR_BB_INSNS_REVERSE (bb, insn)
if (INSN_P (insn))
recog_memoized (insn);
if (ira_dump_file)
fprintf (ira_dump_file, "Starting decreasing number of live ranges...\n");
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
FOR_BB_INSNS (bb, insn)
{
set = single_set (insn);
{
basic_block bb;
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
{
rtx insn;
FOR_BB_INSNS_REVERSE (bb, insn)
basic_block bb;
bitmap r;
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
{
r = DF_LR_IN (bb);
if (bitmap_bit_p (r, from))
paradoxical subreg. Don't set such reg sequivalent to a mem,
because lra will not substitute such equiv memory in order to
prevent access beyond allocated memory for paradoxical memory subreg. */
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
FOR_BB_INSNS (bb, insn)
if (NONDEBUG_INSN_P (insn))
for_each_rtx (&insn, set_paradoxical_subreg, (void *) pdx_subregs);
/* Scan the insns and find which registers have equivalences. Do this
in a separate scan of the insns because (due to -fcse-follow-jumps)
a register can be set below its use. */
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
{
loop_depth = bb_loop_depth (bb);
if (!bitmap_empty_p (cleared_regs))
{
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
{
bitmap_and_compl_into (DF_LR_IN (bb), cleared_regs);
bitmap_and_compl_into (DF_LR_OUT (bb), cleared_regs);
bitmap_initialize (&used, 0);
bitmap_initialize (&set, 0);
bitmap_initialize (&unusable_as_input, 0);
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
{
rtx insn;
bitmap transp = bb_transp_live + bb->index;
bitmap_clear (&used);
bitmap_clear (&set);
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
{
bitmap local = bb_local + bb->index;
rtx insn;
}
}
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
{
bitmap_clear (bb_local + bb->index);
bitmap_clear (bb_transp_live + bb->index);
bitmap_initialize (&reachable, 0);
queue.create (n_basic_blocks_for_fn (cfun));
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
FOR_BB_INSNS (bb, insn)
if (CALL_P (insn) && !SIBLING_CALL_P (insn))
{
fixed regs are accepted. */
SET_REGNO (preg, new_regno);
/* Update global register liveness information. */
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
{
if (REGNO_REG_SET_P (df_get_live_in (bb), regno))
SET_REGNO_REG_SET (df_get_live_in (bb), new_regno);
if (current_ir_type () == IR_RTL_CFGLAYOUT)
{
basic_block bb;
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
{
/* In cfglayout mode, we don't bother with trivial next-insn
propagation of LABEL_REFs into JUMP_LABEL. This will be
/* Add all the blocks to the worklist. This prevents an early exit from
the loop given our optimistic initialization of LATER above. */
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
{
*qin++ = bb;
bb->aux = bb;
int x;
basic_block bb;
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
bitmap_and_compl (del[bb->index], antloc[bb->index],
laterin[bb->index]);
/* Put every block on the worklist; this is necessary because of the
optimistic initialization of AVOUT above. */
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
{
*qin++ = bb;
bb->aux = bb;
/* Add all the blocks to the worklist. This prevents an early exit
from the loop given our optimistic initialization of NEARER. */
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
{
*tos++ = bb;
bb->aux = bb;
int x;
basic_block bb;
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
bitmap_and_compl (del[bb->index], st_avloc[bb->index],
nearerout[bb->index]);
/* Remember the depth of the blocks in the loop hierarchy, so that we can
recognize blocks whose loop nesting relationship has changed. */
if (changed_bbs)
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
bb->aux = (void *) (size_t) loop_depth (bb->loop_father);
/* Remove the dead loops from structures. We start from the innermost
/* Mark the blocks whose loop has changed. */
if (changed_bbs)
{
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
{
if ((void *) (size_t) loop_depth (bb->loop_father) != bb->aux)
bitmap_set_bit (changed_bbs, bb->index);
}
ira_setup_eliminable_regset ();
bitmap_initialize (&curr_regs_live, ®_obstack);
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
{
curr_loop = bb->loop_father;
if (curr_loop == current_loops->tree_root)
memset (reg_copy_graph.address (), 0, sizeof (bitmap) * max);
speed_p = optimize_function_for_speed_p (cfun);
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
{
rtx insn;
EXECUTE_IF_SET_IN_BITMAP (decomposable_context, 0, regno, iter)
decompose_register (regno);
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
{
rtx insn;
/* FIXME: Look up the changed insns in the cached LRA insn data using
an EXECUTE_IF_SET_IN_BITMAP over changed_insns. */
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
FOR_BB_INSNS (bb, insn)
if (bitmap_bit_p (&changed_insns, INSN_UID (insn)))
{
mv_num = 0;
/* Collect moves. */
coalesced_moves = 0;
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
{
FOR_BB_INSNS_SAFE (bb, insn, next)
if (INSN_P (insn)
}
}
bitmap_initialize (&used_pseudos_bitmap, ®_obstack);
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
{
update_live_info (df_get_live_in (bb));
update_live_info (df_get_live_out (bb));
bitmap_initialize (&live_regs, ®_obstack);
bitmap_initialize (&temp_bitmap, ®_obstack);
bitmap_initialize (&ebb_global_regs, ®_obstack);
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
{
start_bb = bb;
if (lra_dump_file != NULL)
because we need to marks insns affected by previous
inheritance/split pass for processing by the subsequent
constraint pass. */
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
{
fix_bb_live_info (df_get_live_in (bb), remove_pseudos);
fix_bb_live_info (df_get_live_out (bb), remove_pseudos);
struct elim_table *ep;
init_elim_table ();
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
{
curr_sp_change = 0;
stop_to_sp_elimination_p = false;
add_to_hard_reg_set (&reserved_hard_regs[p],
lra_reg_info[i].biggest_mode, hard_regno);
bitmap_initialize (&ok_insn_bitmap, ®_obstack);
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
FOR_BB_INSNS (bb, insn)
if (DEBUG_INSN_P (insn)
|| ((set = single_set (insn)) != NULL_RTX
bitmap_ior_into (&changed_insns, &lra_reg_info[i].insn_bitmap);
}
}
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
{
FOR_BB_INSNS (bb, insn)
if (bitmap_bit_p (&changed_insns, INSN_UID (insn)))
if (lra_reg_info[i].nrefs != 0
&& (hard_regno = lra_get_regno_hard_regno (i)) >= 0)
SET_REGNO (regno_reg_rtx[i], hard_regno);
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
FOR_BB_INSNS_SAFE (bb, insn, curr)
if (INSN_P (insn))
{
scratches.create (get_max_uid ());
bitmap_initialize (&scratch_bitmap, ®_obstack);
bitmap_initialize (&scratch_operand_bitmap, ®_obstack);
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
FOR_BB_INSNS (bb, insn)
if (INSN_P (insn))
{
rtx insn;
lra_assert (! final_p || reload_completed);
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
FOR_BB_INSNS (bb, insn)
if (NONDEBUG_INSN_P (insn)
&& GET_CODE (PATTERN (insn)) != USE
/* First determine which blocks can reach exit via normal paths. */
tos = worklist = XNEWVEC (basic_block, n_basic_blocks_for_fn (cfun) + 1);
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
bb->flags &= ~BB_REACHABLE;
/* Place the exit block on our worklist. */
basic_block bb;
rtx insn;
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
FOR_BB_INSNS (bb, insn)
if (NONDEBUG_INSN_P (insn))
{
{
fprintf (dump_file, "\nCheck %s() CFG flow conservation:\n",
current_function_name ());
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
{
if ((bb->count != sum_edge_counts (bb->preds))
|| (bb->count != sum_edge_counts (bb->succs)))
/* Determine what the first use (if any) need for a mode of entity E is.
This will be the mode that is anticipatable for this block.
Also compute the initial transparency settings. */
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
{
struct seginfo *ptr;
int last_mode = no_mode;
int m = current_mode[j] = MODE_PRIORITY_TO_MODE (entity_map[j], i);
struct bb_info *info = bb_info[j];
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
{
if (info[bb->index].seginfo->mode == m)
bitmap_set_bit (antic[bb->index], j);
/* Calculate the optimal locations for the
placement mode switches to modes with priority I. */
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
bitmap_not (kill[bb->index], transp[bb->index]);
edge_list = pre_edge_lcm (n_entities, transp, comp, antic,
kill, &insert, &del);
max_regno = max_reg_num ();
/* Finalize layout changes. */
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
if (bb->next_bb != EXIT_BLOCK_PTR_FOR_FN (cfun))
bb->aux = bb->next_bb;
free_dominance_info (CDI_DOMINATORS);
&& find_omp_clause (gimple_omp_task_clauses (entry_stmt),
OMP_CLAUSE_UNTIED) != NULL);
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
{
gimple call = gsi_stmt (gsi);
basic_block bb;
bool changed = false;
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
changed |= gimple_purge_dead_eh_edges (bb);
if (changed)
cleanup_tree_cfg ();
basic_block bb;
bool changed = false;
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
changed |= gimple_purge_dead_eh_edges (bb);
if (changed)
cleanup_tree_cfg ();
/* Find the largest UID and create a mapping from UIDs to CUIDs. */
uid_cuid = XCNEWVEC (int, get_max_uid () + 1);
i = 1;
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
FOR_BB_INSNS (bb, insn)
{
if (INSN_P (insn))
{
basic_block bb;
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
{
rtx insn;
cselib_init (CSELIB_RECORD_MEMORY);
init_alias_analysis ();
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
FOR_BB_INSNS (bb, insn)
{
if (INSN_P (insn))
gimple ass_stmt;
tree var;
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
{
gimple_stmt_iterator bi;
for (bi = gsi_start_bb (bb); !gsi_end_p (bi);)
apply_return_prediction ();
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
{
gimple_stmt_iterator gsi;
if (number_of_loops (cfun) > 1)
predict_loops ();
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
tree_estimate_probability_bb (bb);
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
combine_predictions_for_bb (bb);
#ifdef ENABLE_CHECKING
/* Maximally BB_FREQ_MAX^2 so overflow won't happen. */
limit = ENTRY_BLOCK_PTR_FOR_FN (cfun)->frequency * threshold;
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
{
rtx insn;
estimate_loops ();
memcpy (&freq_max, &real_zero, sizeof (real_zero));
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
if (sreal_compare (&freq_max, &BLOCK_INFO (bb)->frequency) < 0)
memcpy (&freq_max, &BLOCK_INFO (bb)->frequency, sizeof (freq_max));
functions to unlikely and that is most of what we care about. */
if (!cfun->after_inlining)
node->frequency = NODE_FREQUENCY_UNLIKELY_EXECUTED;
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
{
if (maybe_hot_bb_p (cfun, bb))
{
{
basic_block bb;
bool inconsistent = false;
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
{
inconsistent |= is_edge_inconsistent (bb->preds);
if (!dump_file && inconsistent)
/* If the graph has been correctly solved, every block will have a
succ and pred count of zero. */
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
{
gcc_assert (!BB_INFO (bb)->succ_count && !BB_INFO (bb)->pred_count);
}
We also add fake exit edges for each call and asm statement in the
basic, since it may not return. */
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
{
int need_exit_edge = 0, need_entry_edge = 0;
int have_exit_edge = 0, have_entry_edge = 0;
/* Initialize the output. */
output_location (NULL, 0, NULL, NULL);
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
{
gimple_stmt_iterator gsi;
gcov_position_t offset = 0;
rtx insn, set;
unsigned *def_map = XCNEWVEC (unsigned, max_insn_uid);
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
FOR_BB_INSNS (bb, insn)
{
if (!NONDEBUG_INSN_P (insn))
starting_stack_p = false;
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
if (bb != ENTRY_BLOCK_PTR_FOR_FN (cfun))
{
edge e;
/* ??? Process all unreachable blocks. Though there's no excuse
for keeping these even when not optimizing. */
- FOR_EACH_BB (b)
+ FOR_EACH_BB_FN (b, cfun)
{
block_info bi = BLOCK_INFO (b);
/* Set up block info for each basic block. */
alloc_aux_for_blocks (sizeof (struct block_info_def));
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
{
block_info bi = BLOCK_INFO (bb);
edge_iterator ei;
= create_alloc_pool ("debug insn changes pool",
sizeof (struct queued_debug_insn_change), 256);
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
{
bitmap_set_bit (visited, bb->index);
if (MAY_HAVE_DEBUG_INSNS)
{
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
if (bitmap_bit_p (visited, bb->index)
&& all_vd[bb->index].n_debug_insn_changes)
{
bitmap_obstack_initialize (&srom_obstack);
subregs_of_mode = BITMAP_ALLOC (&srom_obstack);
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
FOR_BB_INSNS (bb, insn)
if (NONDEBUG_INSN_P (insn))
find_subregs_of_mode (PATTERN (insn), subregs_of_mode);
/* Gather some information about the blocks in this function. */
rename_info = XCNEWVEC (struct bb_rename_info, n_basic_blocks_for_fn (cfun));
i = 0;
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
{
struct bb_rename_info *ri = rename_info + i;
ri->bb = bb;
We perform the analysis for both incoming and outgoing edges, but we
only need to merge once (in the second part, after verifying outgoing
edges). */
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
{
struct bb_rename_info *bb_ri = (struct bb_rename_info *) bb->aux;
unsigned j;
}
}
}
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
{
struct bb_rename_info *bb_ri = (struct bb_rename_info *) bb->aux;
unsigned j;
free (rename_info);
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
bb->aux = NULL;
}
reg_info_p = XCNEWVEC (struct reg_info_t, max_regno);
local_live_last_luid = XNEWVEC (int, max_regno);
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
{
regstat_bb_compute_ri (bb->index, live, artificial_uses,
local_live, local_processed,
reg_info_p_size = max_regno;
reg_info_p = XCNEWVEC (struct reg_info_t, max_regno);
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
{
regstat_bb_compute_calls_crossed (bb->index, live);
}
/* First determine which blocks can reach exit via normal paths. */
tos = worklist = XNEWVEC (basic_block, n_basic_blocks_for_fn (cfun) + 1);
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
bb->flags &= ~BB_REACHABLE;
/* Place the exit block on our worklist. */
/* Now see if there's a reachable block with an exceptional incoming
edge. */
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
if (bb->flags & BB_REACHABLE && bb_has_abnormal_pred (bb))
return true;
pseudo. */
if (! frame_pointer_needed)
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
bitmap_clear_bit (df_get_live_in (bb), HARD_FRAME_POINTER_REGNUM);
/* Come here (with failure set nonzero) if we can't get enough spill
set_initial_elim_offsets ();
set_initial_label_offsets ();
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
{
rtx insn;
elim_bb = bb;
bb_ticks = XCNEWVEC (int, last_basic_block_for_fn (cfun));
/* Set the BLOCK_FOR_INSN of each label that starts a basic block. */
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
if (LABEL_P (BB_HEAD (bb)))
BLOCK_FOR_INSN (BB_HEAD (bb)) = bb;
}
bb_ticks = NULL;
}
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
if (LABEL_P (BB_HEAD (bb)))
BLOCK_FOR_INSN (BB_HEAD (bb)) = NULL;
}
schedule_ebbs_init ();
/* Schedule every region in the subroutine. */
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
{
rtx head = BB_HEAD (bb);
/* If we have insns which refer to labels as non-jumped-to operands,
then we consider the cfg not well structured. */
- FOR_EACH_BB (b)
+ FOR_EACH_BB_FN (b, cfun)
FOR_BB_INSNS (b, insn)
{
rtx note, next, set, dest;
Unreachable loops with a single block are detected here. This
test is redundant with the one in find_rgns, but it's much
cheaper to go ahead and catch the trivial case here. */
- FOR_EACH_BB (b)
+ FOR_EACH_BB_FN (b, cfun)
{
if (EDGE_COUNT (b->preds) == 0
|| (single_pred_p (b)
probability_cutoff = PARAM_VALUE (TRACER_MIN_BRANCH_PROBABILITY);
probability_cutoff = REG_BR_PROB_BASE / 100 * probability_cutoff;
- FOR_EACH_BB (ebb_start)
+ FOR_EACH_BB_FN (ebb_start, cfun)
{
RGN_NR_BLOCKS (nr_regions) = 0;
RGN_BLOCKS (nr_regions) = i;
}
}
else
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
{
rgn_bb_table[nr_regions] = bb->index;
RGN_NR_BLOCKS (nr_regions) = 1;
the entry node by placing a nonzero value in dfs_nr. Thus if
dfs_nr is zero for any block, then it must be unreachable. */
unreachable = 0;
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
if (dfs_nr[bb->index] == 0)
{
unreachable = 1;
to hold degree counts. */
degree = dfs_nr;
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
degree[bb->index] = EDGE_COUNT (bb->preds);
/* Do not perform region scheduling if there are any unreachable
/* Find blocks which are inner loop headers. We still have non-reducible
loops to consider at this point. */
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
{
if (bitmap_bit_p (header, bb->index) && bitmap_bit_p (inner, bb->index))
{
If there exists a block that is not dominated by the loop
header, then the block is reachable from outside the loop
and thus the loop is not a natural loop. */
- FOR_EACH_BB (jbb)
+ FOR_EACH_BB_FN (jbb, cfun)
{
/* First identify blocks in the loop, except for the loop
entry block. */
Place those blocks into the queue. */
if (no_loops)
{
- FOR_EACH_BB (jbb)
+ FOR_EACH_BB_FN (jbb, cfun)
/* Leaf nodes have only a single successor which must
be EXIT_BLOCK. */
if (single_succ_p (jbb)
/* Any block that did not end up in a region is placed into a region
by itself. */
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
if (degree[bb->index] >= 0)
{
rgn_bb_table[idx] = bb->index;
/* Use ->aux to implement EDGE_TO_BIT mapping. */
rgn_nr_edges = 0;
- FOR_EACH_BB (block)
+ FOR_EACH_BB_FN (block, cfun)
{
if (CONTAINING_RGN (block->index) != rgn)
continue;
rgn_edges = XNEWVEC (edge, rgn_nr_edges);
rgn_nr_edges = 0;
- FOR_EACH_BB (block)
+ FOR_EACH_BB_FN (block, cfun)
{
if (CONTAINING_RGN (block->index) != rgn)
continue;
/* Cleanup ->aux used for EDGE_TO_BIT mapping. */
/* We don't need them anymore. But we want to avoid duplication of
aux fields in the newly created edges. */
- FOR_EACH_BB (block)
+ FOR_EACH_BB_FN (block, cfun)
{
if (CONTAINING_RGN (block->index) != rgn)
continue;
if (flags & SEL_DUMP_CFG_FUNCTION_NAME)
fprintf (f, "function [label = \"%s\"];\n", current_function_name ());
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
{
insn_t insn = BB_HEAD (bb);
insn_t next_tail = NEXT_INSN (BB_END (bb));
basic_block bb;
/* Initialize of LV sets. */
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
init_lv_set (bb);
/* Don't forget EXIT_BLOCK. */
free_lv_set (EXIT_BLOCK_PTR_FOR_FN (cfun));
/* Free LV sets. */
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
if (BB_LV_SET (bb))
free_lv_set (bb);
}
for (i = 0; i < last_basic_block_for_fn (cfun); i++)
loop_hdr[i] = -1;
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
{
if (bb->loop_father && !bb->loop_father->num == 0
&& !(bb->flags & BB_IRREDUCIBLE_LOOP))
/* For each basic block degree is calculated as the number of incoming
edges, that are going out of bbs that are not yet scheduled.
The basic blocks that are scheduled have degree value of zero. */
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
{
degree[bb->index] = 0;
/* Any block that did not end up in a region is placed into a region
by itself. */
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
if (degree[bb->index] >= 0)
{
rgn_bb_table[cur_rgn_blocks] = bb->index;
basic_block bb;
struct loop *loop0, *loop1;
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
if (bb_in_sese_p (bb, region))
{
struct loop *loop = bb->loop_father;
{
basic_block bb;
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
sese_build_liveouts_bb (region, liveouts, bb);
if (MAY_HAVE_DEBUG_STMTS)
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
sese_reset_debug_liveouts_bb (region, liveouts, bb);
}
been used. */
crtl->sp_is_unchanging = !cfun->calls_alloca;
if (crtl->sp_is_unchanging)
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
FOR_BB_INSNS (bb, insn)
{
if (INSN_P (insn))
already_set = XNEWVEC (int, max_gcse_regno);
/* Find all the stores we care about. */
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
{
/* First compute the registers set in this block. */
FOR_BB_INSNS (bb, insn)
bitmap_vector_clear (st_transp, last_basic_block_for_fn (cfun));
regs_set_in_block = XNEWVEC (int, max_gcse_regno);
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
{
memset (regs_set_in_block, 0, sizeof (int) * max_gcse_regno);
/* Now we want to insert the new stores which are going to be needed. */
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
if (bitmap_bit_p (st_delete_map[bb->index], ptr->index))
{
delete_store (ptr, bb);
+2013-12-09 David Malcolm <dmalcolm@redhat.com>
+
+ * g++.dg/plugin/selfassign.c (execute_warn_self_assign): Eliminate
+ use of FOR_EACH_BB in favor of FOR_EACH_BB_FN, to make use of cfun
+ explicit.
+ * gcc.dg/plugin/selfassign.c (execute_warn_self_assign): Likewise.
+
2013-12-09 Richard Earnshaw <rearnsha@arm.com>
* gcc.target/arm/ldrd-strd-offset.c: New.
gimple_stmt_iterator gsi;
basic_block bb;
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
{
for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
warn_self_assign (gsi_stmt (gsi));
gimple_stmt_iterator gsi;
basic_block bb;
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
{
for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
warn_self_assign (gsi_stmt (gsi));
branch_ratio_cutoff =
(REG_BR_PROB_BASE / 100 * PARAM_VALUE (TRACER_MIN_BRANCH_RATIO));
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
{
int n = count_insns (bb);
if (!ignore_bb_p (bb))
certainly don't need it to calculate CDI_DOMINATOR info. */
gate_tm_init ();
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
bb->flags &= ~BB_IN_TRANSACTION;
for (region = all_tm_regions; region; region = region->next)
gimple_stmt_iterator i;
bool something_changed = false;
auto_vec<gimple> cond_dead_built_in_calls;
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
{
/* Collect dead call candidates. */
for (i = gsi_start_bb (bb); !gsi_end_p (i); gsi_next (&i))
}
/* Remove IFN_ANNOTATE. Safeguard for the case loop->latch == NULL. */
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
{
gsi = gsi_last_bb (bb);
stmt = gsi_stmt (gsi);
Examine the last statement in each basic block to see if the block
ends with a computed goto. */
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
{
gimple_stmt_iterator gsi = gsi_last_bb (bb);
gimple last;
{
basic_block bb;
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
{
gimple stmt = last_stmt (bb);
EDGE_FALLTHRU);
/* Traverse the basic block array placing edges. */
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
{
gimple last = last_stmt (bb);
bool fallthru;
{
basic_block bb;
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
{
edge e;
edge_iterator ei;
basic_block target_bb;
gimple_stmt_iterator gsi;
- FOR_EACH_BB (target_bb)
+ FOR_EACH_BB_FN (target_bb, cfun)
{
for (gsi = gsi_start_bb (target_bb); !gsi_end_p (gsi); gsi_next (&gsi))
{
/* Find a suitable label for each block. We use the first user-defined
label if there is one, or otherwise just the first label we see. */
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
{
gimple_stmt_iterator i;
/* Now redirect all jumps/branches to the selected label.
First do so for each block ending in a control statement. */
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
{
gimple stmt = last_stmt (bb);
tree label, new_label;
/* Finally, purge dead labels. All user-defined labels and labels that
can be the target of non-local gotos and labels which have their
address taken are preserved. */
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
{
gimple_stmt_iterator i;
tree label_for_this_bb = label_for_bb[bb->index].label;
{
basic_block bb;
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
{
gimple stmt = last_stmt (bb);
if (stmt && gimple_code (stmt) == GIMPLE_SWITCH)
SCALE (size), LABEL (size));
num_edges = 0;
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
num_edges += EDGE_COUNT (bb->succs);
size = num_edges * sizeof (struct edge_def);
total += size;
err = 1;
}
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
{
bool found_ctrl_stmt = false;
if (verbosity >= 1)
{
fprintf (file, "%s{\n", s_indent);
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
if (bb->loop_father == loop)
print_loops_bb (file, bb, indent, verbosity);
FOR_EACH_EDGE (e, ei, ENTRY_BLOCK_PTR_FOR_FN (cfun)->succs)
e->count = apply_scale (e->count, count_scale);
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
{
bb->count = apply_scale (bb->count, count_scale);
for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
recording of edge to CASE_LABEL_EXPR. */
start_recording_case_labels ();
- /* Start by iterating over all basic blocks. We cannot use FOR_EACH_BB,
+ /* Start by iterating over all basic blocks. We cannot use FOR_EACH_BB_FN,
since the basic blocks may get removed. */
n = last_basic_block_for_fn (cfun);
for (i = NUM_FIXED_BLOCKS; i < n; i++)
calculate_dominance_info (CDI_DOMINATORS);
/* Find all PHI nodes that we may be able to merge. */
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
{
basic_block dest;
gimple phi;
bool saw_a_complex_op = false;
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
{
for (gsi = gsi_start_phis (bb); !gsi_end_p (gsi); gsi_next (&gsi))
{
/* ??? Ideally we'd traverse the blocks in breadth-first order. */
old_last_basic_block = last_basic_block_for_fn (cfun);
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
{
if (bb->index >= old_last_basic_block)
continue;
memset ((void *)dfa_stats_p, 0, sizeof (struct dfa_stats_d));
/* Walk all the statements in the function counting references. */
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
{
gimple_stmt_iterator si;
memset (&wi, '\0', sizeof (wi));
wi.info = (void *) &decl_list;
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
{
gimple_stmt_iterator gsi;
mnt_map = pointer_map_create ();
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
{
gimple last = last_stmt (bb);
if (last && is_gimple_resx (last))
assign_filter_values ();
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
{
gimple last = last_stmt (bb);
if (last == NULL)
else
lp_reachable = NULL;
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
{
gimple_stmt_iterator gsi;
create a node for it. */
d.builtin_node = cgraph_get_create_node (d.builtin_decl);
- FOR_EACH_BB (d.bb)
+ FOR_EACH_BB_FN (d.bb, cfun)
{
gimple_stmt_iterator gsi;
unsigned int i, nedge;
#ifdef ENABLE_CHECKING
{
basic_block bb;
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
gcc_assert (!bb->aux);
}
#endif
will split id->current_basic_block, and the new blocks will
follow it; we'll trudge through them, processing their CALL_EXPRs
along the way. */
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
inlined_p |= gimple_expand_calls_inline (bb, &id);
pop_gimplify_context (NULL);
/* Initialize dominance frontier. */
dfs = XNEWVEC (bitmap_head, last_basic_block_for_fn (cfun));
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
bitmap_initialize (&dfs[bb->index], &bitmap_default_obstack);
/* 1- Compute dominance frontiers. */
rewrite_blocks (ENTRY_BLOCK_PTR_FOR_FN (cfun), REWRITE_ALL);
/* Free allocated memory. */
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
bitmap_clear (&dfs[bb->index]);
free (dfs);
/* If the caller requested PHI nodes to be added, compute
dominance frontiers. */
dfs = XNEWVEC (bitmap_head, last_basic_block_for_fn (cfun));
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
bitmap_initialize (&dfs[bb->index], &bitmap_default_obstack);
compute_dominance_frontiers (dfs);
insert_updated_phi_nodes_for (sym, dfs, blocks_to_update,
update_flags);
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
bitmap_clear (&dfs[bb->index]);
free (dfs);
return 0;
/* Look through each block for assignments to the RESULT_DECL. */
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
{
for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
{
RESULT. */
data.var = found;
data.result = result;
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
{
for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); )
{
{
basic_block bb;
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
{
gimple_stmt_iterator gsi;
for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
compute_object_sizes (void)
{
basic_block bb;
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
{
gimple_stmt_iterator i;
for (i = gsi_start_bb (bb); !gsi_end_p (i); gsi_next (&i))
gimple_stmt_iterator gsi;
tree result;
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
{
for (gsi = gsi_start_phis (bb); !gsi_end_p (gsi); )
{
/* Search for PHIs where the destination has no partition, but one
or more arguments has a partition. This should not happen and can
create incorrect code. */
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
{
gimple_stmt_iterator gsi;
for (gsi = gsi_start_phis (bb); !gsi_end_p (gsi); gsi_next (&gsi))
mark_dfs_back_edges ();
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
{
/* Mark block as possibly needing calculation of UIDs. */
bb->aux = &bb->aux;
push_cfun (DECL_STRUCT_FUNCTION (node->decl));
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
{
gimple_stmt_iterator gsi;
for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
if (number_of_loops (cfun) <= 1)
return 0;
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
{
loop = bb->loop_father;
basic_block bb;
bool ret = false;
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
{
gimple_stmt_iterator gsi;
for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
bool cfg_changed = false;
basic_block bb;
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
{
gimple_stmt_iterator gsi = gsi_start_bb (bb);
while (!gsi_end_p (gsi))
auto_vec<basic_block> queue (last_basic_block_for_fn (cfun));
queue.quick_push (ENTRY_BLOCK_PTR_FOR_FN (cfun));
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
{
queue.quick_push (bb);
bb->aux = bb;
bool cfg_changed = false;
basic_block bb;
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
{
gimple_stmt_iterator gsi;
if (!encountered_recursive_call)
return;
- FOR_EACH_BB (this_block)
+ FOR_EACH_BB_FN (this_block, cfun)
{
gimple_stmt_iterator gsi;
const_val = XCNEWVEC (prop_value_t, n_const_val);
/* Initialize simulation flags for PHI nodes and statements. */
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
{
gimple_stmt_iterator i;
/* Now process PHI nodes. We never clear the simulate_again flag on
phi nodes, since we do not know which edges are executable yet,
except for phi nodes for virtual operands when we do not do store ccp. */
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
{
gimple_stmt_iterator i;
basic_block bb;
unsigned int todoflags = 0;
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
{
gimple_stmt_iterator i;
for (i = gsi_start_bb (bb); !gsi_end_p (i); )
live = new_live_track (map);
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
{
gimple_stmt_iterator gsi;
map = init_var_map (num_ssa_names);
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
{
tree arg;
in the coalesce list because they do not need to be sorted, and simply
consume extra memory/compilation time in large programs. */
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
{
FOR_EACH_EDGE (e, ei, bb->preds)
if (e->flags & EDGE_ABNORMAL)
n_copy_of = num_ssa_names;
copy_of = XCNEWVEC (prop_value_t, n_copy_of);
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
{
gimple_stmt_iterator si;
int depth = bb_loop_depth (bb);
map = init_var_map (num_ssa_names);
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
{
/* Scan for real copies. */
for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
}
}
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
{
/* Treat PHI nodes as copies between the result and each argument. */
for (gsi = gsi_start_phis (bb); !gsi_end_p (gsi); gsi_next (&gsi))
gimple phi, stmt;
int flags;
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
{
/* PHI nodes are never inherently necessary. */
for (gsi = gsi_start_phis (bb); !gsi_end_p (gsi); gsi_next (&gsi))
struct loop *loop;
scev_initialize ();
if (mark_irreducible_loops ())
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
{
edge_iterator ei;
FOR_EACH_EDGE (e, ei, bb->succs)
}
}
}
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
{
/* Remove dead PHI nodes. */
something_changed |= remove_dead_phis (bb);
edge_iterator ei;
edge e;
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
{
FOR_EACH_EDGE (e, ei, bb->preds)
{
{
gimple_stmt_iterator gsi;
basic_block bb;
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
{
for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
update_stmt_if_modified (gsi_stmt (gsi));
cfg_changed = false;
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
{
gimple_stmt_iterator gsi;
basic_block bb;
gimple_stmt_iterator gsi;
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
{
unsigned i;
usedvars = BITMAP_ALLOC (NULL);
/* Walk the CFG marking all referenced symbols. */
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
{
gimple_stmt_iterator gsi;
size_t i;
ignores them, and the second pass (if there were any) tries to remove
them. */
if (have_local_clobbers)
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
{
gimple_stmt_iterator gsi;
live->num_blocks = last_basic_block_for_fn (cfun);
live->livein = XNEWVEC (bitmap_head, last_basic_block_for_fn (cfun));
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
bitmap_initialize (&live->livein[bb->index], &liveness_bitmap_obstack);
live->liveout = XNEWVEC (bitmap_head, last_basic_block_for_fn (cfun));
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
bitmap_initialize (&live->liveout[bb->index], &liveness_bitmap_obstack);
live->work_stack = XNEWVEC (int, last_basic_block_for_fn (cfun));
edge_iterator ei;
/* live on entry calculations used liveout vectors for defs, clear them. */
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
bitmap_clear (&liveinfo->liveout[bb->index]);
/* Set all the live-on-exit bits for uses in PHIs. */
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
{
gimple_stmt_iterator gsi;
size_t i;
if ((flag & LIVEDUMP_ENTRY) && live->livein)
{
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
{
fprintf (f, "\nLive on entry to BB%d : ", bb->index);
EXECUTE_IF_SET_IN_BITMAP (&live->livein[bb->index], 0, i, bi)
if ((flag & LIVEDUMP_EXIT) && live->liveout)
{
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
{
fprintf (f, "\nLive on exit from BB%d : ", bb->index);
EXECUTE_IF_SET_IN_BITMAP (&live->liveout[bb->index], 0, i, bi)
loops postorder. */
i = 0;
bbs = XNEWVEC (basic_block, n_basic_blocks_for_fn (cfun) - NUM_FIXED_BLOCKS);
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
if (bb->loop_father != current_loops->tree_root)
bbs[i++] = bb;
n = i;
struct loop *loop;
bitmap_clear (contains_call);
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
{
gimple_stmt_iterator gsi;
for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
free_aux_for_edges ();
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
SET_ALWAYS_EXECUTED_IN (bb, NULL);
bitmap_obstack_release (&lim_bitmap_obstack);
EXECUTE_IF_SET_IN_BITMAP (changed_bbs, 0, index, bi)
find_uses_to_rename_bb (BASIC_BLOCK_FOR_FN (cfun, index), use_blocks, need_phis);
else
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
find_uses_to_rename_bb (bb, use_blocks, need_phis);
}
timevar_push (TV_VERIFY_LOOP_CLOSED);
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
{
for (bsi = gsi_start_phis (bb); !gsi_end_p (bsi); gsi_next (&bsi))
{
calculate_dominance_info (CDI_POST_DOMINATORS);
#ifdef ENABLE_CHECKING
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
gcc_assert (!bb->aux);
#endif
execute_cse_reciprocals_1 (NULL, name);
}
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
{
gimple_stmt_iterator gsi;
gimple phi;
calculate_dominance_info (CDI_DOMINATORS);
memset (&sincos_stats, 0, sizeof (sincos_stats));
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
{
gimple_stmt_iterator gsi;
bool cleanup_eh = false;
memset (&bswap_stats, 0, sizeof (bswap_stats));
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
{
gimple_stmt_iterator gsi;
memset (&widen_mul_stats, 0, sizeof (widen_mul_stats));
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
{
gimple_stmt_iterator gsi;
}
/* Propagate into all uses and fold. */
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
{
gimple_stmt_iterator i;
intra_create_variable_infos ();
/* Now walk all statements and build the constraint set. */
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
{
gimple_stmt_iterator gsi;
}
/* Compute the call-used/clobbered sets. */
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
{
gimple_stmt_iterator gsi;
same_succ same = same_succ_alloc ();
basic_block bb;
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
{
find_same_succ_bb (bb, &same);
if (same == NULL)
for (i = 0; i < all_clusters.length (); ++i)
delete_cluster (all_clusters[i]);
all_clusters.truncate (0);
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
BB_CLUSTER (bb) = NULL;
}
bitmap_obstack_initialize (&ter_bitmap_obstack);
table = new_temp_expr_table (map);
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
{
find_replaceable_in_bb (table, bb);
gcc_checking_assert (bitmap_empty_p (table->partition_in_use));
ahead and thread it, else ignore it. */
basic_block bb;
edge e;
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
{
/* If we do end up threading here, we can remove elements from
BB->preds. Thus we can not use the FOR_EACH_EDGE iterator. */
/* Walk over each block. If the block ends with a control statement,
then it might create a useful equivalence. */
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
{
gimple_stmt_iterator gsi = gsi_last_bb (bb);
gimple stmt;
/* we just need to empty elements out of the hash table, and cleanup the
AUX field on the edges. */
val_ssa_equiv.dispose ();
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
{
edge e;
edge_iterator ei;
gimple_stmt_iterator gsi;
basic_block bb;
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
{
bool always_executed = dominated_by_p (CDI_POST_DOMINATORS,
single_succ (ENTRY_BLOCK_PTR_FOR_FN (cfun)), bb);
added_to_worklist = pointer_set_create ();
/* Initialize worklist */
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
for (gsi = gsi_start_phis (bb); !gsi_end_p (gsi); gsi_next (&gsi))
{
gimple phi = gsi_stmt (gsi);
/* Now verify all the uses and make sure they agree with the definitions
found in the previous pass. */
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
{
edge e;
gimple phi;
/* Collect into ADDRESSES_TAKEN all variables whose address is taken within
the function body. */
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
{
for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
{
variables and operands need to be rewritten to expose bare symbols. */
if (!bitmap_empty_p (suitable_for_renaming))
{
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi);)
{
gimple stmt = gsi_stmt (gsi);
{
basic_block bb;
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
{
gimple_stmt_iterator i;
|| TREE_TYPE (cfun_va_list) == char_type_node);
gcc_assert (is_gimple_reg_type (cfun_va_list) == va_list_simple_ptr);
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
{
gimple_stmt_iterator i;
memset (&wi, 0, sizeof (wi));
wi.info = si.va_list_vars;
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
{
gimple_stmt_iterator i;
{
basic_block bb;
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
{
const char *failure_reason;
gimple stmt = last_stmt (bb);
basic_block bb;
bool cfg_changed = false;
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
{
for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
{
{
basic_block bb;
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
{
gimple_stmt_iterator i;
wi.info = &ns;
ns.htab = htab;
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
{
gimple stmt = gsi_stmt (gsi);
init_stmt_vec_info_vec ();
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
{
vect_location = find_bb_location (bb);
basic_block bb;
gimple_stmt_iterator si;
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
{
edge_iterator ei;
edge e;
/* Note that the BSI iterator bump happens at the bottom of the
loop and no bump is necessary if we're removing the statement
referenced by the current BSI. */
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
for (si = gsi_after_labels (bb), is_unreachable = -1; !gsi_end_p (si);)
{
gimple stmt = gsi_stmt (si);
vr_value = XCNEWVEC (value_range_t *, num_vr_values);
vr_phi_edge_counts = XCNEWVEC (int, num_ssa_names);
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
{
gimple_stmt_iterator si;
I doubt it's worth the effort for the classes of jump
threading opportunities we are trying to identify at this
point in compilation. */
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
{
gimple last;
gimple_stmt_iterator gsi;
bool fentry_exit_instrument = false;
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
fentry_exit_instrument |= instrument_gimple (&gsi);
return fentry_exit_instrument;
basic_block bb;
gimple_stmt_iterator gsi;
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
{
for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi);)
{
error_found = false;
visited_hists = pointer_set_create ();
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
{
gimple stmt = gsi_stmt (gsi);
gimple_stmt_iterator gsi;
bool changed = false;
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
{
for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
{
histogram_value hist = NULL;
values->create (0);
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
gimple_values_to_profile (gsi_stmt (gsi), values);
in_pending = sbitmap_alloc (last_basic_block_for_fn (cfun));
bitmap_clear (in_worklist);
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
fibheap_insert (pending, bb_order[bb->index], bb);
bitmap_ones (in_pending);
}
if (success && MAY_HAVE_DEBUG_INSNS)
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
gcc_assert (VTI (bb)->flooded);
free (bb_order);
{
basic_block bb;
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
{
fprintf (dump_file, "\nBasic block %d:\n", bb->index);
fprintf (dump_file, "IN:\n");
/* Free memory occupied by the out hash tables, as they aren't used
anymore. */
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
dataflow_set_clear (&VTI (bb)->out);
/* Enable emitting notes by functions (mainly by set_variable_part and
dataflow_set_init (&cur);
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
{
/* Emit the notes for changes of variable locations between two
subsequent basic blocks. */
vt_add_function_parameters ();
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
{
rtx insn;
HOST_WIDE_INT pre, post = 0;
if (!MAY_HAVE_DEBUG_INSNS)
return;
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
{
FOR_BB_INSNS_SAFE (bb, insn, next)
if (DEBUG_INSN_P (insn))
{
basic_block bb;
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
{
VTI (bb)->mos.release ();
}