X-Git-Url: http://review.tizen.org/git/?a=blobdiff_plain;f=gcc%2Fhaifa-sched.c;h=80687fb5359ec4f27645c4f535c4e47e16c3414c;hb=ddbaab134ca4603f7d4ac0d1646f40f9c13405ff;hp=4a899b56173dd7d67db084ed86d24ad865ccadcd;hpb=18fbe394d62371bedaa41ed32c89c659109ae8f5;p=platform%2Fupstream%2Fgcc.git

diff --git a/gcc/haifa-sched.c b/gcc/haifa-sched.c
index 4a899b5..80687fb 100644
--- a/gcc/haifa-sched.c
+++ b/gcc/haifa-sched.c
@@ -1,5 +1,5 @@
 /* Instruction scheduling pass.
-   Copyright (C) 1992-2018 Free Software Foundation, Inc.
+   Copyright (C) 1992-2020 Free Software Foundation, Inc.
    Contributed by Michael Tiemann (tiemann@cygnus.com) Enhanced by,
    and currently maintained by, Jim Wilson (wilson@cygnus.com)
 
@@ -141,11 +141,11 @@ along with GCC; see the file COPYING3.  If not see
 #include "cfgbuild.h"
 #include "sched-int.h"
 #include "common/common-target.h"
-#include "params.h"
 #include "dbgcnt.h"
 #include "cfgloop.h"
 #include "dumpfile.h"
 #include "print-rtl.h"
+#include "function-abi.h"
 
 #ifdef INSN_SCHEDULING
 
@@ -583,7 +583,7 @@ set_modulo_params (int ii, int max_stages, int insns, int max_uid)
   modulo_max_stages = max_stages;
   modulo_n_insns = insns;
   modulo_iter0_max_uid = max_uid;
-  modulo_backtracks_left = PARAM_VALUE (PARAM_MAX_MODULO_BACKTRACK_ATTEMPTS);
+  modulo_backtracks_left = param_max_modulo_backtrack_attempts;
 }
 
 /* A structure to record a pair of insns where the first one is a real
@@ -827,7 +827,7 @@ add_delay_dependencies (rtx_insn *insn)
 
 /* Forward declarations.  */
 
-static int priority (rtx_insn *);
+static int priority (rtx_insn *, bool force_recompute = false);
 static int autopref_rank_for_schedule (const rtx_insn *, const rtx_insn *);
 static int rank_for_schedule (const void *, const void *);
 static void swap_sort (rtx_insn **, int);
@@ -936,7 +936,8 @@ static bitmap tmp_bitmap;
 /* Effective number of available registers of a given class (see comment
    in sched_pressure_start_bb).  */
 static int sched_class_regs_num[N_REG_CLASSES];
-/* Number of call_saved_regs and fixed_regs.  Helpers for calculating of
+/* The number of registers that the function would need to save before it
+   uses them, and the number of fixed_regs.  Helpers for calculating of
    sched_class_regs_num.  */
 static int call_saved_regs_num[N_REG_CLASSES];
 static int fixed_regs_num[N_REG_CLASSES];
@@ -1587,7 +1588,7 @@ bool sched_fusion;
 
 /* Compute the priority number for INSN.  */
 static int
-priority (rtx_insn *insn)
+priority (rtx_insn *insn, bool force_recompute)
 {
   if (! INSN_P (insn))
     return 0;
@@ -1595,7 +1596,7 @@ priority (rtx_insn *insn)
   /* We should not be interested in priority of an already scheduled insn.  */
   gcc_assert (QUEUE_INDEX (insn) != QUEUE_SCHEDULED);
 
-  if (!INSN_PRIORITY_KNOWN (insn))
+  if (force_recompute || !INSN_PRIORITY_KNOWN (insn))
     {
       int this_priority = -1;
 
@@ -2539,7 +2540,7 @@ enum rfs_decision {
   RFS_SCHED_GROUP, RFS_PRESSURE_DELAY, RFS_PRESSURE_TICK,
   RFS_FEEDS_BACKTRACK_INSN, RFS_PRIORITY, RFS_SPECULATION,
   RFS_SCHED_RANK, RFS_LAST_INSN, RFS_PRESSURE_INDEX,
-  RFS_DEP_COUNT, RFS_TIE, RFS_FUSION, RFS_N };
+  RFS_DEP_COUNT, RFS_TIE, RFS_FUSION, RFS_COST, RFS_N };
 
 /* Corresponding strings for print outs.  */
 static const char *rfs_str[RFS_N] = {
@@ -2547,7 +2548,7 @@ static const char *rfs_str[RFS_N] = {
   "RFS_SCHED_GROUP", "RFS_PRESSURE_DELAY", "RFS_PRESSURE_TICK",
   "RFS_FEEDS_BACKTRACK_INSN", "RFS_PRIORITY", "RFS_SPECULATION",
   "RFS_SCHED_RANK", "RFS_LAST_INSN", "RFS_PRESSURE_INDEX",
-  "RFS_DEP_COUNT", "RFS_TIE", "RFS_FUSION" };
+  "RFS_DEP_COUNT", "RFS_TIE", "RFS_FUSION", "RFS_COST" };
 
 /* Statistical breakdown of rank_for_schedule decisions.  */
 struct rank_for_schedule_stats_t { unsigned stats[RFS_N]; };
@@ -2710,7 +2711,7 @@ rank_for_schedule (const void *x, const void *y)
   if (flag_sched_critical_path_heuristic && priority_val)
     return rfs_result (RFS_PRIORITY, priority_val, tmp, tmp2);
 
-  if (PARAM_VALUE (PARAM_SCHED_AUTOPREF_QUEUE_DEPTH) >= 0)
+  if (param_sched_autopref_queue_depth >= 0)
     {
       int autopref = autopref_rank_for_schedule (tmp, tmp2);
       if (autopref != 0)
@@ -2800,6 +2801,14 @@ rank_for_schedule (const void *x, const void *y)
   if (flag_sched_dep_count_heuristic && val != 0)
     return rfs_result (RFS_DEP_COUNT, val, tmp, tmp2);
 
+  /* Sort by INSN_COST rather than INSN_LUID.  This means that instructions
+     which take longer to execute are prioritised and it leads to more
+     dual-issue opportunities on in-order cores which have this feature.  */
+
+  if (INSN_COST (tmp) != INSN_COST (tmp2))
+    return rfs_result (RFS_COST, INSN_COST (tmp2) - INSN_COST (tmp),
+                       tmp, tmp2);
+
   /* If insns are equally good, sort by INSN_LUID (original insn order),
      so that we make the sort stable.  This minimizes instruction movement,
      thus minimizing sched's effect on debugging and cross-jumping.  */
@@ -3403,7 +3412,7 @@ model_remove_from_worklist (struct model_insn_info *insn)
 }
 
 /* Add INSN to the model worklist.  Start looking for a suitable position
-   between neighbors PREV and NEXT, testing at most MAX_SCHED_READY_INSNS
+   between neighbors PREV and NEXT, testing at most param_max_sched_ready_insns
    insns either side.  A null PREV indicates the beginning of the list and
    a null NEXT indicates the end.  */
 
@@ -3414,7 +3423,7 @@ model_add_to_worklist (struct model_insn_info *insn,
 {
   int count;
 
-  count = MAX_SCHED_READY_INSNS;
+  count = param_max_sched_ready_insns;
   if (count > 0 && prev && model_order_p (insn, prev))
     do
       {
@@ -3442,7 +3451,7 @@ model_promote_insn (struct model_insn_info *insn)
   int count;
 
   prev = insn->prev;
-  count = MAX_SCHED_READY_INSNS;
+  count = param_max_sched_ready_insns;
   while (count > 0 && prev && model_order_p (insn, prev))
     {
       count--;
@@ -3728,7 +3737,7 @@ model_choose_insn (void)
     {
       fprintf (sched_dump, ";;\t+--- worklist:\n");
       insn = model_worklist;
-      count = MAX_SCHED_READY_INSNS;
+      count = param_max_sched_ready_insns;
      while (count > 0 && insn)
	{
	  fprintf (sched_dump, ";;\t+--- %d [%d, %d, %d, %d]\n",
@@ -3760,7 +3769,7 @@ model_choose_insn (void)
      Failing that, just pick the highest-priority instruction in the
      worklist.  */
 
-  count = MAX_SCHED_READY_INSNS;
+  count = param_max_sched_ready_insns;
   insn = model_worklist;
   fallback = 0;
   for (;;)
@@ -4230,6 +4239,15 @@ remove_notes (rtx_insn *head, rtx_insn *tail)
	  if (insn != tail)
	    {
	      remove_insn (insn);
+	      /* If an insn was split just before the EPILOGUE_BEG note and
+		 that split created new basic blocks, we could have a
+		 BASIC_BLOCK note here.  Safely advance over it in that case
+		 and assert that we land on a real insn.  */
+	      if (NOTE_P (next)
+		  && NOTE_KIND (next) == NOTE_INSN_BASIC_BLOCK
+		  && next != next_tail)
+		next = NEXT_INSN (next);
+	      gcc_assert (INSN_P (next));
	      add_reg_note (next, REG_SAVE_NOTE,
			    GEN_INT (NOTE_INSN_EPILOGUE_BEG));
	      break;
@@ -4702,7 +4720,12 @@ apply_replacement (dep_t dep, bool immediately)
       success = validate_change (desc->insn, desc->loc, desc->newval, 0);
       gcc_assert (success);
 
+      rtx_insn *insn = DEP_PRO (dep);
+
+      /* Recompute priority since dependent priorities may have changed.  */
+      priority (insn, true);
       update_insn_after_change (desc->insn);
+
       if ((TODO_SPEC (desc->insn) & (HARD_DEP | DEP_POSTPONED)) == 0)
	fix_tick_ready (desc->insn);
 
@@ -4756,7 +4779,17 @@ restore_pattern (dep_t dep, bool immediately)
       success = validate_change (desc->insn, desc->loc, desc->orig, 0);
       gcc_assert (success);
 
+
+      rtx_insn *insn = DEP_PRO (dep);
+
+      if (QUEUE_INDEX (insn) != QUEUE_SCHEDULED)
+	{
+	  /* Recompute priority since dependent priorities may have changed.  */
+	  priority (insn, true);
+	}
+
       update_insn_after_change (desc->insn);
+
       if (backtrack_queue != NULL)
	{
	  backtrack_queue->replacement_deps.safe_push (dep);
@@ -5122,12 +5155,12 @@ queue_to_ready (struct ready_list *ready)
       /* If the ready list is full, delay the insn for 1 cycle.  See the
	  comment in schedule_block for the rationale.  */
       if (!reload_completed
-	  && (ready->n_ready - ready->n_debug > MAX_SCHED_READY_INSNS
+	  && (ready->n_ready - ready->n_debug > param_max_sched_ready_insns
	      || (sched_pressure == SCHED_PRESSURE_MODEL
-		  /* Limit pressure recalculations to MAX_SCHED_READY_INSNS
-		     instructions too.  */
+		  /* Limit pressure recalculations to
+		     param_max_sched_ready_insns instructions too.  */
		  && model_index (insn) > (model_curr_point
-					   + MAX_SCHED_READY_INSNS)))
+					   + param_max_sched_ready_insns)))
	  && !(sched_pressure == SCHED_PRESSURE_MODEL
	       && model_curr_point < model_num_insns
	       /* Always allow the next model instruction to issue.  */
@@ -5408,6 +5441,7 @@ reemit_notes (rtx_insn *insn)
 
	  last = emit_note_before (note_type, last);
	  remove_note (insn, note);
+	  df_insn_create_insn_record (last);
	}
    }
 }
@@ -5718,7 +5752,7 @@ autopref_multipass_dfa_lookahead_guard (rtx_insn *insn1, int ready_index)
   /* Exit early if the param forbids this or if we're not entering here through
      normal haifa scheduling.  This can happen if selective scheduling is
      explicitly enabled.  */
-  if (!insn_queue || PARAM_VALUE (PARAM_SCHED_AUTOPREF_QUEUE_DEPTH) <= 0)
+  if (!insn_queue || param_sched_autopref_queue_depth <= 0)
     return 0;
 
   if (sched_verbose >= 2 && ready_index == 0)
@@ -5771,14 +5805,14 @@ autopref_multipass_dfa_lookahead_guard (rtx_insn *insn1, int ready_index)
	    }
	}
 
-      if (PARAM_VALUE (PARAM_SCHED_AUTOPREF_QUEUE_DEPTH) == 1)
+      if (param_sched_autopref_queue_depth == 1)
	continue;
 
       /* Everything from the current queue slot should have been moved to
	  the ready list.  */
       gcc_assert (insn_queue[NEXT_Q_AFTER (q_ptr, 0)] == NULL_RTX);
 
-      int n_stalls = PARAM_VALUE (PARAM_SCHED_AUTOPREF_QUEUE_DEPTH) - 1;
+      int n_stalls = param_sched_autopref_queue_depth - 1;
       if (n_stalls > max_insn_queue_index)
	n_stalls = max_insn_queue_index;
 
@@ -6527,14 +6561,15 @@ schedule_block (basic_block *target_bb, state_t init_state)
      time in the worst case.  Before reload we are more likely to have
      big lists so truncate them to a reasonable size.  */
   if (!reload_completed
-      && ready.n_ready - ready.n_debug > MAX_SCHED_READY_INSNS)
+      && ready.n_ready - ready.n_debug > param_max_sched_ready_insns)
     {
       ready_sort_debug (&ready);
       ready_sort_real (&ready);
 
-      /* Find first free-standing insn past MAX_SCHED_READY_INSNS.
+      /* Find first free-standing insn past param_max_sched_ready_insns.
	  If there are debug insns, we know they're first.  */
-      for (i = MAX_SCHED_READY_INSNS + ready.n_debug; i < ready.n_ready; i++)
+      for (i = param_max_sched_ready_insns + ready.n_debug; i < ready.n_ready;
+	   i++)
	if (!SCHED_GROUP_P (ready_element (&ready, i)))
	  break;
 
@@ -7181,10 +7216,13 @@ alloc_global_sched_pressure_data (void)
	  fixed_regs_num[cl] = 0;
 
	  for (int i = 0; i < ira_class_hard_regs_num[cl]; ++i)
-	    if (!call_used_regs[ira_class_hard_regs[cl][i]])
-	      ++call_saved_regs_num[cl];
-	    else if (fixed_regs[ira_class_hard_regs[cl][i]])
-	      ++fixed_regs_num[cl];
+	    {
+	      unsigned int regno = ira_class_hard_regs[cl][i];
+	      if (fixed_regs[regno])
+		++fixed_regs_num[cl];
+	      else if (!crtl->abi->clobbers_full_reg_p (regno))
+		++call_saved_regs_num[cl];
+	    }
	}
    }
 }
@@ -7230,7 +7268,7 @@ sched_init (void)
       && !reload_completed
       && common_sched_info->sched_pass_id == SCHED_RGN_PASS)
     sched_pressure = ((enum sched_pressure_algorithm)
-		      PARAM_VALUE (PARAM_SCHED_PRESSURE_ALGORITHM));
+		      param_sched_pressure_algorithm);
   else
     sched_pressure = SCHED_PRESSURE_NONE;
 
@@ -7245,11 +7283,10 @@ sched_init (void)
 
   if (spec_info->mask != 0)
     {
-      spec_info->data_weakness_cutoff =
-	(PARAM_VALUE (PARAM_SCHED_SPEC_PROB_CUTOFF) * MAX_DEP_WEAK) / 100;
-      spec_info->control_weakness_cutoff =
-	(PARAM_VALUE (PARAM_SCHED_SPEC_PROB_CUTOFF)
-	 * REG_BR_PROB_BASE) / 100;
+      spec_info->data_weakness_cutoff
+	= (param_sched_spec_prob_cutoff * MAX_DEP_WEAK) / 100;
+      spec_info->control_weakness_cutoff
+	= (param_sched_spec_prob_cutoff * REG_BR_PROB_BASE) / 100;
     }
   else
     /* So we won't read anything accidentally.  */
@@ -8056,7 +8093,7 @@ find_fallthru_edge_from (basic_block pred)
 
       if (e)
	{
-	  gcc_assert (e->dest == succ);
+	  gcc_assert (e->dest == succ || e->dest->index == EXIT_BLOCK);
	  return e;
	}
    }
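
The RFS_COST hunk above adds one more tie-breaker to rank_for_schedule: when every earlier heuristic ties, the insn with the larger INSN_COST (longer latency) is ranked first, and INSN_LUID remains the final key so the sort stays stable.  A minimal standalone sketch of that comparator shape follows; toy_insn and toy_rank are hypothetical stand-ins, not GCC's types.

#include <stdlib.h>

struct toy_insn
{
  int priority;	/* Critical-path priority; larger ranks earlier.  */
  int cost;	/* Issue latency, standing in for INSN_COST.  */
  int luid;	/* Original order; smaller means earlier.  */
};

static int
toy_rank (const void *x, const void *y)
{
  const toy_insn *a = (const toy_insn *) x;
  const toy_insn *b = (const toy_insn *) y;

  if (a->priority != b->priority)
    return b->priority - a->priority;	/* Higher priority first.  */

  /* The new tie-break: issue longer-latency insns first, which tends
     to expose dual-issue opportunities on in-order cores.  */
  if (a->cost != b->cost)
    return b->cost - a->cost;

  return a->luid - b->luid;		/* Keep the sort stable.  */
}

/* Usage: qsort (insns, n, sizeof (toy_insn), toy_rank);  */

Placing the cost test after every other heuristic matters: it only reorders insns the existing heuristics consider equally good, so it cannot override critical-path or register-pressure decisions.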
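
The priority () change threads a force_recompute flag through an otherwise memoized function: apply_replacement and restore_pattern now pass true for the producer insn, refreshing a cached priority that may have gone stale when a dependent's pattern was replaced or restored.  A compilable sketch of that memoize-with-override pattern, using a hypothetical node type rather than GCC's INSN_PRIORITY machinery:

struct node
{
  int latency;		/* Stand-in input to the computation.  */
  int cached_priority;
  bool priority_known;
};

static int
compute_priority (const node *n)
{
  /* Stand-in for the real walk over forward dependencies.  */
  return n->latency;
}

static int
get_priority (node *n, bool force_recompute = false)
{
  /* Recompute when asked to, even if a value is already cached.  */
  if (force_recompute || !n->priority_known)
    {
      n->cached_priority = compute_priority (n);
      n->priority_known = true;
    }
  return n->cached_priority;
}

The default argument keeps every existing priority () call site unchanged; only the two replacement hooks opt in.  restore_pattern additionally guards the recompute with a QUEUE_INDEX check, since priority () asserts that its argument has not already been scheduled.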
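
Finally, the alloc_global_sched_pressure_data hunk replaces the global call_used_regs test with the per-function crtl->abi->clobbers_full_reg_p query from the new function-abi.h interface, and tests fixed_regs first.  A simplified sketch of that classification, with plain arrays and a callback standing in for GCC's register tables and ABI object:

/* Hypothetical stand-ins for ira_class_hard_regs* and fixed_regs.  */
const int N_CLASSES = 4;
const int N_REGS = 32;

int class_hard_regs_num[N_CLASSES];
unsigned int class_hard_regs[N_CLASSES][N_REGS];
bool fixed_regs[N_REGS];

int fixed_num[N_CLASSES];	/* Never available to the scheduler.  */
int saved_num[N_CLASSES];	/* Usable only after a prologue save.  */

static void
count_reg_kinds (bool (*abi_clobbers) (unsigned int))
{
  for (int cl = 0; cl < N_CLASSES; ++cl)
    {
      fixed_num[cl] = 0;
      saved_num[cl] = 0;
      for (int i = 0; i < class_hard_regs_num[cl]; ++i)
	{
	  unsigned int regno = class_hard_regs[cl][i];
	  if (fixed_regs[regno])
	    ++fixed_num[cl];	/* E.g. the stack pointer.  */
	  else if (!abi_clobbers (regno))
	    ++saved_num[cl];	/* Call-saved under this function's ABI.  */
	}
    }
}

Checking fixed_regs first keeps fixed registers out of the call-saved count regardless of how the ABI classifies them, while the crtl->abi query makes the count sensitive to the current function's own ABI rather than the target-wide default.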