/* Instruction scheduling pass.
   Copyright (C) 1992-2013 Free Software Foundation, Inc.
   Contributed by Michael Tiemann (tiemann@cygnus.com) Enhanced by,
   and currently maintained by, Jim Wilson (wilson@cygnus.com)

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
/* This pass implements list scheduling within basic blocks.  It is
   run twice: (1) after flow analysis, but before register allocation,
   and (2) after register allocation.

   The first run performs interblock scheduling, moving insns between
   different blocks in the same "region", and the second runs only
   basic block scheduling.

   Interblock motions performed are useful motions and speculative
   motions, including speculative loads.  Motions requiring code
   duplication are not supported.  The identification of motion type
   and the check for validity of speculative motions requires
   construction and analysis of the function's control flow graph.

   The main entry point for this pass is schedule_insns(), called for
   each function.  The work of the scheduler is organized in three
   levels: (1) function level: insns are subject to splitting,
   control-flow-graph is constructed, regions are computed (after
   reload, each region is of one block), (2) region level: control
   flow graph attributes required for interblock scheduling are
   computed (dominators, reachability, etc.), data dependences and
   priorities are computed, and (3) block level: insns in the block
   are actually scheduled.  */
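
/* An illustrative sketch of those three levels (simplified and
   approximate, not the literal control flow of this file):

     schedule_insns ()                       -- function level
       build the CFG, compute regions
       for each region R:
         schedule_region (R)                 -- region level
           compute dominators, probabilities, dependences, priorities
           for each block B of R:
             list-schedule the insns of B    -- block level  */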
#include "coretypes.h"
#include "diagnostic-core.h"
#include "hard-reg-set.h"
#include "insn-config.h"
#include "insn-attr.h"
#include "sched-int.h"
#include "sel-sched.h"
#include "tree-pass.h"

#ifdef INSN_SCHEDULING

/* Some accessor macros for h_i_d members only used within this file.  */
#define FED_BY_SPEC_LOAD(INSN) (HID (INSN)->fed_by_spec_load)
#define IS_LOAD_INSN(INSN) (HID (INSN)->is_load_insn)

/* nr_inter/spec counts interblock/speculative motion for the function.  */
static int nr_inter, nr_spec;

static int is_cfg_nonregular (void);

/* Number of regions in the procedure.  */
int nr_regions = 0;

/* Table of region descriptions.  */
region *rgn_table = NULL;

/* Array of lists of regions' blocks.  */
int *rgn_bb_table = NULL;

/* Topological order of blocks in the region (if b2 is reachable from
   b1, block_to_bb[b2] > block_to_bb[b1]).  Note: A basic block is
   always referred to by either block or b, while its topological
   order name (in the region) is referred to by bb.  */
int *block_to_bb = NULL;
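
/* For example (hypothetical numbers, for illustration only): if a
   region's blocks appear in rgn_bb_table in the order 4, 7, 5, then
   block_to_bb[4] == 0, block_to_bb[7] == 1 and block_to_bb[5] == 2.  */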
/* The number of the region containing a block.  */
int *containing_rgn = NULL;

/* ebb_head [i] - is index in rgn_bb_table of the head basic block of i'th ebb.
   Currently we can get an ebb only through splitting of the currently
   scheduled block, therefore, we don't need an ebb_head array for every region;
   hence, it's sufficient to hold it for the current one only.  */
int *ebb_head = NULL;

/* The minimum probability of reaching a source block so that it will be
   considered for speculative scheduling.  */
static int min_spec_prob;

static void find_single_block_region (bool);
static void find_rgns (void);
static bool too_large (int, int *, int *);

/* Blocks of the current region being scheduled.  */
int current_nr_blocks;
int current_blocks;

/* A speculative motion requires checking live information on the path
   from 'source' to 'target'.  The split blocks are those to be checked.
   After a speculative motion, live information should be modified in
   the 'update' blocks.

   Lists of split and update blocks for each candidate of the current
   target are in array bblst_table.  */
static basic_block *bblst_table;
static int bblst_size, bblst_last;
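
/* A worked example (hypothetical block letters): suppose the target
   block T ends in a conditional branch with successors S and E, and an
   insn is moved speculatively from S up into T.  The motion is
   speculative because execution may take the edge T->E and never reach
   S; T->E is a split edge, so E is a split block: the moved insn must
   not clobber anything live at the start of E (see check_live).  After
   the motion the insn's result is computed on that path as well, so the
   live sets of the update blocks must be augmented (see update_live).  */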
/* Arrays that hold the DFA state at the end of a basic block, to re-use
   as the initial state at the start of successor blocks.  The
   BB_STATE_ARRAY holds the actual DFA state, and BB_STATE[I] is a
   pointer into BB_STATE_ARRAY for basic block I.  FIXME: This should
   be a vec.  */
static char *bb_state_array = NULL;
static state_t *bb_state = NULL;

/* Target info declarations.

   The block currently being scheduled is referred to as the "target" block,
   while other blocks in the region from which insns can be moved to the
   target are called "source" blocks.  The candidate structure holds info
   about such sources: are they valid?  Speculative?  Etc.  */
typedef struct
{
  basic_block *first_member;
  int nr_members;
}
bblst;

typedef struct
{
  char is_valid;
  char is_speculative;
  int src_prob;
  bblst split_bbs;
  bblst update_bbs;
}
candidate;

static candidate *candidate_table;
#define IS_VALID(src) (candidate_table[src].is_valid)
#define IS_SPECULATIVE(src) (candidate_table[src].is_speculative)
#define IS_SPECULATIVE_INSN(INSN)			\
  (IS_SPECULATIVE (BLOCK_TO_BB (BLOCK_NUM (INSN))))
#define SRC_PROB(src) ( candidate_table[src].src_prob )

/* The bb being currently scheduled.  */
int target_bb;

/* List of edges.  */
typedef struct
{
  edge *first_member;
  int nr_members;
}
edgelst;

static edge *edgelst_table;
static int edgelst_last;

static void extract_edgelst (sbitmap, edgelst *);

/* Target info functions.  */
static void split_edges (int, int, edgelst *);
static void compute_trg_info (int);
void debug_candidate (int);
void debug_candidates (int);

/* Dominators array: dom[i] contains the sbitmap of dominators of
   bb i in the region.  */
static sbitmap *dom;

/* bb 0 is the only region entry.  */
#define IS_RGN_ENTRY(bb) (!bb)

/* Is bb_src dominated by bb_trg?  */
#define IS_DOMINATED(bb_src, bb_trg)				\
( bitmap_bit_p (dom[bb_src], bb_trg) )

/* Probability: Prob[i] is an int in [0, REG_BR_PROB_BASE] which is
   the probability of bb i relative to the region entry.  */
static int *prob;

/* Bit-set of edges, where bit i stands for edge i.  */
typedef sbitmap edgeset;

/* Number of edges in the region.  */
static int rgn_nr_edges;

/* Array of size rgn_nr_edges.  */
static edge *rgn_edges;

/* Mapping from each edge in the graph to its number in the rgn.  */
#define EDGE_TO_BIT(edge) ((int)(size_t)(edge)->aux)
#define SET_EDGE_TO_BIT(edge,nr) ((edge)->aux = (void *)(size_t)(nr))
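
/* Sketch of the assumed setup (illustrative, not verbatim from this
   section): before a region is scheduled, every edge of the region is
   numbered once so that edgesets can index it, roughly:

     rgn_nr_edges = 0;
     ...for each edge E between blocks of the region...
       {
         SET_EDGE_TO_BIT (E, rgn_nr_edges);
         rgn_edges[rgn_nr_edges++] = E;
       }
*/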
/* The split edges of a source bb are different for each target
   bb.  In order to compute this efficiently, the 'potential-split edges'
   are computed for each bb prior to scheduling a region.  This is actually
   the split edges of each bb relative to the region entry.

   pot_split[bb] is the set of potential split edges of bb.  */
static edgeset *pot_split;

/* For every bb, a set of its ancestor edges.  */
static edgeset *ancestor_edges;
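
/* With these per-bb sets, the split edges of BB_SRC relative to a
   particular target BB_TRG fall out of a simple set difference:
   split_edges () below computes pot_split[bb_src] minus
   pot_split[bb_trg].  */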
#define INSN_PROBABILITY(INSN) (SRC_PROB (BLOCK_TO_BB (BLOCK_NUM (INSN))))

/* Speculative scheduling functions.  */
static int check_live_1 (int, rtx);
static void update_live_1 (int, rtx);
static int is_pfree (rtx, int, int);
static int find_conditional_protection (rtx, int);
static int is_conditionally_protected (rtx, int, int);
static int is_prisky (rtx, int, int);
static int is_exception_free (rtx, int, int);

static bool sets_likely_spilled (rtx);
static void sets_likely_spilled_1 (rtx, const_rtx, void *);
static void add_branch_dependences (rtx, rtx);
static void compute_block_dependences (int);

static void schedule_region (int);
static void concat_insn_mem_list (rtx, rtx, rtx *, rtx *);
static void propagate_deps (int, struct deps_desc *);
static void free_pending_lists (void);

/* Functions for construction of the control flow graph.  */

/* Return 1 if control flow graph should not be constructed, 0 otherwise.

   We decide not to build the control flow graph if there is possibly more
   than one entry to the function, if computed branches exist, if we
   have nonlocal gotos, or if we have an unreachable loop.  */

is_cfg_nonregular (void)

  /* If we have a label that could be the target of a nonlocal goto, then
     the cfg is not well structured.  */
  if (nonlocal_goto_handler_labels)

  /* If we have any forced labels, then the cfg is not well structured.  */

  /* If we have exception handlers, then we consider the cfg not well
     structured.  ?!?  We should be able to handle this now that we
     compute an accurate cfg for EH.  */
  if (current_function_has_exception_handlers ())

  /* If we have insns which refer to labels as non-jumped-to operands,
     then we consider the cfg not well structured.  */
  FOR_BB_INSNS (b, insn)

      rtx note, next, set, dest;

      /* If this function has a computed jump, then we consider the cfg
         not well structured.  */
      if (JUMP_P (insn) && computed_jump_p (insn))

      note = find_reg_note (insn, REG_LABEL_OPERAND, NULL_RTX);
      if (note == NULL_RTX)

      /* For that label not to be seen as a referred-to label, this
         must be a single-set which is feeding a jump *only*.  This
         could be a conditional jump with the label split off for
         machine-specific reasons or a casesi/tablejump.  */
      next = next_nonnote_insn (insn);
          || (JUMP_LABEL (next) != XEXP (note, 0)
              && find_reg_note (next, REG_LABEL_TARGET,
                                XEXP (note, 0)) == NULL_RTX)
          || BLOCK_FOR_INSN (insn) != BLOCK_FOR_INSN (next))

      set = single_set (insn);

      dest = SET_DEST (set);
      if (!REG_P (dest) || !dead_or_set_p (next, dest))

  /* Unreachable loops with more than one basic block are detected
     during the DFS traversal in find_rgns.

     Unreachable loops with a single block are detected here.  This
     test is redundant with the one in find_rgns, but it's much
     cheaper to go ahead and catch the trivial case here.  */
    if (EDGE_COUNT (b->preds) == 0
        || (single_pred_p (b)
            && single_pred (b) == b))

  /* All the tests passed.  Consider the cfg well structured.  */
/* Extract list of edges from a bitmap containing EDGE_TO_BIT bits.  */

extract_edgelst (sbitmap set, edgelst *el)

  unsigned int i;
  sbitmap_iterator sbi;

  /* edgelst table space is reused in each call to extract_edgelst.  */
  edgelst_last = 0;
  el->first_member = &edgelst_table[edgelst_last];
  el->nr_members = 0;

  /* Iterate over each word in the bitset.  */
  EXECUTE_IF_SET_IN_BITMAP (set, 0, i, sbi)

      edgelst_table[edgelst_last++] = rgn_edges[i];
      el->nr_members++;

/* Functions for the construction of regions.  */

/* Print the regions, for debugging purposes.  Callable from debugger.  */

  fprintf (sched_dump, "\n;; ------------ REGIONS ----------\n\n");
  for (rgn = 0; rgn < nr_regions; rgn++)

      fprintf (sched_dump, ";;\trgn %d nr_blocks %d:\n", rgn,
               rgn_table[rgn].rgn_nr_blocks);
      fprintf (sched_dump, ";;\tbb/block: ");

      /* We don't have ebb_head initialized yet, so we can't use
         BB_TO_BLOCK ().  */
      current_blocks = RGN_BLOCKS (rgn);

      for (bb = 0; bb < rgn_table[rgn].rgn_nr_blocks; bb++)
        fprintf (sched_dump, " %d/%d ", bb, rgn_bb_table[current_blocks + bb]);

      fprintf (sched_dump, "\n\n");

/* Print the region's basic blocks.  */

debug_region (int rgn)

  fprintf (stderr, "\n;; ------------ REGION %d ----------\n\n", rgn);
  fprintf (stderr, ";;\trgn %d nr_blocks %d:\n", rgn,
           rgn_table[rgn].rgn_nr_blocks);
  fprintf (stderr, ";;\tbb/block: ");

  /* We don't have ebb_head initialized yet, so we can't use
     BB_TO_BLOCK ().  */
  current_blocks = RGN_BLOCKS (rgn);

  for (bb = 0; bb < rgn_table[rgn].rgn_nr_blocks; bb++)
    fprintf (stderr, " %d/%d ", bb, rgn_bb_table[current_blocks + bb]);

  fprintf (stderr, "\n\n");

  for (bb = 0; bb < rgn_table[rgn].rgn_nr_blocks; bb++)

      dump_bb (stderr,
               BASIC_BLOCK_FOR_FN (cfun, rgn_bb_table[current_blocks + bb]),
               0, TDF_SLIM | TDF_BLOCKS);
      fprintf (stderr, "\n");

  fprintf (stderr, "\n");

/* True when a bb with index BB_INDEX is contained in region RGN.  */

bb_in_region_p (int bb_index, int rgn)

  for (i = 0; i < rgn_table[rgn].rgn_nr_blocks; i++)
    if (rgn_bb_table[current_blocks + i] == bb_index)
      return true;

  return false;

/* Dump region RGN to file F using dot syntax.  */

dump_region_dot (FILE *f, int rgn)

  fprintf (f, "digraph Region_%d {\n", rgn);

  /* We don't have ebb_head initialized yet, so we can't use
     BB_TO_BLOCK ().  */
  current_blocks = RGN_BLOCKS (rgn);

  for (i = 0; i < rgn_table[rgn].rgn_nr_blocks; i++)

      edge e;
      edge_iterator ei;
      int src_bb_num = rgn_bb_table[current_blocks + i];
      basic_block bb = BASIC_BLOCK_FOR_FN (cfun, src_bb_num);

      FOR_EACH_EDGE (e, ei, bb->succs)
        if (bb_in_region_p (e->dest->index, rgn))
          fprintf (f, "\t%d -> %d\n", src_bb_num, e->dest->index);

  fprintf (f, "}\n");

/* The same, but first open a file specified by FNAME.  */

dump_region_dot_file (const char *fname, int rgn)

  FILE *f = fopen (fname, "wt");
  dump_region_dot (f, rgn);
  fclose (f);

/* Build a single block region for each basic block in the function.
   This allows for using the same code for interblock and basic block
   scheduling.  */

find_single_block_region (bool ebbs_p)

  basic_block bb, ebb_start;
  edge e;

  nr_regions = 0;

  if (ebbs_p)

      int probability_cutoff;
      if (profile_info && flag_branch_probabilities)
        probability_cutoff = PARAM_VALUE (TRACER_MIN_BRANCH_PROBABILITY_FEEDBACK);
      else
        probability_cutoff = PARAM_VALUE (TRACER_MIN_BRANCH_PROBABILITY);
      probability_cutoff = REG_BR_PROB_BASE / 100 * probability_cutoff;

      FOR_EACH_BB (ebb_start)

          RGN_NR_BLOCKS (nr_regions) = 0;
          RGN_BLOCKS (nr_regions) = i;
          RGN_DONT_CALC_DEPS (nr_regions) = 0;
          RGN_HAS_REAL_EBB (nr_regions) = 0;

          for (bb = ebb_start; ; bb = bb->next_bb)

              rgn_bb_table[i] = bb->index;
              RGN_NR_BLOCKS (nr_regions)++;
              CONTAINING_RGN (bb->index) = nr_regions;
              BLOCK_TO_BB (bb->index) = i - RGN_BLOCKS (nr_regions);
              i++;

              if (bb->next_bb == EXIT_BLOCK_PTR_FOR_FN (cfun)
                  || LABEL_P (BB_HEAD (bb->next_bb)))
                break;

              e = find_fallthru_edge (bb->succs);
              if (!e)
                break;
              if (e->probability <= probability_cutoff)
                break;

  else
    FOR_EACH_BB (bb)

        rgn_bb_table[nr_regions] = bb->index;
        RGN_NR_BLOCKS (nr_regions) = 1;
        RGN_BLOCKS (nr_regions) = nr_regions;
        RGN_DONT_CALC_DEPS (nr_regions) = 0;
        RGN_HAS_REAL_EBB (nr_regions) = 0;

        CONTAINING_RGN (bb->index) = nr_regions;
        BLOCK_TO_BB (bb->index) = 0;
        nr_regions++;

/* Estimate the number of insns in BB.  */

rgn_estimate_number_of_insns (basic_block bb)

  int count;

  count = INSN_LUID (BB_END (bb)) - INSN_LUID (BB_HEAD (bb));

  if (MAY_HAVE_DEBUG_INSNS)

      rtx insn;

      FOR_BB_INSNS (bb, insn)
        if (DEBUG_INSN_P (insn))
          count--;

  return count;

/* Update number of blocks and the estimate for number of insns
   in the region.  Return true if the region is "too large" for interblock
   scheduling (compile time considerations).  */

too_large (int block, int *num_bbs, int *num_insns)

  (*num_bbs)++;
  (*num_insns) += (common_sched_info->estimate_number_of_insns
                   (BASIC_BLOCK_FOR_FN (cfun, block)));

  return ((*num_bbs > PARAM_VALUE (PARAM_MAX_SCHED_REGION_BLOCKS))
          || (*num_insns > PARAM_VALUE (PARAM_MAX_SCHED_REGION_INSNS)));

/* Update_loop_relations (blk, hdr): Check if the loop headed by max_hdr[blk]
   is still an inner loop.  Put in max_hdr[blk] the header of the innermost
   loop containing blk.  */
#define UPDATE_LOOP_RELATIONS(blk, hdr)                 \
{                                                       \
  if (max_hdr[blk] == -1)                               \
    max_hdr[blk] = hdr;                                 \
  else if (dfs_nr[max_hdr[blk]] > dfs_nr[hdr])          \
    bitmap_clear_bit (inner, hdr);                      \
  else if (dfs_nr[max_hdr[blk]] < dfs_nr[hdr])          \
    {                                                   \
      bitmap_clear_bit (inner, max_hdr[blk]);           \
      max_hdr[blk] = hdr;                               \
    }                                                   \
}
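
/* A worked example (hypothetical blocks): let BLK sit inside two nested
   natural loops, the outer headed by H1 and the inner by H2.  A DFS
   from the entry reaches H1 before H2, so dfs_nr[H1] < dfs_nr[H2].
   Seeing (BLK, H1) first sets max_hdr[BLK] = H1; seeing (BLK, H2) next
   takes the dfs_nr[max_hdr[blk]] < dfs_nr[hdr] branch, so H1 is marked
   as not inner and max_hdr[BLK] becomes H2, the innermost header.  */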
/* Find regions for interblock scheduling.

   A region for scheduling can be:

     * A loop-free procedure, or

     * A reducible inner loop, or

     * A basic block not contained in any other region.

   ?!? In theory we could build other regions based on extended basic
   blocks or reverse extended basic blocks.  Is it worth the trouble?

   Loop blocks that form a region are put into the region's block list
   in topological order.

   This procedure stores its results into the following global (ick)
   variables:

     nr_regions
     rgn_table
     rgn_bb_table
     block_to_bb
     containing_rgn

   We use dominator relationships to avoid making regions out of
   non-reducible loops.

   This procedure needs to be converted to work on pred/succ lists instead
   of edge tables.  That would simplify it somewhat.  */

haifa_find_rgns (void)

  int *max_hdr, *dfs_nr, *degree;
  int node, child, loop_head, i, head, tail;
  int count = 0, sp, idx = 0;
  edge_iterator current_edge;
  edge_iterator *stack;
  int num_bbs, num_insns, unreachable;
  int too_large_failure;

  /* Note if a block is a natural loop header.  */
  sbitmap header;

  /* Note if a block is a natural inner loop header.  */
  sbitmap inner;

  /* Note if a block is in the block queue.  */
  sbitmap in_queue;

  /* Note if a block is on the DFS stack.  */
  sbitmap in_stack;

  /* Perform a DFS traversal of the cfg.  Identify loop headers, inner loops
     and a mapping from block to its loop header (if the block is contained
     in a loop, else -1).

     Store results in HEADER, INNER, and MAX_HDR respectively, these will
     be used as inputs to the second traversal.

     STACK, SP and DFS_NR are only used during the first traversal.  */

  /* Allocate and initialize variables for the first traversal.  */
  max_hdr = XNEWVEC (int, last_basic_block);
  dfs_nr = XCNEWVEC (int, last_basic_block);
  stack = XNEWVEC (edge_iterator, n_edges_for_fn (cfun));

  inner = sbitmap_alloc (last_basic_block);
  bitmap_ones (inner);

  header = sbitmap_alloc (last_basic_block);
  bitmap_clear (header);

  in_queue = sbitmap_alloc (last_basic_block);
  bitmap_clear (in_queue);

  in_stack = sbitmap_alloc (last_basic_block);
  bitmap_clear (in_stack);

  for (i = 0; i < last_basic_block; i++)
    max_hdr[i] = -1;

#define EDGE_PASSED(E) (ei_end_p ((E)) || ei_edge ((E))->aux)
#define SET_EDGE_PASSED(E) (ei_edge ((E))->aux = ei_edge ((E)))
  /* DFS traversal to find inner loops in the cfg.  */

  current_edge = ei_start (single_succ (ENTRY_BLOCK_PTR_FOR_FN (cfun))->succs);
  sp = -1;

      if (EDGE_PASSED (current_edge))

          /* We have reached a leaf node or a node that was already
             processed.  Pop edges off the stack until we find
             an edge that has not yet been processed.  */
          while (sp >= 0 && EDGE_PASSED (current_edge))

              /* Pop entry off the stack.  */
              current_edge = stack[sp--];
              node = ei_edge (current_edge)->src->index;
              gcc_assert (node != ENTRY_BLOCK);
              child = ei_edge (current_edge)->dest->index;
              gcc_assert (child != EXIT_BLOCK);
              bitmap_clear_bit (in_stack, child);
              if (max_hdr[child] >= 0 && bitmap_bit_p (in_stack, max_hdr[child]))
                UPDATE_LOOP_RELATIONS (node, max_hdr[child]);
              ei_next (&current_edge);

          /* See if we have finished the DFS tree traversal.  */
          if (sp < 0 && EDGE_PASSED (current_edge))
            break;

          /* Nope, continue the traversal with the popped node.  */
          continue;

      /* Process a node.  */
      node = ei_edge (current_edge)->src->index;
      gcc_assert (node != ENTRY_BLOCK);
      bitmap_set_bit (in_stack, node);
      dfs_nr[node] = ++count;

      /* We don't traverse to the exit block.  */
      child = ei_edge (current_edge)->dest->index;
      if (child == EXIT_BLOCK)

          SET_EDGE_PASSED (current_edge);
          ei_next (&current_edge);
          continue;

      /* If the successor is in the stack, then we've found a loop.
         Mark the loop, if it is not a natural loop, then it will
         be rejected during the second traversal.  */
      if (bitmap_bit_p (in_stack, child))

          bitmap_set_bit (header, child);
          UPDATE_LOOP_RELATIONS (node, child);
          SET_EDGE_PASSED (current_edge);
          ei_next (&current_edge);
          continue;

      /* If the child was already visited, then there is no need to visit
         it again.  Just update the loop relationships and restart
         with this node.  */
      if (dfs_nr[child])

          if (max_hdr[child] >= 0 && bitmap_bit_p (in_stack, max_hdr[child]))
            UPDATE_LOOP_RELATIONS (node, max_hdr[child]);
          SET_EDGE_PASSED (current_edge);
          ei_next (&current_edge);
          continue;

      /* Push an entry on the stack and continue DFS traversal.  */
      stack[++sp] = current_edge;
      SET_EDGE_PASSED (current_edge);
      current_edge = ei_start (ei_edge (current_edge)->dest->succs);

  /* Reset ->aux field used by EDGE_PASSED.  */
  FOR_ALL_BB (bb)
    FOR_EACH_EDGE (e, ei, bb->succs)
      e->aux = NULL;

  /* Another check for unreachable blocks.  The earlier test in
     is_cfg_nonregular only finds unreachable blocks that do not
     form a loop.

     The DFS traversal will mark every block that is reachable from
     the entry node by placing a nonzero value in dfs_nr.  Thus if
     dfs_nr is zero for any block, then it must be unreachable.  */
  unreachable = 0;
  FOR_EACH_BB (bb)
    if (dfs_nr[bb->index] == 0)

        unreachable = 1;
        break;

  /* Gross.  To avoid wasting memory, the second pass uses the dfs_nr array
     to hold degree counts.  */
  degree = dfs_nr;

  FOR_EACH_BB (bb)
    degree[bb->index] = EDGE_COUNT (bb->preds);

  /* Do not perform region scheduling if there are any unreachable
     blocks.  */
  if (!unreachable)

      int *queue, *degree1 = NULL;
      /* We use EXTENDED_RGN_HEADER as an addition to HEADER and put
         there basic blocks, which are forced to be region heads.
         This is done to try to assemble a few smaller regions
         from a too_large region.  */
      sbitmap extended_rgn_header = NULL;
      bool extend_regions_p;
      bitmap_set_bit (header, 0);

      /* Second traversal: find reducible inner loops and topologically sort
         blocks of each region.  */

      queue = XNEWVEC (int, n_basic_blocks_for_fn (cfun));

      extend_regions_p = PARAM_VALUE (PARAM_MAX_SCHED_EXTEND_REGIONS_ITERS) > 0;
      if (extend_regions_p)

          degree1 = XNEWVEC (int, last_basic_block);
          extended_rgn_header = sbitmap_alloc (last_basic_block);
          bitmap_clear (extended_rgn_header);

      /* Find blocks which are inner loop headers.  We still have non-reducible
         loops to consider at this point.  */
      FOR_EACH_BB (bb)

        if (bitmap_bit_p (header, bb->index) && bitmap_bit_p (inner, bb->index))

            /* Now check that the loop is reducible.  We do this separately
               from finding inner loops so that we do not find a reducible
               loop which contains an inner non-reducible loop.

               A simple way to find reducible/natural loops is to verify
               that each block in the loop is dominated by the loop
               header.

               If there exists a block that is not dominated by the loop
               header, then the block is reachable from outside the loop
               and thus the loop is not a natural loop.  */
            FOR_EACH_BB (jbb)

                /* First identify blocks in the loop, except for the loop
                   entry block.  */
                if (bb->index == max_hdr[jbb->index] && bb != jbb)

                    /* Now verify that the block is dominated by the loop
                       header.  */
                    if (!dominated_by_p (CDI_DOMINATORS, jbb, bb))
                      break;

            /* If we exited the loop early, then I is the header of
               a non-reducible loop and we should quit processing it
               now.  */
            if (jbb != EXIT_BLOCK_PTR_FOR_FN (cfun))
              continue;

            /* I is a header of an inner loop, or block 0 in a subroutine
               with no loops at all.  */
            head = tail = -1;
            too_large_failure = 0;
            loop_head = max_hdr[bb->index];

            if (extend_regions_p)
              /* We save degree in case we meet a too_large region
                 and cancel it.  We need a correct degree later when
                 calling extend_rgns.  */
              memcpy (degree1, degree, last_basic_block * sizeof (int));

            /* Decrease degree of all I's successors for topological
               ordering.  */
            FOR_EACH_EDGE (e, ei, bb->succs)
              if (e->dest != EXIT_BLOCK_PTR_FOR_FN (cfun))
                --degree[e->dest->index];

            /* Estimate # insns, and count # blocks in the region.  */
            num_bbs = 1;
            num_insns = common_sched_info->estimate_number_of_insns (bb);

            /* Find all loop latches (blocks with back edges to the loop
               header) or all the leaf blocks if the cfg has no loops.

               Place those blocks into the queue.  */
                /* Leaf nodes have only a single successor which must
                   be EXIT_BLOCK.  */
                if (single_succ_p (jbb)
                    && single_succ (jbb) == EXIT_BLOCK_PTR_FOR_FN (cfun))

                    queue[++tail] = jbb->index;
                    bitmap_set_bit (in_queue, jbb->index);

                    if (too_large (jbb->index, &num_bbs, &num_insns))

                        too_large_failure = 1;
                        break;

            else
              FOR_EACH_EDGE (e, ei, bb->preds)

                  if (e->src == ENTRY_BLOCK_PTR_FOR_FN (cfun))
                    continue;

                  node = e->src->index;

                  if (max_hdr[node] == loop_head && node != bb->index)

                      /* This is a loop latch.  */
                      queue[++tail] = node;
                      bitmap_set_bit (in_queue, node);

                      if (too_large (node, &num_bbs, &num_insns))

                          too_large_failure = 1;
                          break;

            /* Now add all the blocks in the loop to the queue.

               We know the loop is a natural loop; however the algorithm
               above will not always mark certain blocks as being in the
               loop.  Consider:

                  node    children
                   a        b,c
                   b        c
                   c        a,d
                   d        b

               The algorithm in the DFS traversal may not mark B & D as part
               of the loop (i.e. they will not have max_hdr set to A).

               We know they cannot be loop latches (else they would have
               had max_hdr set since they'd have a backedge to a dominator
               block).  So we don't need them on the initial queue.

               We know they are part of the loop because they are dominated
               by the loop header and can be reached by a backwards walk of
               the edges starting with nodes on the initial queue.

               It is safe and desirable to include those nodes in the
               loop/scheduling region.  To do so we would need to decrease
               the degree of a node if it is the target of a backedge
               within the loop itself as the node is placed in the queue.

               We do not do this because I'm not sure that the actual
               scheduling code will properly handle this case. ?!? */
            while (head < tail && !too_large_failure)

                child = queue[++head];

                FOR_EACH_EDGE (e, ei,
                               BASIC_BLOCK_FOR_FN (cfun, child)->preds)

                    node = e->src->index;

                    /* See discussion above about nodes not marked as in
                       this loop during the initial DFS traversal.  */
                    if (e->src == ENTRY_BLOCK_PTR_FOR_FN (cfun)
                        || max_hdr[node] != loop_head)

                        tail = -1;
                        break;

                    else if (!bitmap_bit_p (in_queue, node) && node != bb->index)

                        queue[++tail] = node;
                        bitmap_set_bit (in_queue, node);

                        if (too_large (node, &num_bbs, &num_insns))

                            too_large_failure = 1;
                            break;

            if (tail >= 0 && !too_large_failure)

                /* Place the loop header into list of region blocks.  */
                degree[bb->index] = -1;
                rgn_bb_table[idx] = bb->index;
                RGN_NR_BLOCKS (nr_regions) = num_bbs;
                RGN_BLOCKS (nr_regions) = idx++;
                RGN_DONT_CALC_DEPS (nr_regions) = 0;
                RGN_HAS_REAL_EBB (nr_regions) = 0;
                CONTAINING_RGN (bb->index) = nr_regions;
                BLOCK_TO_BB (bb->index) = count = 0;

                /* Remove blocks from queue[] when their in degree
                   becomes zero.  Repeat until no blocks are left on the
                   list.  This produces a topological list of blocks in
                   the region.  */

                    if (degree[child] == 0)

                        rgn_bb_table[idx++] = child;
                        BLOCK_TO_BB (child) = ++count;
                        CONTAINING_RGN (child) = nr_regions;
                        queue[head] = queue[tail--];

                        FOR_EACH_EDGE (e, ei,
                                       BASIC_BLOCK_FOR_FN (cfun,
                                                           child)->succs)
                          if (e->dest != EXIT_BLOCK_PTR_FOR_FN (cfun))
                            --degree[e->dest->index];

            else if (extend_regions_p)

                /* Restore DEGREE.  */

                /* And force successors of BB to be region heads.
                   This may provide several smaller regions instead
                   of one too_large region.  */
                FOR_EACH_EDGE (e, ei, bb->succs)
                  if (e->dest != EXIT_BLOCK_PTR_FOR_FN (cfun))
                    bitmap_set_bit (extended_rgn_header, e->dest->index);

      if (extend_regions_p)

          free (degree1);

          bitmap_ior (header, header, extended_rgn_header);
          sbitmap_free (extended_rgn_header);

          extend_rgns (degree, &idx, header, max_hdr);

  /* Any block that did not end up in a region is placed into a region
     by itself.  */
  FOR_EACH_BB (bb)
    if (degree[bb->index] >= 0)

        rgn_bb_table[idx] = bb->index;
        RGN_NR_BLOCKS (nr_regions) = 1;
        RGN_BLOCKS (nr_regions) = idx++;
        RGN_DONT_CALC_DEPS (nr_regions) = 0;
        RGN_HAS_REAL_EBB (nr_regions) = 0;
        CONTAINING_RGN (bb->index) = nr_regions++;
        BLOCK_TO_BB (bb->index) = 0;

  sbitmap_free (header);
  sbitmap_free (inner);
  sbitmap_free (in_queue);
  sbitmap_free (in_stack);
/* Wrapper function.
   If FLAG_SEL_SCHED_PIPELINING is set, then use custom function to form
   regions.  Otherwise just call haifa_find_rgns.  */

  if (sel_sched_p () && flag_sel_sched_pipelining)
    sel_find_rgns ();
  else
    haifa_find_rgns ();

static int gather_region_statistics (int **);
static void print_region_statistics (int *, int, int *, int);

/* Calculate the histogram that shows the number of regions having the
   given number of basic blocks, and store it in the RSP array.  Return
   the size of this array.  */

gather_region_statistics (int **rsp)

  int i, *a = 0, a_sz = 0;

  /* a[i] is the number of regions that have (i + 1) basic blocks.  */
  for (i = 0; i < nr_regions; i++)

      int nr_blocks = RGN_NR_BLOCKS (i);

      gcc_assert (nr_blocks >= 1);

      if (nr_blocks > a_sz)

          a = XRESIZEVEC (int, a, nr_blocks);
          do
            a[a_sz++] = 0;
          while (a_sz != nr_blocks);

      a[nr_blocks - 1]++;

  *rsp = a;
  return a_sz;
/* Print regions statistics.  S1 and S2 denote the data before and after
   calling extend_rgns, respectively.  */

print_region_statistics (int *s1, int s1_sz, int *s2, int s2_sz)

  /* We iterate until s2_sz because extend_rgns does not decrease
     the maximal region size.  */
  for (i = 1; i < s2_sz; i++)

      fprintf (sched_dump, ";; Region extension statistics: size %d: " \
               "was %d + %d more\n", i + 1, n1, n2 - n1);

/* Extend regions.
   DEGREE - Array of incoming edge count, considering only
   the edges that don't have their sources in formed regions yet.
   IDXP - pointer to the next available index in rgn_bb_table.
   HEADER - set of all region heads.
   LOOP_HDR - mapping from block to the containing loop
   (two blocks can reside within one region if they have
   the same loop header).  */

extend_rgns (int *degree, int *idxp, sbitmap header, int *loop_hdr)

  int *order, i, rescan = 0, idx = *idxp, iter = 0, max_iter, *max_hdr;
  int nblocks = n_basic_blocks_for_fn (cfun) - NUM_FIXED_BLOCKS;

  max_iter = PARAM_VALUE (PARAM_MAX_SCHED_EXTEND_REGIONS_ITERS);

  max_hdr = XNEWVEC (int, last_basic_block);

  order = XNEWVEC (int, last_basic_block);
  post_order_compute (order, false, false);

  for (i = nblocks - 1; i >= 0; i--)

      int bbn = order[i];
      if (degree[bbn] >= 0)

          max_hdr[bbn] = bbn;
          rescan = 1;

      else
        /* This block was already processed in find_rgns.  */
        max_hdr[bbn] = -1;

  /* The idea is to topologically walk through CFG in top-down order.
     During the traversal, if all the predecessors of a node are
     marked to be in the same region (they all have the same max_hdr),
     then the current node is also marked to be a part of that region.
     Otherwise the node starts its own region.
     CFG should be traversed until no further changes are made.  On each
     iteration the set of the region heads is extended (the set of those
     blocks that have max_hdr[bbi] == bbi).  This set is upper bounded by the
     set of all basic blocks, thus the algorithm is guaranteed to
     terminate.  */
  while (rescan && iter < max_iter)

      rescan = 0;

      for (i = nblocks - 1; i >= 0; i--)

          int bbn = order[i];

          if (max_hdr[bbn] != -1 && !bitmap_bit_p (header, bbn))

              int hdr = -1;

              FOR_EACH_EDGE (e, ei, BASIC_BLOCK_FOR_FN (cfun, bbn)->preds)

                  int predn = e->src->index;

                  if (predn != ENTRY_BLOCK
                      /* If pred wasn't processed in find_rgns.  */
                      && max_hdr[predn] != -1
                      /* And pred and bb reside in the same loop.
                         (Or out of any loop).  */
                      && loop_hdr[bbn] == loop_hdr[predn])

                      if (hdr == -1)
                        /* Then bb extends the containing region of pred.  */
                        hdr = max_hdr[predn];
                      else if (hdr != max_hdr[predn])
                        /* Too bad, there are at least two predecessors
                           that reside in different regions.  Thus, BB should
                           begin its own region.  */

                          hdr = bbn;
                          break;

                  else
                    /* BB starts its own region.  */

                      hdr = bbn;
                      break;

              if (hdr == bbn)

                  /* If BB starts its own region,
                     update the set of headers with BB.  */
                  bitmap_set_bit (header, bbn);
                  rescan = 1;

              else
                gcc_assert (hdr != -1);

              max_hdr[bbn] = hdr;

      iter++;

  /* Statistics were gathered on the SPEC2000 package of tests with
     mainline weekly snapshot gcc-4.1-20051015 on ia64.

     Statistics for SPECint:
     1 iteration : 1751 cases (38.7%)
     2 iterations: 2770 cases (61.3%)
     Blocks wrapped in regions by find_rgns without extension: 18295 blocks
     Blocks wrapped in regions by 2 iterations in extend_rgns: 23821 blocks
     (We don't count single block regions here).

     Statistics for SPECfp:
     1 iteration : 621 cases (35.9%)
     2 iterations: 1110 cases (64.1%)
     Blocks wrapped in regions by find_rgns without extension: 6476 blocks
     Blocks wrapped in regions by 2 iterations in extend_rgns: 11155 blocks
     (We don't count single block regions here).

     By default we do at most 2 iterations.
     This can be overridden with the max-sched-extend-regions-iters parameter:
     0 - disable region extension,
     N > 0 - do at most N iterations.  */

  if (sched_verbose && iter != 0)
    fprintf (sched_dump, ";; Region extension iterations: %d%s\n", iter,
             rescan ? "... failed" : "");
  if (!rescan && iter != 0)

      int *s1 = NULL, s1_sz = 0;

      /* Save the old statistics for later printout.  */
      if (sched_verbose >= 6)
        s1_sz = gather_region_statistics (&s1);

      /* We have succeeded.  Now assemble the regions.  */
      for (i = nblocks - 1; i >= 0; i--)

          int bbn = order[i];

          if (max_hdr[bbn] == bbn)
            /* BBN is a region head.  */

              int num_bbs = 0, j, num_insns = 0, large;

              large = too_large (bbn, &num_bbs, &num_insns);

              rgn_bb_table[idx] = bbn;
              RGN_BLOCKS (nr_regions) = idx++;
              RGN_DONT_CALC_DEPS (nr_regions) = 0;
              RGN_HAS_REAL_EBB (nr_regions) = 0;
              CONTAINING_RGN (bbn) = nr_regions;
              BLOCK_TO_BB (bbn) = 0;

              FOR_EACH_EDGE (e, ei, BASIC_BLOCK_FOR_FN (cfun, bbn)->succs)
                if (e->dest != EXIT_BLOCK_PTR_FOR_FN (cfun))
                  degree[e->dest->index]--;

              if (!large)
                /* Here we check whether the region is too_large.  */
                for (j = i - 1; j >= 0; j--)

                    int succn = order[j];
                    if (max_hdr[succn] == bbn)

                        if ((large = too_large (succn, &num_bbs, &num_insns)))
                          break;

              if (large)
                /* If the region is too_large, then wrap every block of
                   the region into a single block region.
                   Here we wrap the region head only.  Other blocks are
                   processed in the loop below.  */

                  RGN_NR_BLOCKS (nr_regions) = 1;

              for (j = i - 1; j >= 0; j--)

                  int succn = order[j];

                  if (max_hdr[succn] == bbn)
                    /* This loop iterates over all basic blocks that
                       are supposed to be in the region with head BBN,
                       and wraps them into that region (or into a single
                       block region).  */

                      gcc_assert (degree[succn] == 0);

                      degree[succn] = -1;
                      rgn_bb_table[idx] = succn;
                      BLOCK_TO_BB (succn) = large ? 0 : num_bbs++;
                      CONTAINING_RGN (succn) = nr_regions;

                      if (large)
                        /* Wrap SUCCN into single block region.  */

                          RGN_BLOCKS (nr_regions) = idx;
                          RGN_NR_BLOCKS (nr_regions) = 1;
                          RGN_DONT_CALC_DEPS (nr_regions) = 0;
                          RGN_HAS_REAL_EBB (nr_regions) = 0;
                          nr_regions++;

                      idx++;

                      FOR_EACH_EDGE (e, ei,
                                     BASIC_BLOCK_FOR_FN (cfun, succn)->succs)
                        if (e->dest != EXIT_BLOCK_PTR_FOR_FN (cfun))
                          degree[e->dest->index]--;

              if (!large)

                  RGN_NR_BLOCKS (nr_regions) = num_bbs;
                  nr_regions++;

      if (sched_verbose >= 6)

          int *s2, s2_sz;

          /* Get the new statistics and print the comparison with the
             one before calling this function.  */
          s2_sz = gather_region_statistics (&s2);
          print_region_statistics (s1, s1_sz, s2, s2_sz);
/* Functions for regions scheduling information.  */

/* Compute dominators, probability, and potential-split-edges of bb.
   Assume that these values were already computed for bb's predecessors.  */

compute_dom_prob_ps (int bb)

  edge_iterator in_ei;
  edge in_edge;

  /* We shouldn't have any real ebbs yet.  */
  gcc_assert (ebb_head [bb] == bb + current_blocks);

  if (IS_RGN_ENTRY (bb))

      bitmap_set_bit (dom[bb], 0);
      prob[bb] = REG_BR_PROB_BASE;
      return;

  prob[bb] = 0;

  /* Initialize dom[bb] to '111..1'.  */
  bitmap_ones (dom[bb]);

  FOR_EACH_EDGE (in_edge, in_ei,
                 BASIC_BLOCK_FOR_FN (cfun, BB_TO_BLOCK (bb))->preds)

      int pred_bb;
      edge out_edge;
      edge_iterator out_ei;

      if (in_edge->src == ENTRY_BLOCK_PTR_FOR_FN (cfun))
        continue;

      pred_bb = BLOCK_TO_BB (in_edge->src->index);
      bitmap_and (dom[bb], dom[bb], dom[pred_bb]);
      bitmap_ior (ancestor_edges[bb],
                  ancestor_edges[bb], ancestor_edges[pred_bb]);

      bitmap_set_bit (ancestor_edges[bb], EDGE_TO_BIT (in_edge));

      bitmap_ior (pot_split[bb], pot_split[bb], pot_split[pred_bb]);

      FOR_EACH_EDGE (out_edge, out_ei, in_edge->src->succs)
        bitmap_set_bit (pot_split[bb], EDGE_TO_BIT (out_edge));

      prob[bb] += combine_probabilities (prob[pred_bb], in_edge->probability);
      /* The rounding divide in combine_probabilities can result in an extra
         probability increment propagating along 50-50 edges.  Eventually when
         the edges re-merge, the accumulated probability can go slightly above
         REG_BR_PROB_BASE.  */
      if (prob[bb] > REG_BR_PROB_BASE)
        prob[bb] = REG_BR_PROB_BASE;

  bitmap_set_bit (dom[bb], bb);
  bitmap_and_compl (pot_split[bb], pot_split[bb], ancestor_edges[bb]);

  if (sched_verbose >= 2)
    fprintf (sched_dump, ";;  bb_prob(%d, %d) = %3d\n", bb, BB_TO_BLOCK (bb),
             (100 * prob[bb]) / REG_BR_PROB_BASE);
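
/* A small worked example of the probability propagation above
   (hypothetical numbers): with REG_BR_PROB_BASE == 10000, if the region
   entry E branches to B with probability 6000 (60%) and B falls through
   to C unconditionally, then prob[E] = 10000, prob[B] =
   combine_probabilities (10000, 6000) = 6000, and prob[C] = 6000; the
   clamp above only matters when paths re-merge and rounding pushes the
   accumulated sum past REG_BR_PROB_BASE.  */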
/* Functions for target info.  */

/* Compute in BL the list of split-edges of bb_src relative to bb_trg.
   Note that bb_trg dominates bb_src.  */

split_edges (int bb_src, int bb_trg, edgelst *bl)

  sbitmap src = sbitmap_alloc (SBITMAP_SIZE (pot_split[bb_src]));
  bitmap_copy (src, pot_split[bb_src]);

  bitmap_and_compl (src, src, pot_split[bb_trg]);
  extract_edgelst (src, bl);
  sbitmap_free (src);

/* Find the valid candidate-source-blocks for the target block TRG, compute
   their probability, and check if they are speculative or not.
   For speculative sources, compute their update-blocks and split-blocks.  */

compute_trg_info (int trg)

  candidate *sp;
  edgelst el = { NULL, 0 };
  int i, j, k, update_idx;
  basic_block block;
  sbitmap visited;
  edge_iterator ei;
  edge e;

  candidate_table = XNEWVEC (candidate, current_nr_blocks);

  bblst_last = 0;
  /* bblst_table holds split blocks and update blocks for each block after
     the current one in the region.  Split blocks and update blocks are
     the TO blocks of region edges, so there can be at most rgn_nr_edges
     of them.  */
  bblst_size = (current_nr_blocks - target_bb) * rgn_nr_edges;
  bblst_table = XNEWVEC (basic_block, bblst_size);

  edgelst_last = 0;
  edgelst_table = XNEWVEC (edge, rgn_nr_edges);

  /* Define some of the fields for the target bb as well.  */
  sp = candidate_table + trg;
  sp->is_valid = 1;
  sp->is_speculative = 0;
  sp->src_prob = REG_BR_PROB_BASE;

  visited = sbitmap_alloc (last_basic_block);

  for (i = trg + 1; i < current_nr_blocks; i++)

      sp = candidate_table + i;

      sp->is_valid = IS_DOMINATED (i, trg);
      if (sp->is_valid)

          int tf = prob[trg], cf = prob[i];

          /* In CFGs with low probability edges TF can possibly be zero.  */
          sp->src_prob = (tf ? GCOV_COMPUTE_SCALE (cf, tf) : 0);
          sp->is_valid = (sp->src_prob >= min_spec_prob);

      if (sp->is_valid)

          split_edges (i, trg, &el);
          sp->is_speculative = (el.nr_members) ? 1 : 0;
          if (sp->is_speculative && !flag_schedule_speculative)
            sp->is_valid = 0;

      if (sp->is_valid)

          /* Compute split blocks and store them in bblst_table.
             The TO block of every split edge is a split block.  */
          sp->split_bbs.first_member = &bblst_table[bblst_last];
          sp->split_bbs.nr_members = el.nr_members;
          for (j = 0; j < el.nr_members; bblst_last++, j++)
            bblst_table[bblst_last] = el.first_member[j]->dest;
          sp->update_bbs.first_member = &bblst_table[bblst_last];

          /* Compute update blocks and store them in bblst_table.
             For every split edge, look at the FROM block, and check
             all out edges.  For each out edge that is not a split edge,
             add the TO block to the update block list.  This list can end
             up with a lot of duplicates.  We need to weed them out to avoid
             overrunning the end of the bblst_table.  */

          update_idx = 0;
          bitmap_clear (visited);
          for (j = 0; j < el.nr_members; j++)

              block = el.first_member[j]->src;
              FOR_EACH_EDGE (e, ei, block->succs)

                  if (!bitmap_bit_p (visited, e->dest->index))

                      for (k = 0; k < el.nr_members; k++)
                        if (e == el.first_member[k])
                          break;

                      if (k >= el.nr_members)

                          bblst_table[bblst_last++] = e->dest;
                          bitmap_set_bit (visited, e->dest->index);
                          update_idx++;

          sp->update_bbs.nr_members = update_idx;

          /* Make sure we didn't overrun the end of bblst_table.  */
          gcc_assert (bblst_last <= bblst_size);

      else

          sp->split_bbs.nr_members = sp->update_bbs.nr_members = 0;

          sp->is_speculative = 0;
          sp->src_prob = 0;

  sbitmap_free (visited);

/* Free the computed target info.  */

free_trg_info (void)

  free (candidate_table);
  free (bblst_table);
  free (edgelst_table);
/* Print candidates info, for debugging purposes.  Callable from debugger.  */

debug_candidate (int i)

  if (!candidate_table[i].is_valid)
    return;

  if (candidate_table[i].is_speculative)

      int j;
      fprintf (sched_dump, "src b %d bb %d speculative \n", BB_TO_BLOCK (i), i);

      fprintf (sched_dump, "split path: ");
      for (j = 0; j < candidate_table[i].split_bbs.nr_members; j++)

          int b = candidate_table[i].split_bbs.first_member[j]->index;

          fprintf (sched_dump, " %d ", b);

      fprintf (sched_dump, "\n");

      fprintf (sched_dump, "update path: ");
      for (j = 0; j < candidate_table[i].update_bbs.nr_members; j++)

          int b = candidate_table[i].update_bbs.first_member[j]->index;

          fprintf (sched_dump, " %d ", b);

      fprintf (sched_dump, "\n");

  else

      fprintf (sched_dump, " src %d equivalent\n", BB_TO_BLOCK (i));

/* Print candidates info, for debugging purposes.  Callable from debugger.  */

debug_candidates (int trg)

  int i;

  fprintf (sched_dump, "----------- candidate table: target: b=%d bb=%d ---\n",
           BB_TO_BLOCK (trg), trg);
  for (i = trg + 1; i < current_nr_blocks; i++)
    debug_candidate (i);
/* Functions for speculative scheduling.  */

static bitmap_head not_in_df;

/* Return 0 if x is a set of a register alive in the beginning of one
   of the split-blocks of src, otherwise return 1.  */

check_live_1 (int src, rtx x)

  int i;
  int regno;
  rtx reg = SET_DEST (x);

  while (GET_CODE (reg) == SUBREG
         || GET_CODE (reg) == ZERO_EXTRACT
         || GET_CODE (reg) == STRICT_LOW_PART)
    reg = XEXP (reg, 0);

  if (GET_CODE (reg) == PARALLEL)

      int i;

      for (i = XVECLEN (reg, 0) - 1; i >= 0; i--)
        if (XEXP (XVECEXP (reg, 0, i), 0) != 0)
          if (check_live_1 (src, XEXP (XVECEXP (reg, 0, i), 0)))
            return 1;

      return 0;

  regno = REGNO (reg);

  if (regno < FIRST_PSEUDO_REGISTER && global_regs[regno])

      /* Global registers are assumed live.  */
      return 0;

  else

      if (regno < FIRST_PSEUDO_REGISTER)

          /* Check for hard registers.  */
          int j = hard_regno_nregs[regno][GET_MODE (reg)];
          while (--j >= 0)

              for (i = 0; i < candidate_table[src].split_bbs.nr_members; i++)

                  basic_block b = candidate_table[src].split_bbs.first_member[i];
                  int t = bitmap_bit_p (&not_in_df, b->index);

                  /* We can have split blocks that were recently generated.
                     Such blocks are always outside the current region.  */
                  gcc_assert (!t || (CONTAINING_RGN (b->index)
                                     != CONTAINING_RGN (BB_TO_BLOCK (src))));

                  if (t || REGNO_REG_SET_P (df_get_live_in (b), regno + j))
                    return 0;

      else

          /* Check for pseudo registers.  */
          for (i = 0; i < candidate_table[src].split_bbs.nr_members; i++)

              basic_block b = candidate_table[src].split_bbs.first_member[i];
              int t = bitmap_bit_p (&not_in_df, b->index);

              gcc_assert (!t || (CONTAINING_RGN (b->index)
                                 != CONTAINING_RGN (BB_TO_BLOCK (src))));

              if (t || REGNO_REG_SET_P (df_get_live_in (b), regno))
                return 0;

  return 1;

/* If x is a set of a register R, mark that R is alive in the beginning
   of every update-block of src.  */

update_live_1 (int src, rtx x)

  int i;
  int regno;
  rtx reg = SET_DEST (x);

  while (GET_CODE (reg) == SUBREG
         || GET_CODE (reg) == ZERO_EXTRACT
         || GET_CODE (reg) == STRICT_LOW_PART)
    reg = XEXP (reg, 0);

  if (GET_CODE (reg) == PARALLEL)

      int i;

      for (i = XVECLEN (reg, 0) - 1; i >= 0; i--)
        if (XEXP (XVECEXP (reg, 0, i), 0) != 0)
          update_live_1 (src, XEXP (XVECEXP (reg, 0, i), 0));

      return;

  /* Global registers are always live, so the code below does not apply
     to them.  */

  regno = REGNO (reg);

  if (! HARD_REGISTER_NUM_P (regno)
      || !global_regs[regno])

      for (i = 0; i < candidate_table[src].update_bbs.nr_members; i++)

          basic_block b = candidate_table[src].update_bbs.first_member[i];

          if (HARD_REGISTER_NUM_P (regno))
            bitmap_set_range (df_get_live_in (b), regno,
                              hard_regno_nregs[regno][GET_MODE (reg)]);
          else
            bitmap_set_bit (df_get_live_in (b), regno);
/* Return 1 if insn can be speculatively moved from block src to trg,
   otherwise return 0.  Called before first insertion of insn to
   ready-list or before the scheduling.  */

check_live (rtx insn, int src)

  /* Find the registers set by the instruction.  */
  if (GET_CODE (PATTERN (insn)) == SET
      || GET_CODE (PATTERN (insn)) == CLOBBER)
    return check_live_1 (src, PATTERN (insn));
  else if (GET_CODE (PATTERN (insn)) == PARALLEL)

      int j;
      for (j = XVECLEN (PATTERN (insn), 0) - 1; j >= 0; j--)
        if ((GET_CODE (XVECEXP (PATTERN (insn), 0, j)) == SET
             || GET_CODE (XVECEXP (PATTERN (insn), 0, j)) == CLOBBER)
            && !check_live_1 (src, XVECEXP (PATTERN (insn), 0, j)))
          return 0;

      return 1;

/* Update the live registers info after insn was moved speculatively from
   block src to trg.  */

update_live (rtx insn, int src)

  /* Find the registers set by the instruction.  */
  if (GET_CODE (PATTERN (insn)) == SET
      || GET_CODE (PATTERN (insn)) == CLOBBER)
    update_live_1 (src, PATTERN (insn));
  else if (GET_CODE (PATTERN (insn)) == PARALLEL)

      int j;
      for (j = XVECLEN (PATTERN (insn), 0) - 1; j >= 0; j--)
        if (GET_CODE (XVECEXP (PATTERN (insn), 0, j)) == SET
            || GET_CODE (XVECEXP (PATTERN (insn), 0, j)) == CLOBBER)
          update_live_1 (src, XVECEXP (PATTERN (insn), 0, j));

/* Nonzero if block bb_to is equal to, or reachable from block bb_from.  */
#define IS_REACHABLE(bb_from, bb_to)					\
  (bb_from == bb_to							\
   || IS_RGN_ENTRY (bb_from)						\
   || (bitmap_bit_p (ancestor_edges[bb_to],				\
         EDGE_TO_BIT (single_pred_edge (BASIC_BLOCK_FOR_FN (cfun,	\
              BB_TO_BLOCK (bb_from)))))))
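
/* How that test works: ancestor_edges[bb_to] records every region edge
   lying on some path from the region entry to bb_to (see
   compute_dom_prob_ps above).  If bb_from's single incoming edge is in
   that set, bb_from sits on such a path, i.e. bb_to is reachable from
   bb_from.  */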
/* Turns on the fed_by_spec_load flag for insns fed by load_insn.  */

set_spec_fed (rtx load_insn)

  sd_iterator_def sd_it;
  dep_t dep;

  FOR_EACH_DEP (load_insn, SD_LIST_FORW, sd_it, dep)
    if (DEP_TYPE (dep) == REG_DEP_TRUE)
      FED_BY_SPEC_LOAD (DEP_CON (dep)) = 1;

/* On the path from the insn to load_insn_bb, find a conditional
   branch depending on insn that guards the speculative load.  */

find_conditional_protection (rtx insn, int load_insn_bb)

  sd_iterator_def sd_it;
  dep_t dep;

  /* Iterate through DEF-USE forward dependences.  */
  FOR_EACH_DEP (insn, SD_LIST_FORW, sd_it, dep)

      rtx next = DEP_CON (dep);

      if ((CONTAINING_RGN (BLOCK_NUM (next)) ==
           CONTAINING_RGN (BB_TO_BLOCK (load_insn_bb)))
          && IS_REACHABLE (INSN_BB (next), load_insn_bb)
          && load_insn_bb != INSN_BB (next)
          && DEP_TYPE (dep) == REG_DEP_TRUE
          && (JUMP_P (next)
              || find_conditional_protection (next, load_insn_bb)))
        return 1;

  return 0;
} /* find_conditional_protection */
/* Returns 1 if the same insn1 that participates in the computation
   of load_insn's address is feeding a conditional branch that is
   guarding load_insn.  This is true if we find two DEF-USE
   dependence chains:
   insn1 -> ... -> conditional-branch
   insn1 -> ... -> load_insn,
   and if a flow path exists:
   insn1 -> ... -> conditional-branch -> ... -> load_insn,
   and if insn1 is on the path
   region-entry -> ... -> bb_trg -> ... load_insn.

   Locate insn1 by climbing on INSN_BACK_DEPS from load_insn.
   Locate the branch by following INSN_FORW_DEPS from insn1.  */

is_conditionally_protected (rtx load_insn, int bb_src, int bb_trg)

  sd_iterator_def sd_it;
  dep_t dep;

  FOR_EACH_DEP (load_insn, SD_LIST_BACK, sd_it, dep)

      rtx insn1 = DEP_PRO (dep);

      /* Must be a DEF-USE dependence upon non-branch.  */
      if (DEP_TYPE (dep) != REG_DEP_TRUE
          || JUMP_P (insn1))
        continue;

      /* Must exist a path: region-entry -> ... -> bb_trg -> ... load_insn.  */
      if (INSN_BB (insn1) == bb_src
          || (CONTAINING_RGN (BLOCK_NUM (insn1))
              != CONTAINING_RGN (BB_TO_BLOCK (bb_src)))
          || (!IS_REACHABLE (bb_trg, INSN_BB (insn1))
              && !IS_REACHABLE (INSN_BB (insn1), bb_trg)))
        continue;

      /* Now search for the conditional-branch.  */
      if (find_conditional_protection (insn1, bb_src))
        return 1;

      /* Recursive step: search another insn1, "above" current insn1.  */
      return is_conditionally_protected (insn1, bb_src, bb_trg);

  /* The chain does not exist.  */
  return 0;
} /* is_conditionally_protected */

/* Returns 1 if a clue for "similar load" 'insn2' is found, and hence
   load_insn can move speculatively from bb_src to bb_trg.  All the
   following must hold:

   (1) both loads have 1 base register (PFREE_CANDIDATEs).
   (2) load_insn and insn2 have a def-use dependence upon
   the same insn 'insn1'.
   (3) either insn2 is in bb_trg, or:
   - there's only one split-block, and
   - insn2 is on the escape path, and

   From all these we can conclude that the two loads access memory
   addresses that differ at most by a constant, and hence if moving
   load_insn would cause an exception, it would have been caused by
   insn2 anyhow.  */

is_pfree (rtx load_insn, int bb_src, int bb_trg)

  sd_iterator_def back_sd_it;
  dep_t back_dep;
  candidate *candp = candidate_table + bb_src;

  if (candp->split_bbs.nr_members != 1)
    /* Must have exactly one escape block.  */
    return 0;

  FOR_EACH_DEP (load_insn, SD_LIST_BACK, back_sd_it, back_dep)

      rtx insn1 = DEP_PRO (back_dep);

      if (DEP_TYPE (back_dep) == REG_DEP_TRUE)
        /* Found a DEF-USE dependence (insn1, load_insn).  */

          sd_iterator_def fore_sd_it;
          dep_t fore_dep;

          FOR_EACH_DEP (insn1, SD_LIST_FORW, fore_sd_it, fore_dep)

              rtx insn2 = DEP_CON (fore_dep);

              if (DEP_TYPE (fore_dep) == REG_DEP_TRUE)

                  /* Found a DEF-USE dependence (insn1, insn2).  */
                  if (haifa_classify_insn (insn2) != PFREE_CANDIDATE)
                    /* insn2 not guaranteed to be a 1 base reg load.  */
                    continue;

                  if (INSN_BB (insn2) == bb_trg)
                    /* insn2 is the similar load, in the target block.  */
                    return 1;

                  if (*(candp->split_bbs.first_member) == BLOCK_FOR_INSN (insn2))
                    /* insn2 is a similar load, in a split-block.  */
                    return 1;

  /* Couldn't find a similar load.  */
  return 0;
} /* is_pfree */
/* Return 1 if load_insn is prisky (i.e. if load_insn is fed by
   a load moved speculatively, or if load_insn is protected by
   a compare on load_insn's address).  */

is_prisky (rtx load_insn, int bb_src, int bb_trg)

  if (FED_BY_SPEC_LOAD (load_insn))
    return 1;

  if (sd_lists_empty_p (load_insn, SD_LIST_BACK))
    /* Dependence may 'hide' out of the region.  */
    return 1;

  if (is_conditionally_protected (load_insn, bb_src, bb_trg))
    return 0;

  return 1;

/* Insn is a candidate to be moved speculatively from bb_src to bb_trg.
   Return 1 if insn is exception-free (and the motion is valid)
   and 0 otherwise.  */

is_exception_free (rtx insn, int bb_src, int bb_trg)

  int insn_class = haifa_classify_insn (insn);

  /* Handle non-load insns.  */

  /* Handle loads.  */
  if (!flag_schedule_speculative_load)
    return 0;

  IS_LOAD_INSN (insn) = 1;

  switch (insn_class)
    {
    case PFREE_CANDIDATE:
      if (is_pfree (insn, bb_src, bb_trg))
        return 1;
      /* Don't 'break' here: PFREE-candidate is also PRISKY-candidate.  */
    case PRISKY_CANDIDATE:
      if (!flag_schedule_speculative_load_dangerous
          || is_prisky (insn, bb_src, bb_trg))
        return 0;
      break;
    default:;
    }

  return flag_schedule_speculative_load_dangerous;
/* The number of insns from the current block scheduled so far.  */
static int sched_target_n_insns;
/* The number of insns from the current block to be scheduled in total.  */
static int target_n_insns;
/* The number of insns from the entire region scheduled so far.  */
static int sched_n_insns;

/* Implementations of the sched_info functions for region scheduling.  */
static void init_ready_list (void);
static int can_schedule_ready_p (rtx);
static void begin_schedule_ready (rtx);
static ds_t new_ready (rtx, ds_t);
static int schedule_more_p (void);
static const char *rgn_print_insn (const_rtx, int);
static int rgn_rank (rtx, rtx);
static void compute_jump_reg_dependencies (rtx, regset);

/* Functions for speculative scheduling.  */
static void rgn_add_remove_insn (rtx, int);
static void rgn_add_block (basic_block, basic_block);
static void rgn_fix_recovery_cfg (int, int, int);
static basic_block advance_target_bb (basic_block, rtx);
/* Return nonzero if there are more insns that should be scheduled.  */

schedule_more_p (void)

  return sched_target_n_insns < target_n_insns;

/* Add all insns that are initially ready to the ready list READY.  Called
   once before scheduling a set of insns.  */

init_ready_list (void)

  rtx prev_head = current_sched_info->prev_head;
  rtx next_tail = current_sched_info->next_tail;
  int bb_src;
  rtx insn;

  target_n_insns = 0;
  sched_target_n_insns = 0;
  sched_n_insns = 0;

  /* Print debugging information.  */
  if (sched_verbose >= 5)
    debug_rgn_dependencies (target_bb);

  /* Prepare current target block info.  */
  if (current_nr_blocks > 1)
    compute_trg_info (target_bb);

  /* Initialize ready list with all 'ready' insns in target block.
     Count number of insns in the target block being scheduled.  */
  for (insn = NEXT_INSN (prev_head); insn != next_tail; insn = NEXT_INSN (insn))

      gcc_assert (TODO_SPEC (insn) == HARD_DEP || TODO_SPEC (insn) == DEP_POSTPONED);
      TODO_SPEC (insn) = HARD_DEP;

      gcc_assert (!(TODO_SPEC (insn) & BEGIN_CONTROL));

  /* Add to ready list all 'ready' insns in valid source blocks.
     For speculative insns, check-live, exception-free, and
     issue-delay.  */
  for (bb_src = target_bb + 1; bb_src < current_nr_blocks; bb_src++)
    if (IS_VALID (bb_src))

        rtx src_head;
        rtx src_next_tail;
        rtx tail, head;

        get_ebb_head_tail (EBB_FIRST_BB (bb_src), EBB_LAST_BB (bb_src),
                           &head, &tail);
        src_next_tail = NEXT_INSN (tail);
        src_head = head;

        for (insn = src_head; insn != src_next_tail; insn = NEXT_INSN (insn))
          if (INSN_P (insn))

              gcc_assert (TODO_SPEC (insn) == HARD_DEP || TODO_SPEC (insn) == DEP_POSTPONED);
              TODO_SPEC (insn) = HARD_DEP;
/* Called after taking INSN from the ready list.  Returns nonzero if this
   insn can be scheduled, zero if we should silently discard it.  */

can_schedule_ready_p (rtx insn)

  /* An interblock motion?  */
  if (INSN_BB (insn) != target_bb
      && IS_SPECULATIVE_INSN (insn)
      && !check_live (insn, INSN_BB (insn)))
    return 0;
  else
    return 1;

/* Updates counter and other information.  Split from can_schedule_ready_p ()
   because when we schedule insn speculatively then insn passed to
   can_schedule_ready_p () differs from the one passed to
   begin_schedule_ready ().  */

begin_schedule_ready (rtx insn)

  /* An interblock motion?  */
  if (INSN_BB (insn) != target_bb)

      if (IS_SPECULATIVE_INSN (insn))

          gcc_assert (check_live (insn, INSN_BB (insn)));

          update_live (insn, INSN_BB (insn));

          /* For speculative load, mark insns fed by it.  */
          if (IS_LOAD_INSN (insn) || FED_BY_SPEC_LOAD (insn))
            set_spec_fed (insn);

          nr_spec++;

      nr_inter++;

  else

      /* In-block motion.  */
      sched_target_n_insns++;

  sched_n_insns++;
/* Called after INSN has all its hard dependencies resolved and the speculation
   of type TS is enough to overcome them all.
   Return nonzero if it should be moved to the ready list or the queue, or zero
   if we should silently discard it.  */

new_ready (rtx next, ds_t ts)

  if (INSN_BB (next) != target_bb)

      int not_ex_free = 0;

      /* For speculative insns, before inserting to ready/queue,
         check live, exception-free, and issue-delay.  */
      if (!IS_VALID (INSN_BB (next))
          || (IS_SPECULATIVE_INSN (next)
              && ((recog_memoized (next) >= 0
                   && min_insn_conflict_delay (curr_state, next, next)
                      > PARAM_VALUE (PARAM_MAX_SCHED_INSN_CONFLICT_DELAY))
                  || IS_SPECULATION_CHECK_P (next)
                  || !check_live (next, INSN_BB (next))
                  || (not_ex_free = !is_exception_free (next, INSN_BB (next),
                                                        target_bb)))))

          if (not_ex_free
              /* We are here because is_exception_free () == false.
                 But we possibly can handle that with control speculation.  */
              && sched_deps_info->generate_spec_deps
              && spec_info->mask & BEGIN_CONTROL)

              ds_t new_ds;

              /* Add control speculation to NEXT's dependency type.  */
              new_ds = set_dep_weak (ts, BEGIN_CONTROL, MAX_DEP_WEAK);

              /* Check if NEXT can be speculated with new dependency type.  */
              if (sched_insn_is_legitimate_for_speculation_p (next, new_ds))
                /* Here we got a new control-speculative instruction.  */
                ts = new_ds;
              else
                /* NEXT isn't ready yet.  */
                ts = DEP_POSTPONED;

          else
            /* NEXT isn't ready yet.  */
            ts = DEP_POSTPONED;

  return ts;
/* Return a string that contains the insn uid and optionally anything else
   necessary to identify this insn in an output.  It's valid to use a
   static buffer for this.  The ALIGNED parameter should cause the string
   to be formatted so that multiple output lines will line up nicely.  */

rgn_print_insn (const_rtx insn, int aligned)

  static char tmp[80];

  if (aligned)
    sprintf (tmp, "b%3d: i%4d", INSN_BB (insn), INSN_UID (insn));
  else

      if (current_nr_blocks > 1 && INSN_BB (insn) != target_bb)
        sprintf (tmp, "%d/b%d", INSN_UID (insn), INSN_BB (insn));
      else
        sprintf (tmp, "%d", INSN_UID (insn));

  return tmp;
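
/* For illustration (hypothetical uid/block numbers): with ALIGNED set,
   an insn with uid 240 in bb 3 prints as "b  3: i 240"; otherwise it
   prints as "240", or as "240/b3" when it comes from a block other than
   the target during interblock scheduling.  */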
/* Compare priority of two insns.  Return a positive number if the second
   insn is to be preferred for scheduling, and a negative one if the first
   is to be preferred.  Zero if they are equally good.  */

static int
rgn_rank (rtx insn1, rtx insn2)
{
  /* Some comparisons make sense only in interblock scheduling.  */
  if (INSN_BB (insn1) != INSN_BB (insn2))
    {
      int spec_val, prob_val;

      /* Prefer an in-block motion over an interblock motion.  */
      if ((INSN_BB (insn2) == target_bb) && (INSN_BB (insn1) != target_bb))
        return 1;
      if ((INSN_BB (insn1) == target_bb) && (INSN_BB (insn2) != target_bb))
        return -1;

      /* Prefer a useful motion over a speculative one.  */
      spec_val = IS_SPECULATIVE_INSN (insn1) - IS_SPECULATIVE_INSN (insn2);
      if (spec_val)
        return spec_val;

      /* Prefer a more probable (speculative) insn.  */
      prob_val = INSN_PROBABILITY (insn2) - INSN_PROBABILITY (insn1);
      if (prob_val)
        return prob_val;
    }
  return 0;
}

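/* A standalone sketch (kept under #if 0) of the tiered comparison rgn_rank
   implements: each criterion is tried in order and the first nonzero
   difference decides.  The toy_insn fields are invented for illustration.  */
#if 0
#include <stdio.h>

struct toy_insn
{
  int in_target_bb;   /* 1 if the insn is in the block being scheduled */
  int speculative;    /* 1 if moving it would be speculative */
  int probability;    /* likelihood its block executes, 0..100 */
};

/* Positive: prefer B.  Negative: prefer A.  Zero: tie.  */
static int
toy_rank (const struct toy_insn *a, const struct toy_insn *b)
{
  int diff;

  /* Tier 1: prefer in-block motion over interblock motion.  */
  diff = b->in_target_bb - a->in_target_bb;
  if (diff)
    return diff;

  /* Tier 2: prefer a useful motion over a speculative one.  */
  diff = a->speculative - b->speculative;
  if (diff)
    return diff;

  /* Tier 3: prefer the more probable insn.  */
  return b->probability - a->probability;
}

int
main (void)
{
  struct toy_insn a = { 0, 1, 40 }, b = { 0, 0, 40 };
  printf ("%d\n", toy_rank (&a, &b));  /* 1: b wins, it is not speculative */
  return 0;
}
#endif
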
/* NEXT is an instruction that depends on INSN (a backward dependence);
   return nonzero if we should include this dependence in priority
   calculations.  */

static int
contributes_to_priority (rtx next, rtx insn)
{
  /* NEXT and INSN reside in one ebb.  */
  return BLOCK_TO_BB (BLOCK_NUM (next)) == BLOCK_TO_BB (BLOCK_NUM (insn));
}

/* INSN is a JUMP_INSN.  Store the set of registers that must be
   considered as used by this jump in USED.  */

static void
compute_jump_reg_dependencies (rtx insn ATTRIBUTE_UNUSED,
                               regset used ATTRIBUTE_UNUSED)
{
  /* Nothing to do here, since we postprocess jumps in
     add_branch_dependences.  */
}

/* This variable holds common_sched_info hooks and data relevant to
   the interblock scheduler.  */
static struct common_sched_info_def rgn_common_sched_info;


/* This holds data for the dependence analysis relevant to
   the interblock scheduler.  */
static struct sched_deps_info_def rgn_sched_deps_info;

/* This holds constant data used for initializing the above structure
   for the Haifa scheduler.  */
static const struct sched_deps_info_def rgn_const_sched_deps_info =
  {
    compute_jump_reg_dependencies,
    NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
    0, 0, 0
  };

/* Same as above, but for the selective scheduler.  */
static const struct sched_deps_info_def rgn_const_sel_sched_deps_info =
  {
    compute_jump_reg_dependencies,
    NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
    0, 0, 0
  };

/* Return true if scheduling INSN will trigger finish of scheduling
   the current block.  */
static bool
rgn_insn_finishes_block_p (rtx insn)
{
  if (INSN_BB (insn) == target_bb
      && sched_target_n_insns + 1 == target_n_insns)
    /* INSN is the last not-scheduled instruction in the current block.  */
    return true;

  return false;
}

/* Used in schedule_insns to initialize current_sched_info for scheduling
   regions (or single basic blocks).  */

static const struct haifa_sched_info rgn_const_sched_info =
{
  init_ready_list,
  can_schedule_ready_p,
  schedule_more_p,
  new_ready,
  rgn_rank,
  rgn_print_insn,
  contributes_to_priority,
  rgn_insn_finishes_block_p,

  NULL, NULL,
  NULL, NULL,
  0, 0,

  rgn_add_remove_insn,
  begin_schedule_ready,
  NULL,
  advance_target_bb,
  NULL, NULL,
  SCHED_RGN
};

/* This variable holds the data and hooks needed by the Haifa scheduler
   backend for the interblock scheduler frontend.  */
static struct haifa_sched_info rgn_sched_info;

/* Return the maximum priority that any insn was assigned.  */
static int
get_rgn_sched_max_insns_priority (void)
{
  return rgn_sched_info.sched_max_insns_priority;
}

/* Determine if PAT sets a TARGET_CLASS_LIKELY_SPILLED_P register.  */

static void sets_likely_spilled_1 (rtx, const_rtx, void *);

static bool
sets_likely_spilled (rtx pat)
{
  bool ret = false;
  note_stores (pat, sets_likely_spilled_1, &ret);
  return ret;
}

static void
sets_likely_spilled_1 (rtx x, const_rtx pat, void *data)
{
  bool *ret = (bool *) data;

  if (GET_CODE (pat) == SET
      && REG_P (x)
      && HARD_REGISTER_P (x)
      && targetm.class_likely_spilled_p (REGNO_REG_CLASS (REGNO (x))))
    *ret = true;
}

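/* A standalone sketch (kept under #if 0) of the callback-plus-closure
   pattern used by sets_likely_spilled above: a generic walker invokes a
   client callback for every element and threads a 'void *' result slot
   through to it.  The walker and predicate are invented stand-ins for
   note_stores, purely for illustration.  */
#if 0
#include <stdbool.h>
#include <stdio.h>

/* Generic walk: call CB on each int in ARR, passing DATA through.  */
static void
toy_walk (const int *arr, int n, void (*cb) (int, void *), void *data)
{
  for (int i = 0; i < n; i++)
    cb (arr[i], data);
}

/* Client callback: set the bool closure if any element is negative.  */
static void
toy_note_negative (int x, void *data)
{
  bool *ret = (bool *) data;
  if (x < 0)
    *ret = true;
}

int
main (void)
{
  int regs[] = { 3, 7, -2, 5 };
  bool ret = false;
  toy_walk (regs, 4, toy_note_negative, &ret);
  printf (ret ? "found\n" : "not found\n");  /* prints "found" */
  return 0;
}
#endif
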
/* A bitmap to note insns that participate in any dependency.  Used in
   add_branch_dependences.  */
static sbitmap insn_referenced;

/* Add dependences so that branches are scheduled to run last in their
   block.  */
static void
add_branch_dependences (rtx head, rtx tail)
{
  rtx insn, last;

  /* For all branches, calls, uses, clobbers, cc0 setters, and instructions
     that can throw exceptions, force them to remain in order at the end of
     the block by adding dependencies and giving the last a high priority.
     There may be notes present, and prev_head may also be a note.

     Branches must obviously remain at the end.  Calls should remain at the
     end since moving them results in worse register allocation.  Uses remain
     at the end to ensure proper register allocation.

     cc0 setters remain at the end because they can't be moved away from
     their cc0 user.

     Predecessors of SCHED_GROUP_P instructions at the end remain at the end.

     COND_EXEC insns cannot be moved past a branch (see e.g. PR17808).

     Insns setting TARGET_CLASS_LIKELY_SPILLED_P registers (usually return
     values) are not moved before reload because we can wind up with register
     allocation failures.  */

  while (tail != head && DEBUG_INSN_P (tail))
    tail = PREV_INSN (tail);

  insn = tail;
  last = 0;
  while (CALL_P (insn)
         || JUMP_P (insn) || JUMP_TABLE_DATA_P (insn)
         || (NONJUMP_INSN_P (insn)
             && (GET_CODE (PATTERN (insn)) == USE
                 || GET_CODE (PATTERN (insn)) == CLOBBER
                 || can_throw_internal (insn)
#ifdef HAVE_cc0
                 || sets_cc0_p (PATTERN (insn))
#endif
                 || (!reload_completed
                     && sets_likely_spilled (PATTERN (insn)))))
         || NOTE_P (insn)
         || (last != 0 && SCHED_GROUP_P (last)))
    {
      if (!NOTE_P (insn))
        {
          if (last != 0
              && sd_find_dep_between (insn, last, false) == NULL)
            {
              if (! sched_insns_conditions_mutex_p (last, insn))
                add_dependence (last, insn, REG_DEP_ANTI);
              bitmap_set_bit (insn_referenced, INSN_LUID (insn));
            }

          CANT_MOVE (insn) = 1;

          last = insn;
        }

      /* Don't overrun the bounds of the basic block.  */
      if (insn == head)
        break;

      do
        insn = PREV_INSN (insn);
      while (insn != head && DEBUG_INSN_P (insn));
    }

  /* Make sure these insns are scheduled last in their block.  */
  insn = last;
  if (insn != 0)
    while (insn != head)
      {
        insn = prev_nonnote_insn (insn);

        if (bitmap_bit_p (insn_referenced, INSN_LUID (insn))
            || DEBUG_INSN_P (insn))
          continue;

        if (! sched_insns_conditions_mutex_p (last, insn))
          add_dependence (last, insn, REG_DEP_ANTI);
      }

  if (!targetm.have_conditional_execution ())
    return;

  /* Finally, if the block ends in a jump, and we are doing intra-block
     scheduling, make sure that the branch depends on any COND_EXEC insns
     inside the block to avoid moving the COND_EXECs past the branch insn.

     We only have to do this after reload, because (1) before reload there
     are no COND_EXEC insns, and (2) the region scheduler is an intra-block
     scheduler after reload.

     FIXME: We could in some cases move COND_EXEC insns past the branch if
     this scheduler would be a little smarter.  Consider this code:

                T = [addr]
        C  ?    addr += 4
        !C ?    X += 12
        C  ?    T += 1
        C  ?    jump foo

     On a target with a one cycle stall on a memory access the optimal
     sequence would be:

                T = [addr]
        C  ?    addr += 4
        C  ?    T += 1
        C  ?    jump foo
        !C ?    X += 12

     We don't want to put the 'X += 12' before the branch because it just
     wastes a cycle of execution time when the branch is taken.

     Note that in the example "!C" will always be true.  That is another
     possible improvement for handling COND_EXECs in this scheduler: it
     could remove always-true predicates.  */
  if (!reload_completed || ! (JUMP_P (tail) || JUMP_TABLE_DATA_P (tail)))
    return;

  insn = tail;
  while (insn != head)
    {
      insn = PREV_INSN (insn);

      /* Note that we want to add this dependency even when
         sched_insns_conditions_mutex_p returns true.  The whole point
         is that we _want_ this dependency, even if these insns really
         are independent.  */
      if (INSN_P (insn) && GET_CODE (PATTERN (insn)) == COND_EXEC)
        add_dependence (tail, insn, REG_DEP_ANTI);
    }
}

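/* A standalone sketch (kept under #if 0) of the chaining loop above: walk
   backwards from the block end and link each end-of-block insn to the one
   after it with an anti-dependence, so the scheduler cannot reorder the
   tail.  Insns and dependence edges are modeled with plain arrays, purely
   for illustration.  */
#if 0
#include <stdio.h>

#define N 6

int
main (void)
{
  /* 1 marks insns that must stay at the block end (branches, USEs, ...).  */
  int must_stay[N] = { 0, 0, 0, 1, 1, 1 };
  int last = -1;

  for (int i = N - 1; i >= 0 && must_stay[i]; i--)
    {
      if (last >= 0)
        printf ("anti-dep: %d -> %d\n", i, last);  /* i must precede last */
      last = i;
    }
  /* prints: anti-dep: 4 -> 5, then anti-dep: 3 -> 4 */
  return 0;
}
#endif
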
/* Data structures for the computation of data dependences in a region.  We
   keep one `deps' structure for every basic block.  Before analyzing the
   data dependences for a bb, its variables are initialized as a function of
   the variables of its predecessors.  When the analysis for a bb completes,
   we save the contents to the corresponding bb_deps[bb] variable.  */

static struct deps_desc *bb_deps;

static void
concat_insn_mem_list (rtx copy_insns, rtx copy_mems, rtx *old_insns_p,
                      rtx *old_mems_p)
{
  rtx new_insns = *old_insns_p;
  rtx new_mems = *old_mems_p;

  while (copy_insns)
    {
      new_insns = alloc_INSN_LIST (XEXP (copy_insns, 0), new_insns);
      new_mems = alloc_EXPR_LIST (VOIDmode, XEXP (copy_mems, 0), new_mems);
      copy_insns = XEXP (copy_insns, 1);
      copy_mems = XEXP (copy_mems, 1);
    }

  *old_insns_p = new_insns;
  *old_mems_p = new_mems;
}

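/* A standalone sketch (kept under #if 0) of the list concatenation above:
   the source list is walked once and each element is pushed onto the
   destination list, so the destination ends up with the source elements in
   reverse order in front of its old contents -- order does not matter for
   these dependence lists.  Plain malloc'd nodes stand in for GCC's
   INSN_LIST rtx nodes.  */
#if 0
#include <stdio.h>
#include <stdlib.h>

struct toy_node
{
  int value;
  struct toy_node *next;
};

static struct toy_node *
toy_push (int value, struct toy_node *next)
{
  struct toy_node *n = (struct toy_node *) malloc (sizeof *n);
  n->value = value;
  n->next = next;
  return n;
}

/* Prepend a copy of every node of SRC onto *DST_P.  */
static void
toy_concat (const struct toy_node *src, struct toy_node **dst_p)
{
  struct toy_node *dst = *dst_p;
  for (; src != NULL; src = src->next)
    dst = toy_push (src->value, dst);
  *dst_p = dst;
}

int
main (void)
{
  struct toy_node *dst = toy_push (1, NULL);
  struct toy_node *src = toy_push (2, toy_push (3, NULL));
  toy_concat (src, &dst);
  for (struct toy_node *p = dst; p; p = p->next)
    printf ("%d ", p->value);          /* prints "3 2 1" */
  printf ("\n");
  return 0;
}
#endif
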
/* Join PRED_DEPS to the SUCC_DEPS.  */
void
deps_join (struct deps_desc *succ_deps, struct deps_desc *pred_deps)
{
  unsigned reg;
  reg_set_iterator rsi;

  /* The reg_last lists are inherited by successor.  */
  EXECUTE_IF_SET_IN_REG_SET (&pred_deps->reg_last_in_use, 0, reg, rsi)
    {
      struct deps_reg *pred_rl = &pred_deps->reg_last[reg];
      struct deps_reg *succ_rl = &succ_deps->reg_last[reg];

      succ_rl->uses = concat_INSN_LIST (pred_rl->uses, succ_rl->uses);
      succ_rl->sets = concat_INSN_LIST (pred_rl->sets, succ_rl->sets);
      succ_rl->implicit_sets
        = concat_INSN_LIST (pred_rl->implicit_sets, succ_rl->implicit_sets);
      succ_rl->clobbers = concat_INSN_LIST (pred_rl->clobbers,
                                            succ_rl->clobbers);
      succ_rl->uses_length += pred_rl->uses_length;
      succ_rl->clobbers_length += pred_rl->clobbers_length;
    }
  IOR_REG_SET (&succ_deps->reg_last_in_use, &pred_deps->reg_last_in_use);

  /* Mem read/write lists are inherited by successor.  */
  concat_insn_mem_list (pred_deps->pending_read_insns,
                        pred_deps->pending_read_mems,
                        &succ_deps->pending_read_insns,
                        &succ_deps->pending_read_mems);
  concat_insn_mem_list (pred_deps->pending_write_insns,
                        pred_deps->pending_write_mems,
                        &succ_deps->pending_write_insns,
                        &succ_deps->pending_write_mems);

  succ_deps->pending_jump_insns
    = concat_INSN_LIST (pred_deps->pending_jump_insns,
                        succ_deps->pending_jump_insns);
  succ_deps->last_pending_memory_flush
    = concat_INSN_LIST (pred_deps->last_pending_memory_flush,
                        succ_deps->last_pending_memory_flush);

  succ_deps->pending_read_list_length += pred_deps->pending_read_list_length;
  succ_deps->pending_write_list_length += pred_deps->pending_write_list_length;
  succ_deps->pending_flush_length += pred_deps->pending_flush_length;

  /* last_function_call is inherited by successor.  */
  succ_deps->last_function_call
    = concat_INSN_LIST (pred_deps->last_function_call,
                        succ_deps->last_function_call);

  /* last_function_call_may_noreturn is inherited by successor.  */
  succ_deps->last_function_call_may_noreturn
    = concat_INSN_LIST (pred_deps->last_function_call_may_noreturn,
                        succ_deps->last_function_call_may_noreturn);

  /* sched_before_next_call is inherited by successor.  */
  succ_deps->sched_before_next_call
    = concat_INSN_LIST (pred_deps->sched_before_next_call,
                        succ_deps->sched_before_next_call);
}

/* After computing the dependencies for block BB, propagate the dependencies
   found in PRED_DEPS to the successors of the block.  */
static void
propagate_deps (int bb, struct deps_desc *pred_deps)
{
  basic_block block = BASIC_BLOCK_FOR_FN (cfun, BB_TO_BLOCK (bb));
  edge_iterator ei;
  edge e;

  /* bb's structures are inherited by its successors.  */
  FOR_EACH_EDGE (e, ei, block->succs)
    {
      /* Only bbs "below" bb, in the same region, are interesting.  */
      if (e->dest == EXIT_BLOCK_PTR_FOR_FN (cfun)
          || CONTAINING_RGN (block->index) != CONTAINING_RGN (e->dest->index)
          || BLOCK_TO_BB (e->dest->index) <= bb)
        continue;

      deps_join (bb_deps + BLOCK_TO_BB (e->dest->index), pred_deps);
    }

  /* These lists should point to the right place, for correct
     freeing later.  */
  bb_deps[bb].pending_read_insns = pred_deps->pending_read_insns;
  bb_deps[bb].pending_read_mems = pred_deps->pending_read_mems;
  bb_deps[bb].pending_write_insns = pred_deps->pending_write_insns;
  bb_deps[bb].pending_write_mems = pred_deps->pending_write_mems;
  bb_deps[bb].pending_jump_insns = pred_deps->pending_jump_insns;

  /* Can't allow these to be freed twice.  */
  pred_deps->pending_read_insns = 0;
  pred_deps->pending_read_mems = 0;
  pred_deps->pending_write_insns = 0;
  pred_deps->pending_write_mems = 0;
  pred_deps->pending_jump_insns = 0;
}

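/* A standalone sketch (kept under #if 0) of the propagation scheme above:
   blocks are visited in topological order, and each block ORs its summary
   (here a bitmask of defined "registers") into every successor that lies
   after it, so each block ends up seeing the union over all its region
   predecessors.  The diamond-shaped graph is invented for illustration.  */
#if 0
#include <stdio.h>

#define NBB 4

int
main (void)
{
  /* Diamond: 0 -> 1, 0 -> 2, 1 -> 3, 2 -> 3, in topological order.  */
  int succ[NBB][NBB] = { { 0, 1, 1, 0 },
                         { 0, 0, 0, 1 },
                         { 0, 0, 0, 1 },
                         { 0, 0, 0, 0 } };
  unsigned defs[NBB] = { 0x1, 0x2, 0x4, 0x8 };  /* locally defined regs */

  for (int bb = 0; bb < NBB; bb++)
    for (int s = bb + 1; s < NBB; s++)          /* only blocks "below" bb */
      if (succ[bb][s])
        defs[s] |= defs[bb];                    /* successor inherits bb */

  for (int bb = 0; bb < NBB; bb++)
    printf ("bb %d sees defs %#x\n", bb, defs[bb]);
  /* bb 3 sees defs 0xf: the union over both paths through the diamond.  */
  return 0;
}
#endif
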
/* Compute dependences inside bb.  In a multi-block region:
   (1) a bb is analyzed after its predecessors, and (2) the lists in
   effect at the end of bb (after analyzing for bb) are inherited by
   bb's successors.

   Specifically for reg-reg data dependences, the block insns are
   scanned by sched_analyze () top-to-bottom.  Three lists are
   maintained by sched_analyze (): reg_last[].sets for register DEFs,
   reg_last[].implicit_sets for implicit hard register DEFs, and
   reg_last[].uses for register USEs.

   When analysis is completed for bb, we update for its successors:
   ;  - DEFS[succ] = Union (DEFS [succ], DEFS [bb])
   ;  - IMPLICIT_DEFS[succ] = Union (IMPLICIT_DEFS [succ], IMPLICIT_DEFS [bb])
   ;  - USES[succ] = Union (USES [succ], USES [bb])

   The mechanism for computing mem-mem data dependence is very
   similar, and the result is interblock dependences in the region.  */
static void
compute_block_dependences (int bb)
{
  rtx head, tail;
  struct deps_desc tmp_deps;

  tmp_deps = bb_deps[bb];

  /* Do the analysis for this block.  */
  gcc_assert (EBB_FIRST_BB (bb) == EBB_LAST_BB (bb));
  get_ebb_head_tail (EBB_FIRST_BB (bb), EBB_LAST_BB (bb), &head, &tail);

  sched_analyze (&tmp_deps, head, tail);

  /* Selective scheduling handles control dependencies by itself.  */
  if (!sel_sched_p ())
    add_branch_dependences (head, tail);

  if (current_nr_blocks > 1)
    propagate_deps (bb, &tmp_deps);

  /* Free up the INSN_LISTs.  */
  free_deps (&tmp_deps);

  if (targetm.sched.dependencies_evaluation_hook)
    targetm.sched.dependencies_evaluation_hook (head, tail);
}

/* Free dependencies of instructions inside BB.  */
static void
free_block_dependencies (int bb)
{
  rtx head;
  rtx tail;

  get_ebb_head_tail (EBB_FIRST_BB (bb), EBB_LAST_BB (bb), &head, &tail);

  if (no_real_insns_p (head, tail))
    return;

  sched_free_deps (head, tail, true);
}

/* Remove all INSN_LISTs and EXPR_LISTs from the pending lists and add
   them to the unused_*_list variables, so that they can be reused.  */

static void
free_pending_lists (void)
{
  int bb;

  for (bb = 0; bb < current_nr_blocks; bb++)
    {
      free_INSN_LIST_list (&bb_deps[bb].pending_read_insns);
      free_INSN_LIST_list (&bb_deps[bb].pending_write_insns);
      free_EXPR_LIST_list (&bb_deps[bb].pending_read_mems);
      free_EXPR_LIST_list (&bb_deps[bb].pending_write_mems);
      free_INSN_LIST_list (&bb_deps[bb].pending_jump_insns);
    }
}

/* Print dependences for debugging starting from FROM_BB.
   Callable from debugger.  */
DEBUG_FUNCTION void
debug_rgn_dependencies (int from_bb)
{
  int bb;

  fprintf (sched_dump,
           ";;   --------------- forward dependences: ------------ \n");

  for (bb = from_bb; bb < current_nr_blocks; bb++)
    {
      rtx head, tail;

      get_ebb_head_tail (EBB_FIRST_BB (bb), EBB_LAST_BB (bb), &head, &tail);
      fprintf (sched_dump, "\n;;   --- Region Dependences --- b %d bb %d \n",
               BB_TO_BLOCK (bb), bb);

      debug_dependencies (head, tail);
    }
}

/* Print dependencies information for instructions between HEAD and TAIL.
   ??? This function would probably fit best in haifa-sched.c.  */
void debug_dependencies (rtx head, rtx tail)
{
  rtx insn;
  rtx next_tail = NEXT_INSN (tail);

  fprintf (sched_dump, ";;   %7s%6s%6s%6s%6s%6s%14s\n",
           "insn", "code", "bb", "dep", "prio", "cost",
           "reservation");
  fprintf (sched_dump, ";;   %7s%6s%6s%6s%6s%6s%14s\n",
           "----", "----", "--", "---", "----", "----",
           "-----------");

  for (insn = head; insn != next_tail; insn = NEXT_INSN (insn))
    {
      if (! INSN_P (insn))
        {
          int n;
          fprintf (sched_dump, ";;   %6d ", INSN_UID (insn));
          if (NOTE_P (insn))
            {
              n = NOTE_KIND (insn);
              fprintf (sched_dump, "%s\n", GET_NOTE_INSN_NAME (n));
            }
          else
            fprintf (sched_dump, " {%s}\n", GET_RTX_NAME (GET_CODE (insn)));
          continue;
        }

      fprintf (sched_dump,
               ";;   %s%5d%6d%6d%6d%6d%6d   ",
               (SCHED_GROUP_P (insn) ? "+" : " "),
               INSN_UID (insn),
               INSN_CODE (insn),
               BLOCK_NUM (insn),
               sched_emulate_haifa_p ? -1 : sd_lists_size (insn, SD_LIST_BACK),
               (sel_sched_p () ? (sched_emulate_haifa_p ? -1
                                  : INSN_PRIORITY (insn))
                : INSN_PRIORITY (insn)),
               (sel_sched_p () ? (sched_emulate_haifa_p ? -1
                                  : insn_cost (insn))
                : insn_cost (insn)));

      if (recog_memoized (insn) < 0)
        fprintf (sched_dump, "nothing");
      else
        print_reservation (sched_dump, insn);

      fprintf (sched_dump, "\t: ");
      {
        sd_iterator_def sd_it;
        dep_t dep;

        FOR_EACH_DEP (insn, SD_LIST_FORW, sd_it, dep)
          fprintf (sched_dump, "%d%s%s ", INSN_UID (DEP_CON (dep)),
                   DEP_NONREG (dep) ? "n" : "",
                   DEP_MULTIPLE (dep) ? "m" : "");
      }
      fprintf (sched_dump, "\n");
    }

  fprintf (sched_dump, "\n");
}

/* Return true if all the basic blocks of the current region are flagged
   BB_DISABLE_SCHEDULE, which means the region must not be scheduled.  */
static bool
sched_is_disabled_for_current_region_p (void)
{
  int bb;

  for (bb = 0; bb < current_nr_blocks; bb++)
    if (!(BASIC_BLOCK_FOR_FN (cfun,
                              BB_TO_BLOCK (bb))->flags & BB_DISABLE_SCHEDULE))
      return false;

  return true;
}

/* Free all region dependencies saved in INSN_BACK_DEPS and
   INSN_RESOLVED_BACK_DEPS.  The Haifa scheduler does this on the fly
   when scheduling, so this function is supposed to be called from
   the selective scheduler only.  */
void
free_rgn_deps (void)
{
  int bb;

  for (bb = 0; bb < current_nr_blocks; bb++)
    {
      rtx head, tail;

      gcc_assert (EBB_FIRST_BB (bb) == EBB_LAST_BB (bb));
      get_ebb_head_tail (EBB_FIRST_BB (bb), EBB_LAST_BB (bb), &head, &tail);

      sched_free_deps (head, tail, false);
    }
}

/* Number of insns in the current region.  */
static int rgn_n_insns;

/* Compute insn priority for the current region.  */
void
compute_priorities (void)
{
  int bb;

  current_sched_info->sched_max_insns_priority = 0;
  for (bb = 0; bb < current_nr_blocks; bb++)
    {
      rtx head, tail;

      gcc_assert (EBB_FIRST_BB (bb) == EBB_LAST_BB (bb));
      get_ebb_head_tail (EBB_FIRST_BB (bb), EBB_LAST_BB (bb), &head, &tail);

      if (no_real_insns_p (head, tail))
        continue;

      rgn_n_insns += set_priorities (head, tail);
    }
  current_sched_info->sched_max_insns_priority++;
}

/* (Re-)initialize the arrays of DFA states at the end of each basic block.

   SAVED_LAST_BASIC_BLOCK is the previous length of the arrays.  It must be
   zero for the first call to this function, to allocate the arrays for the
   first time.

   This function is called once during initialization of the scheduler, and
   called again to resize the arrays if new basic blocks have been created,
   for example for speculation recovery code.  */
static void
realloc_bb_state_array (int saved_last_basic_block)
{
  char *old_bb_state_array = bb_state_array;
  size_t lbb = (size_t) last_basic_block;
  size_t slbb = (size_t) saved_last_basic_block;

  /* Nothing to do if nothing changed since the last time this was called.  */
  if (saved_last_basic_block == last_basic_block)
    return;

  /* The selective scheduler doesn't use the state arrays.  */
  if (sel_sched_p ())
    {
      gcc_assert (bb_state_array == NULL && bb_state == NULL);
      return;
    }

  gcc_checking_assert (saved_last_basic_block == 0
                       || (bb_state_array != NULL && bb_state != NULL));

  bb_state_array = XRESIZEVEC (char, bb_state_array, lbb * dfa_state_size);
  bb_state = XRESIZEVEC (state_t, bb_state, lbb);

  /* If BB_STATE_ARRAY has moved, fix up every entry of the state pointer
     array; otherwise fix up only the newly allocated entries.  For the
     state array itself, only initialize the new entries.  */
  bool bb_state_array_moved = (bb_state_array != old_bb_state_array);
  for (size_t i = bb_state_array_moved ? 0 : slbb; i < lbb; i++)
    bb_state[i] = (state_t) (bb_state_array + i * dfa_state_size);
  for (size_t i = slbb; i < lbb; i++)
    state_reset (bb_state[i]);
}

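/* A standalone sketch (kept under #if 0) of the fixup above: when a buffer
   holding fixed-size records is grown with realloc and its base address
   moves, every interior pointer into it must be recomputed from the new
   base; if the base did not move, only the pointers for the newly added
   records need to be set.  (Comparing the stale base mirrors the code
   above.)  Sizes are invented for illustration.  */
#if 0
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define RECORD_SIZE 32

int
main (void)
{
  size_t old_n = 4, new_n = 8;
  char *buf = (char *) malloc (old_n * RECORD_SIZE);
  char **ptr = (char **) malloc (old_n * sizeof (char *));
  for (size_t i = 0; i < old_n; i++)
    ptr[i] = buf + i * RECORD_SIZE;

  /* Grow both arrays; 'buf' may move.  */
  char *old_buf = buf;
  buf = (char *) realloc (buf, new_n * RECORD_SIZE);
  ptr = (char **) realloc (ptr, new_n * sizeof (char *));

  /* If the base moved, refresh every pointer; else only the new ones.  */
  size_t first = (buf != old_buf) ? 0 : old_n;
  for (size_t i = first; i < new_n; i++)
    ptr[i] = buf + i * RECORD_SIZE;

  /* Only the new records need initializing.  */
  for (size_t i = old_n; i < new_n; i++)
    memset (ptr[i], 0, RECORD_SIZE);

  printf ("%zu pointers fixed up\n", new_n - first);
  free (buf);
  free (ptr);
  return 0;
}
#endif
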
/* Free the arrays of DFA states at the end of each basic block.  */

static void
free_bb_state_array (void)
{
  free (bb_state_array);
  free (bb_state);
  bb_state_array = NULL;
  bb_state = NULL;
}

/* Schedule a region.  A region is either an inner loop, a loop-free
   subroutine, or a single basic block.  Each bb in the region is
   scheduled after its flow predecessors.  */

static void
schedule_region (int rgn)
{
  int bb;
  int sched_rgn_n_insns = 0;

  rgn_n_insns = 0;

  rgn_setup_region (rgn);

  /* Don't schedule a region whose blocks are all flagged
     BB_DISABLE_SCHEDULE.  */
  if (sched_is_disabled_for_current_region_p ())
    return;

  sched_rgn_compute_dependencies (rgn);

  sched_rgn_local_init (rgn);

  /* Set priorities.  */
  compute_priorities ();

  sched_extend_ready_list (rgn_n_insns);

  if (sched_pressure == SCHED_PRESSURE_WEIGHTED)
    {
      sched_init_region_reg_pressure_info ();
      for (bb = 0; bb < current_nr_blocks; bb++)
        {
          basic_block first_bb, last_bb;
          rtx head, tail;

          first_bb = EBB_FIRST_BB (bb);
          last_bb = EBB_LAST_BB (bb);

          get_ebb_head_tail (first_bb, last_bb, &head, &tail);

          if (no_real_insns_p (head, tail))
            {
              gcc_assert (first_bb == last_bb);
              continue;
            }
          sched_setup_bb_reg_pressure_info (first_bb, PREV_INSN (head));
        }
    }

  /* Now we can schedule all blocks.  */
  for (bb = 0; bb < current_nr_blocks; bb++)
    {
      basic_block first_bb, last_bb, curr_bb;
      rtx head, tail;

      first_bb = EBB_FIRST_BB (bb);
      last_bb = EBB_LAST_BB (bb);

      get_ebb_head_tail (first_bb, last_bb, &head, &tail);

      if (no_real_insns_p (head, tail))
        {
          gcc_assert (first_bb == last_bb);
          continue;
        }

      current_sched_info->prev_head = PREV_INSN (head);
      current_sched_info->next_tail = NEXT_INSN (tail);

      remove_notes (head, tail);

      unlink_bb_notes (first_bb, last_bb);

      target_bb = bb;

      gcc_assert (flag_schedule_interblock || current_nr_blocks == 1);
      current_sched_info->queue_must_finish_empty = current_nr_blocks == 1;

      curr_bb = first_bb;
      if (dbg_cnt (sched_block))
        {
          edge f;
          int saved_last_basic_block = last_basic_block;

          schedule_block (&curr_bb, bb_state[first_bb->index]);
          gcc_assert (EBB_FIRST_BB (bb) == first_bb);
          sched_rgn_n_insns += sched_n_insns;
          realloc_bb_state_array (saved_last_basic_block);
          f = find_fallthru_edge (last_bb->succs);
          if (f && f->probability * 100 / REG_BR_PROB_BASE >=
              PARAM_VALUE (PARAM_SCHED_STATE_EDGE_PROB_CUTOFF))
            {
              memcpy (bb_state[f->dest->index], curr_state,
                      dfa_state_size);
              if (sched_verbose >= 5)
                fprintf (sched_dump, "saving state for edge %d->%d\n",
                         f->src->index, f->dest->index);
            }
        }
      else
        sched_rgn_n_insns += rgn_n_insns;

      /* Clean up.  */
      if (current_nr_blocks > 1)
        free_trg_info ();
    }

  /* Sanity check: verify that all region insns were scheduled.  */
  gcc_assert (sched_rgn_n_insns == rgn_n_insns);

  sched_finish_ready_list ();

  /* Done with this region.  */
  sched_rgn_local_finish ();

  /* Free dependencies.  */
  for (bb = 0; bb < current_nr_blocks; ++bb)
    free_block_dependencies (bb);

  gcc_assert (haifa_recovery_bb_ever_added_p
              || deps_pools_are_empty_p ());
}

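/* A standalone sketch (kept under #if 0) of the fallthrough-probability test
   above: edge probabilities are stored as fixed-point fractions of
   REG_BR_PROB_BASE, so converting to a percentage is
   prob * 100 / REG_BR_PROB_BASE.  The base and cutoff values are
   illustrative, not taken from GCC's params.def.  */
#if 0
#include <stdio.h>

#define TOY_REG_BR_PROB_BASE 10000   /* probability scale, illustrative */
#define TOY_PROB_CUTOFF 40           /* percent; illustrative cutoff */

int
main (void)
{
  int edge_prob = 6250;  /* 6250/10000 = 62.5% likely fallthrough */
  int percent = edge_prob * 100 / TOY_REG_BR_PROB_BASE;

  if (percent >= TOY_PROB_CUTOFF)
    printf ("%d%% >= %d%%: worth saving DFA state across the edge\n",
            percent, TOY_PROB_CUTOFF);
  return 0;
}
#endif
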
/* Initialize data structures for region scheduling.  */
void
sched_rgn_init (bool single_blocks_p)
{
  min_spec_prob = ((PARAM_VALUE (PARAM_MIN_SPEC_PROB) * REG_BR_PROB_BASE)
                   / 100);

  nr_inter = 0;
  nr_spec = 0;

  extend_regions ();

  CONTAINING_RGN (ENTRY_BLOCK) = -1;
  CONTAINING_RGN (EXIT_BLOCK) = -1;

  realloc_bb_state_array (0);

  /* Compute regions for scheduling.  */
  if (single_blocks_p
      || n_basic_blocks_for_fn (cfun) == NUM_FIXED_BLOCKS + 1
      || !flag_schedule_interblock
      || is_cfg_nonregular ())
    {
      find_single_block_region (sel_sched_p ());
    }
  else
    {
      /* Compute the dominators and post dominators.  */
      if (!sel_sched_p ())
        calculate_dominance_info (CDI_DOMINATORS);

      /* Find regions.  */
      find_rgns ();

      if (sched_verbose >= 3)
        debug_regions ();

      /* For now.  This will move as more and more of haifa is converted
         to using the cfg code.  */
      if (!sel_sched_p ())
        free_dominance_info (CDI_DOMINATORS);
    }

  gcc_assert (0 < nr_regions && nr_regions <= n_basic_blocks_for_fn (cfun));

  RGN_BLOCKS (nr_regions) = (RGN_BLOCKS (nr_regions - 1) +
                             RGN_NR_BLOCKS (nr_regions - 1));
}

/* Free data structures for region scheduling.  */
void
sched_rgn_finish (void)
{
  free_bb_state_array ();

  /* Reposition the prologue and epilogue notes in case we moved the
     prologue/epilogue insns.  */
  if (reload_completed)
    reposition_prologue_and_epilogue_notes ();

  if (sched_verbose)
    {
      if (reload_completed == 0
          && flag_schedule_interblock)
        {
          fprintf (sched_dump,
                   "\n;; Procedure interblock/speculative motions == %d/%d \n",
                   nr_inter, nr_spec);
        }
      else
        gcc_assert (nr_inter <= 0);
      fprintf (sched_dump, "\n\n");
    }

  nr_regions = 0;

  free (rgn_table);
  rgn_table = NULL;

  free (rgn_bb_table);
  rgn_bb_table = NULL;

  free (block_to_bb);
  block_to_bb = NULL;

  free (containing_rgn);
  containing_rgn = NULL;

  free (ebb_head);
  ebb_head = NULL;
}

/* Set up global variables like CURRENT_BLOCKS and CURRENT_NR_BLOCKS to
   point to the region RGN.  */
void
rgn_setup_region (int rgn)
{
  int bb;

  /* Set variables for the current region.  */
  current_nr_blocks = RGN_NR_BLOCKS (rgn);
  current_blocks = RGN_BLOCKS (rgn);

  /* EBB_HEAD is a region-scope structure.  But we realloc it for
     each region to avoid keeping a copy for every region at once.
     See the comments in rgn_add_block for why it is allocated with
     one extra element.  */
  ebb_head = XRESIZEVEC (int, ebb_head, current_nr_blocks + 1);
  for (bb = 0; bb <= current_nr_blocks; bb++)
    ebb_head[bb] = current_blocks + bb;
}

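/* A standalone sketch (kept under #if 0) of the ebb_head convention above:
   an index array with one extra sentinel entry, so the blocks of ebb I
   always occupy positions ebb_head[I] .. ebb_head[I + 1] - 1 and the last
   ebb needs no special case.  The block table contents are invented for
   illustration.  */
#if 0
#include <stdio.h>

int
main (void)
{
  int bb_table[] = { 7, 3, 9, 4, 8 };    /* block indices of one region */
  int head[] = { 0, 2, 4, 5 };           /* 3 ebbs + sentinel entry */
  int n_ebbs = 3;

  for (int i = 0; i < n_ebbs; i++)
    {
      printf ("ebb %d:", i);
      for (int p = head[i]; p < head[i + 1]; p++)
        printf (" b%d", bb_table[p]);
      printf ("\n");
    }
  /* prints: ebb 0: b7 b3 / ebb 1: b9 b4 / ebb 2: b8 */
  return 0;
}
#endif
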
/* Compute instruction dependencies in region RGN.  */
void
sched_rgn_compute_dependencies (int rgn)
{
  if (!RGN_DONT_CALC_DEPS (rgn))
    {
      int bb;

      if (sel_sched_p ())
        sched_emulate_haifa_p = 1;

      init_deps_global ();

      /* Initializations for region data dependence analysis.  */
      bb_deps = XNEWVEC (struct deps_desc, current_nr_blocks);
      for (bb = 0; bb < current_nr_blocks; bb++)
        init_deps (bb_deps + bb, false);

      /* Initialize bitmap used in add_branch_dependences.  */
      insn_referenced = sbitmap_alloc (sched_max_luid);
      bitmap_clear (insn_referenced);

      /* Compute backward dependencies.  */
      for (bb = 0; bb < current_nr_blocks; bb++)
        compute_block_dependences (bb);

      sbitmap_free (insn_referenced);
      free_pending_lists ();
      finish_deps_global ();
      free (bb_deps);

      /* We don't want to recalculate this twice.  */
      RGN_DONT_CALC_DEPS (rgn) = 1;

      if (sel_sched_p ())
        sched_emulate_haifa_p = 0;
    }
  else
    /* (This is a recovery block.  It is always a single block region.)
       OR (We use selective scheduling.)  */
    gcc_assert (current_nr_blocks == 1 || sel_sched_p ());
}

/* Init region data structures for region RGN.  */
void
sched_rgn_local_init (int rgn)
{
  int bb;

  /* Compute interblock info: probabilities, split-edges, dominators, etc.  */
  if (current_nr_blocks > 1)
    {
      basic_block block;
      edge e;
      edge_iterator ei;

      prob = XNEWVEC (int, current_nr_blocks);

      dom = sbitmap_vector_alloc (current_nr_blocks, current_nr_blocks);
      bitmap_vector_clear (dom, current_nr_blocks);

      /* Use ->aux to implement EDGE_TO_BIT mapping.  */
      rgn_nr_edges = 0;
      FOR_EACH_BB_FN (block, cfun)
        {
          if (CONTAINING_RGN (block->index) != rgn)
            continue;
          FOR_EACH_EDGE (e, ei, block->succs)
            SET_EDGE_TO_BIT (e, rgn_nr_edges++);
        }

      rgn_edges = XNEWVEC (edge, rgn_nr_edges);
      rgn_nr_edges = 0;
      FOR_EACH_BB_FN (block, cfun)
        {
          if (CONTAINING_RGN (block->index) != rgn)
            continue;
          FOR_EACH_EDGE (e, ei, block->succs)
            rgn_edges[rgn_nr_edges++] = e;
        }

      /* Split edges.  */
      pot_split = sbitmap_vector_alloc (current_nr_blocks, rgn_nr_edges);
      bitmap_vector_clear (pot_split, current_nr_blocks);
      ancestor_edges = sbitmap_vector_alloc (current_nr_blocks, rgn_nr_edges);
      bitmap_vector_clear (ancestor_edges, current_nr_blocks);

      /* Compute probabilities, dominators, split_edges.  */
      for (bb = 0; bb < current_nr_blocks; bb++)
        compute_dom_prob_ps (bb);

      /* Cleanup ->aux used for EDGE_TO_BIT mapping.  We don't need it
         anymore, but we want to avoid duplication of aux fields in the
         newly created edges.  */
      FOR_EACH_BB_FN (block, cfun)
        {
          if (CONTAINING_RGN (block->index) != rgn)
            continue;
          FOR_EACH_EDGE (e, ei, block->succs)
            e->aux = NULL;
        }
    }
}

/* Free data computed for the finished region.  */
void
sched_rgn_local_free (void)
{
  free (prob);
  sbitmap_vector_free (dom);
  sbitmap_vector_free (pot_split);
  sbitmap_vector_free (ancestor_edges);
  free (rgn_edges);
}

/* Free data computed for the finished region, if any was allocated.  */
void
sched_rgn_local_finish (void)
{
  if (current_nr_blocks > 1 && !sel_sched_p ())
    {
      sched_rgn_local_free ();
    }
}

/* Set up the common scheduler hooks.  */
void
rgn_setup_common_sched_info (void)
{
  memcpy (&rgn_common_sched_info, &haifa_common_sched_info,
          sizeof (rgn_common_sched_info));

  rgn_common_sched_info.fix_recovery_cfg = rgn_fix_recovery_cfg;
  rgn_common_sched_info.add_block = rgn_add_block;
  rgn_common_sched_info.estimate_number_of_insns
    = rgn_estimate_number_of_insns;
  rgn_common_sched_info.sched_pass_id = SCHED_RGN_PASS;

  common_sched_info = &rgn_common_sched_info;
}

/* Set up all *_sched_info structures (for the Haifa frontend
   and for the dependence analysis) in the interblock scheduler.  */
void
rgn_setup_sched_infos (void)
{
  if (!sel_sched_p ())
    memcpy (&rgn_sched_deps_info, &rgn_const_sched_deps_info,
            sizeof (rgn_sched_deps_info));
  else
    memcpy (&rgn_sched_deps_info, &rgn_const_sel_sched_deps_info,
            sizeof (rgn_sched_deps_info));

  sched_deps_info = &rgn_sched_deps_info;

  memcpy (&rgn_sched_info, &rgn_const_sched_info, sizeof (rgn_sched_info));
  current_sched_info = &rgn_sched_info;
}

/* The one entry point in this file.  */
void
schedule_insns (void)
{
  int rgn;

  /* Taking care of this degenerate case makes the rest of
     this code simpler.  */
  if (n_basic_blocks_for_fn (cfun) == NUM_FIXED_BLOCKS)
    return;

  rgn_setup_common_sched_info ();
  rgn_setup_sched_infos ();

  haifa_sched_init ();
  sched_rgn_init (reload_completed);

  bitmap_initialize (&not_in_df, 0);
  bitmap_clear (&not_in_df);

  /* Schedule every region in the subroutine.  */
  for (rgn = 0; rgn < nr_regions; rgn++)
    if (dbg_cnt (sched_region))
      schedule_region (rgn);

  /* Clean up.  */
  sched_rgn_finish ();
  bitmap_clear (&not_in_df);

  haifa_sched_finish ();
}

/* INSN has been added to/removed from the current region.  */
static void
rgn_add_remove_insn (rtx insn, int remove_p)
{
  if (!remove_p)
    rgn_n_insns++;
  else
    rgn_n_insns--;

  if (INSN_BB (insn) == target_bb)
    {
      if (!remove_p)
        target_n_insns++;
      else
        target_n_insns--;
    }
}

/* Extend internal data structures.  */
void
extend_regions (void)
{
  rgn_table = XRESIZEVEC (region, rgn_table, n_basic_blocks_for_fn (cfun));
  rgn_bb_table = XRESIZEVEC (int, rgn_bb_table, n_basic_blocks_for_fn (cfun));
  block_to_bb = XRESIZEVEC (int, block_to_bb, last_basic_block);
  containing_rgn = XRESIZEVEC (int, containing_rgn, last_basic_block);
}

void
rgn_make_new_region_out_of_new_block (basic_block bb)
{
  int i;

  i = RGN_BLOCKS (nr_regions);
  /* I - first free position in rgn_bb_table.  */

  rgn_bb_table[i] = bb->index;
  RGN_NR_BLOCKS (nr_regions) = 1;
  RGN_HAS_REAL_EBB (nr_regions) = 0;
  RGN_DONT_CALC_DEPS (nr_regions) = 0;
  CONTAINING_RGN (bb->index) = nr_regions;
  BLOCK_TO_BB (bb->index) = 0;

  nr_regions++;

  RGN_BLOCKS (nr_regions) = i + 1;
}

/* BB has been added to the ebb after AFTER.  */
static void
rgn_add_block (basic_block bb, basic_block after)
{
  extend_regions ();
  bitmap_set_bit (&not_in_df, bb->index);

  if (after == 0 || after == EXIT_BLOCK_PTR_FOR_FN (cfun))
    {
      rgn_make_new_region_out_of_new_block (bb);
      RGN_DONT_CALC_DEPS (nr_regions - 1) = (after
                                             == EXIT_BLOCK_PTR_FOR_FN (cfun));
    }
  else
    {
      int i, pos;

      /* We need to fix rgn_table, block_to_bb, containing_rgn
         and ebb_head.  */

      BLOCK_TO_BB (bb->index) = BLOCK_TO_BB (after->index);

      /* We extend ebb_head to one more position to
         easily find the last position of the last ebb in
         the current region.  Thus, ebb_head[BLOCK_TO_BB (after) + 1]
         is _always_ valid for access.  */

      i = BLOCK_TO_BB (after->index) + 1;
      pos = ebb_head[i] - 1;
      /* Now POS is the index of the last block in the region.  */

      /* Find index of basic block AFTER.  */
      for (; rgn_bb_table[pos] != after->index; pos--)
        ;

      pos++;
      gcc_assert (pos > ebb_head[i - 1]);

      /* i - ebb right after "AFTER".  */
      /* ebb_head[i] - VALID.  */

      /* Source position: ebb_head[i]
         Destination position: ebb_head[i] + 1
         Last position:
           RGN_BLOCKS (nr_regions) - 1
         Number of elements to copy: (last_position) - (source_position) + 1
       */

      memmove (rgn_bb_table + pos + 1,
               rgn_bb_table + pos,
               ((RGN_BLOCKS (nr_regions) - 1) - (pos) + 1)
               * sizeof (*rgn_bb_table));

      rgn_bb_table[pos] = bb->index;

      for (; i <= current_nr_blocks; i++)
        ebb_head[i]++;

      i = CONTAINING_RGN (after->index);
      CONTAINING_RGN (bb->index) = i;

      RGN_HAS_REAL_EBB (i) = 1;

      for (++i; i <= nr_regions; i++)
        RGN_BLOCKS (i)++;
    }
}

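/* A standalone sketch (kept under #if 0) of the rgn_bb_table update above:
   inserting into a dense array by shifting the tail one slot right with
   memmove (which handles the overlap) and then writing the new element.
   The array contents are invented for illustration.  */
#if 0
#include <stdio.h>
#include <string.h>

int
main (void)
{
  int table[8] = { 10, 11, 12, 13, 14 };
  int len = 5;
  int pos = 2;         /* insert before old table[2] */

  /* Shift table[pos .. len-1] to table[pos+1 .. len].  */
  memmove (table + pos + 1, table + pos, (len - pos) * sizeof (*table));
  table[pos] = 99;
  len++;

  for (int i = 0; i < len; i++)
    printf ("%d ", table[i]);          /* prints "10 11 99 12 13 14" */
  printf ("\n");
  return 0;
}
#endif
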
/* Fix internal data after interblock movement of jump instruction.
   For parameter meaning please refer to
   sched-int.h: struct sched_info: fix_recovery_cfg.  */
static void
rgn_fix_recovery_cfg (int bbi, int check_bbi, int check_bb_nexti)
{
  int old_pos, new_pos, i;

  BLOCK_TO_BB (check_bb_nexti) = BLOCK_TO_BB (bbi);

  for (old_pos = ebb_head[BLOCK_TO_BB (check_bbi) + 1] - 1;
       rgn_bb_table[old_pos] != check_bb_nexti;
       old_pos--)
    ;
  gcc_assert (old_pos > ebb_head[BLOCK_TO_BB (check_bbi)]);

  for (new_pos = ebb_head[BLOCK_TO_BB (bbi) + 1] - 1;
       rgn_bb_table[new_pos] != bbi;
       new_pos--)
    ;
  new_pos++;
  gcc_assert (new_pos > ebb_head[BLOCK_TO_BB (bbi)]);

  gcc_assert (new_pos < old_pos);

  memmove (rgn_bb_table + new_pos + 1,
           rgn_bb_table + new_pos,
           (old_pos - new_pos) * sizeof (*rgn_bb_table));

  rgn_bb_table[new_pos] = check_bb_nexti;

  for (i = BLOCK_TO_BB (bbi) + 1; i <= BLOCK_TO_BB (check_bbi); i++)
    ebb_head[i]++;
}

/* Return next block in ebb chain.  For parameter meaning please refer to
   sched-int.h: struct sched_info: advance_target_bb.  */
static basic_block
advance_target_bb (basic_block bb, rtx insn)
{
  if (insn)
    return 0;

  gcc_assert (BLOCK_TO_BB (bb->index) == target_bb
              && BLOCK_TO_BB (bb->next_bb->index) == target_bb);
  return bb->next_bb;
}

static bool
gate_handle_live_range_shrinkage (void)
{
#ifdef INSN_SCHEDULING
  return flag_live_range_shrinkage;
#else
  return 0;
#endif
}

/* Run instruction scheduler.  */
static unsigned int
rest_of_handle_live_range_shrinkage (void)
{
#ifdef INSN_SCHEDULING
  int saved;

  initialize_live_range_shrinkage ();
  saved = flag_schedule_interblock;
  flag_schedule_interblock = false;
  schedule_insns ();
  flag_schedule_interblock = saved;
  finish_live_range_shrinkage ();
#endif
  return 0;
}

static bool
gate_handle_sched (void)
{
#ifdef INSN_SCHEDULING
  return optimize > 0 && flag_schedule_insns && dbg_cnt (sched_func);
#else
  return 0;
#endif
}

/* Run instruction scheduler.  */
static unsigned int
rest_of_handle_sched (void)
{
#ifdef INSN_SCHEDULING
  if (flag_selective_scheduling
      && ! maybe_skip_selective_scheduling ())
    run_selective_scheduling ();
  else
    schedule_insns ();
#endif
  return 0;
}

static bool
gate_handle_sched2 (void)
{
#ifdef INSN_SCHEDULING
  return optimize > 0 && flag_schedule_insns_after_reload
    && !targetm.delay_sched2 && dbg_cnt (sched2_func);
#else
  return 0;
#endif
}

/* Run second scheduling pass after reload.  */
static unsigned int
rest_of_handle_sched2 (void)
{
#ifdef INSN_SCHEDULING
  if (flag_selective_scheduling2
      && ! maybe_skip_selective_scheduling ())
    run_selective_scheduling ();
  else
    {
      /* Do control and data sched analysis again,
         and write some more of the results to dump file.  */
      if (flag_sched2_use_superblocks)
        schedule_ebbs ();
      else
        schedule_insns ();
    }
#endif
  return 0;
}

namespace {

const pass_data pass_data_live_range_shrinkage =
{
  RTL_PASS, /* type */
  "lr_shrinkage", /* name */
  OPTGROUP_NONE, /* optinfo_flags */
  true, /* has_gate */
  true, /* has_execute */
  TV_LIVE_RANGE_SHRINKAGE, /* tv_id */
  0, /* properties_required */
  0, /* properties_provided */
  0, /* properties_destroyed */
  0, /* todo_flags_start */
  ( TODO_df_finish | TODO_verify_rtl_sharing
    | TODO_verify_flow ), /* todo_flags_finish */
};

class pass_live_range_shrinkage : public rtl_opt_pass
{
public:
  pass_live_range_shrinkage(gcc::context *ctxt)
    : rtl_opt_pass(pass_data_live_range_shrinkage, ctxt)
  {}

  /* opt_pass methods: */
  bool gate () { return gate_handle_live_range_shrinkage (); }
  unsigned int execute () { return rest_of_handle_live_range_shrinkage (); }

}; // class pass_live_range_shrinkage

} // anon namespace

rtl_opt_pass *
make_pass_live_range_shrinkage (gcc::context *ctxt)
{
  return new pass_live_range_shrinkage (ctxt);
}

namespace {

const pass_data pass_data_sched =
{
  RTL_PASS, /* type */
  "sched1", /* name */
  OPTGROUP_NONE, /* optinfo_flags */
  true, /* has_gate */
  true, /* has_execute */
  TV_SCHED, /* tv_id */
  0, /* properties_required */
  0, /* properties_provided */
  0, /* properties_destroyed */
  0, /* todo_flags_start */
  ( TODO_df_finish | TODO_verify_rtl_sharing
    | TODO_verify_flow ), /* todo_flags_finish */
};

class pass_sched : public rtl_opt_pass
{
public:
  pass_sched (gcc::context *ctxt)
    : rtl_opt_pass (pass_data_sched, ctxt)
  {}

  /* opt_pass methods: */
  bool gate () { return gate_handle_sched (); }
  unsigned int execute () { return rest_of_handle_sched (); }

}; // class pass_sched

} // anon namespace

rtl_opt_pass *
make_pass_sched (gcc::context *ctxt)
{
  return new pass_sched (ctxt);
}

namespace {

const pass_data pass_data_sched2 =
{
  RTL_PASS, /* type */
  "sched2", /* name */
  OPTGROUP_NONE, /* optinfo_flags */
  true, /* has_gate */
  true, /* has_execute */
  TV_SCHED2, /* tv_id */
  0, /* properties_required */
  0, /* properties_provided */
  0, /* properties_destroyed */
  0, /* todo_flags_start */
  ( TODO_df_finish | TODO_verify_rtl_sharing
    | TODO_verify_flow ), /* todo_flags_finish */
};

class pass_sched2 : public rtl_opt_pass
{
public:
  pass_sched2 (gcc::context *ctxt)
    : rtl_opt_pass (pass_data_sched2, ctxt)
  {}

  /* opt_pass methods: */
  bool gate () { return gate_handle_sched2 (); }
  unsigned int execute () { return rest_of_handle_sched2 (); }

}; // class pass_sched2

} // anon namespace

rtl_opt_pass *
make_pass_sched2 (gcc::context *ctxt)
{
  return new pass_sched2 (ctxt);
}