/* Control flow optimization code for GNU compiler.
   Copyright (C) 1987, 1988, 1992, 1993, 1994, 1995, 1996, 1997, 1998,
   1999, 2000, 2001 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 2, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING.  If not, write to the Free
Software Foundation, 59 Temple Place - Suite 330, Boston, MA
02111-1307, USA.  */
/* This file contains the control flow optimizer.  The main entry point is
   cleanup_cfg.  The following optimizations are performed:

   - Unreachable block removal
   - Edge forwarding (an edge to a forwarder block is redirected to the
     block's successor.  Simplification of the branch instruction is
     performed by the underlying infrastructure, so the branch can be
     converted to a simplejump or eliminated)
   - Cross jumping (tail merging)
   - Conditional jump-around-simplejump simplification
   - Basic block merging.  */
#include "config.h"
#include "system.h"
#include "rtl.h"
#include "hard-reg-set.h"
#include "basic-block.h"
#include "timevar.h"
#include "output.h"
#include "insn-config.h"
#include "flags.h"
#include "recog.h"
#include "toplev.h"
static bool try_crossjump_to_edge PARAMS ((int, edge, edge));
static bool try_crossjump_bb PARAMS ((int, basic_block));
static bool outgoing_edges_match PARAMS ((basic_block, basic_block));
static int flow_find_cross_jump PARAMS ((int, basic_block, basic_block,
					 rtx *, rtx *));

static bool delete_unreachable_blocks PARAMS ((void));
static int tail_recursion_label_p PARAMS ((rtx));
static int merge_blocks_move_predecessor_nojumps PARAMS ((basic_block,
							  basic_block));
static int merge_blocks_move_successor_nojumps PARAMS ((basic_block,
							basic_block));
static int merge_blocks PARAMS ((edge, basic_block, basic_block,
				 int));
static bool try_optimize_cfg PARAMS ((int));
static bool try_simplify_condjump PARAMS ((basic_block));
static bool try_forward_edges PARAMS ((int, basic_block));
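
/* Many of the transformations below rely on forwarder blocks: a block
   is a forwarder (see forwarder_block_p) when it contains no active
   insns of its own -- at most a label -- and passes control unchanged
   to a single successor, either by fallthru or by an unconditional
   jump.  Edges into such a block can usually be redirected straight to
   its successor.  */
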
/* Simplify a conditional jump around an unconditional jump.
   Return true if something changed.  */
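
/* Schematically (labels are for illustration only):

       if (test) goto L2;              if (!test) goto L3;
       goto L3;                 =>
    L2:                             L2:
       ...                             ...  */
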
static bool
try_simplify_condjump (cbranch_block)
     basic_block cbranch_block;
{
  basic_block jump_block, jump_dest_block, cbranch_dest_block;
  edge cbranch_jump_edge, cbranch_fallthru_edge;
  rtx cbranch_insn;
  /* Verify that there are exactly two successors.  */
  if (!cbranch_block->succ
      || !cbranch_block->succ->succ_next
      || cbranch_block->succ->succ_next->succ_next)
    return false;
  /* Verify that we've got a normal conditional branch at the end
     of the block.  */
  cbranch_insn = cbranch_block->end;
  if (!any_condjump_p (cbranch_insn))
    return false;
  cbranch_fallthru_edge = FALLTHRU_EDGE (cbranch_block);
  cbranch_jump_edge = BRANCH_EDGE (cbranch_block);
  /* The next block must not have multiple predecessors, must not
     be the last block in the function, and must contain just the
     unconditional jump.  */
  jump_block = cbranch_fallthru_edge->dest;
  if (jump_block->pred->pred_next
      || jump_block->index == n_basic_blocks - 1
      || !forwarder_block_p (jump_block))
    return false;
  jump_dest_block = jump_block->succ->dest;
  /* The conditional branch must target the block after the
     unconditional branch.  */
  cbranch_dest_block = cbranch_jump_edge->dest;

  if (!can_fallthru (jump_block, cbranch_dest_block))
    return false;
  /* Invert the conditional branch.  Prevent jump.c from deleting
     "unreachable" instructions.  */
  LABEL_NUSES (JUMP_LABEL (cbranch_insn))++;
  if (!invert_jump (cbranch_insn, block_label (jump_dest_block), 1))
    {
      LABEL_NUSES (JUMP_LABEL (cbranch_insn))--;
      return false;
    }

  if (rtl_dump_file)
    fprintf (rtl_dump_file, "Simplifying condjump %i around jump %i\n",
	     INSN_UID (cbranch_insn), INSN_UID (jump_block->end));
  /* Success.  Update the CFG to match.  Note that after this point
     the edge variable names appear backwards; the redirection is done
     this way to preserve edge profile data.  */
  cbranch_jump_edge = redirect_edge_succ_nodup (cbranch_jump_edge,
						cbranch_dest_block);
  cbranch_fallthru_edge = redirect_edge_succ_nodup (cbranch_fallthru_edge,
						    jump_dest_block);
  cbranch_jump_edge->flags |= EDGE_FALLTHRU;
  cbranch_fallthru_edge->flags &= ~EDGE_FALLTHRU;
  /* Delete the block with the unconditional jump, and clean up the mess.  */
  flow_delete_block (jump_block);
  tidy_fallthru_edge (cbranch_jump_edge, cbranch_block, cbranch_dest_block);

  return true;
}

/* Attempt to forward edges leaving basic block B.
   Return true if successful.  */
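
/* For illustration: if block A branches to forwarder block F, which in
   turn jumps on to T, the edge A->F is redirected to aim at T directly:

       A -> F -> T      becomes      A -> T

   Chains of forwarders are followed up to n_basic_blocks steps, so even
   a malformed infinite chain cannot loop forever.  */
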
static bool
try_forward_edges (mode, b)
     int mode;
     basic_block b;
{
  bool changed = false;
  edge e, next;

  for (e = b->succ; e; e = next)
    {
      basic_block target, first;
      int counter;

      next = e->succ_next;

      /* Skip complex edges because we don't know how to update them.

	 Still handle fallthru edges, as we can succeed in forwarding the
	 fallthru edge to the same place as the branch edge of a
	 conditional branch, turning the conditional branch into an
	 unconditional one.  */
      if (e->flags & EDGE_COMPLEX)
	continue;

      target = first = e->dest;
      counter = 0;
      /* Look for the real destination of the jump.
	 Avoid an infinite loop in an infinite empty loop by counting
	 up to n_basic_blocks.  */
      while (forwarder_block_p (target)
	     && target->succ->dest != EXIT_BLOCK_PTR
	     && counter < n_basic_blocks)
	{
	  /* Bypass trivial infinite loops.  */
	  if (target == target->succ->dest)
	    counter = n_basic_blocks;

	  /* Avoid killing of loop pre-headers, as it is the place the
	     loop optimizer wants to hoist code to.

	     For fallthru forwarders, the LOOP_BEG note must appear between
	     the header of the block and the CODE_LABEL of the loop; for
	     non-forwarders it must appear before the JUMP_INSN.  */
	  if (mode & CLEANUP_PRE_LOOP)
	    {
	      rtx insn = (target->succ->flags & EDGE_FALLTHRU
			  ? target->head : prev_nonnote_insn (target->end));

	      if (GET_CODE (insn) != NOTE)
		insn = NEXT_INSN (insn);

	      for (; insn && GET_CODE (insn) != CODE_LABEL && !INSN_P (insn);
		   insn = NEXT_INSN (insn))
		if (GET_CODE (insn) == NOTE
		    && NOTE_LINE_NUMBER (insn) == NOTE_INSN_LOOP_BEG)
		  break;

	      if (GET_CODE (insn) == NOTE)
		break;
	    }

	  target = target->succ->dest, counter++;
	}
      if (counter >= n_basic_blocks)
	{
	  if (rtl_dump_file)
	    fprintf (rtl_dump_file, "Infinite loop in BB %i.\n",
		     target->index);
	}
      else if (target == first)
	; /* We didn't do anything.  */
      else
	{
	  /* Save the values now, as the edge may get removed.  */
	  gcov_type edge_count = e->count;
	  int edge_probability = e->probability;

	  if (redirect_edge_and_branch (e, target))
	    {
	      /* We successfully forwarded the edge.  Now update profile
		 data: for each edge we traversed in the chain, remove
		 the original edge's execution count.  */
	      int edge_frequency = ((edge_probability * b->frequency
				     + REG_BR_PROB_BASE / 2)
				    / REG_BR_PROB_BASE);

	      do
		{
		  first->count -= edge_count;
		  first->succ->count -= edge_count;
		  first->frequency -= edge_frequency;
		  first = first->succ->dest;
		}
	      while (first != target);

	      changed = true;
	    }
	  else
	    {
	      if (rtl_dump_file)
		fprintf (rtl_dump_file, "Forwarding edge %i->%i to %i failed.\n",
			 b->index, e->dest->index, target->index);
	    }
	}
    }

  return changed;
}

/* Return true if LABEL is used for tail recursion.  */

static int
tail_recursion_label_p (label)
     rtx label;
{
  rtx x;

  for (x = tail_recursion_label_list; x; x = XEXP (x, 1))
    if (label == XEXP (x, 0))
      return 1;

  return 0;
}

/* Blocks A and B are to be merged into a single block.  A has no incoming
   fallthru edge, so it can be moved before B without adding or modifying
   any jumps (aside from the jump from A to B).  */
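
/* For illustration, with blocks shown in layout order:

       B:  ...                         A:  ...
       ...                     =>      B:  ...
       A:  ...  goto B;                ...

   After the move, A falls through into B, so the jump disappears and
   the two blocks can be merged into one.  */
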
static int
merge_blocks_move_predecessor_nojumps (a, b)
     basic_block a, b;
{
  rtx barrier;
  int index;

  barrier = next_nonnote_insn (a->end);
  if (GET_CODE (barrier) != BARRIER)
    abort ();
  flow_delete_insn (barrier);

  /* Move block and loop notes out of the chain so that we do not
     disturb their order.

     ??? A better solution would be to squeeze out all the non-nested notes
     and adjust the block trees appropriately.  Even better would be to have
     a tighter connection between block trees and rtl so that this is not
     necessary.  */
  squeeze_notes (&a->head, &a->end);

  /* Scramble the insn chain.  */
  if (a->end != PREV_INSN (b->head))
    reorder_insns (a->head, a->end, PREV_INSN (b->head));

  if (rtl_dump_file)
    fprintf (rtl_dump_file, "Moved block %d before %d and merged.\n",
	     a->index, b->index);

  /* Swap the records for the two blocks around.  Although we are deleting B,
     A is now where B was and we want to compact the BB array from where
     A used to be.  */
  BASIC_BLOCK (a->index) = b;
  BASIC_BLOCK (b->index) = a;
  index = a->index;
  a->index = b->index;
  b->index = index;

  /* Now blocks A and B are contiguous.  Merge them.  */
  merge_blocks_nomove (a, b);

  return 1;
}

/* Blocks A and B are to be merged into a single block.  B has no outgoing
   fallthru edge, so it can be moved after A without adding or modifying
   any jumps (aside from the jump from A to B).  */
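
/* For illustration, with blocks shown in layout order:

       A:  ...  goto B;                A:  ...
       ...                     =>      B:  ...
       B:  ...                         ...

   B (together with any jump table that follows it) is moved to sit
   right after A; the jump then becomes a fallthru and the two blocks
   can be merged into one.  */
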
static int
merge_blocks_move_successor_nojumps (a, b)
     basic_block a, b;
{
  rtx barrier;

  barrier = NEXT_INSN (b->end);

  /* Recognize a jump table following block B.  */
  if (barrier
      && GET_CODE (barrier) == CODE_LABEL
      && NEXT_INSN (barrier)
      && GET_CODE (NEXT_INSN (barrier)) == JUMP_INSN
      && (GET_CODE (PATTERN (NEXT_INSN (barrier))) == ADDR_VEC
	  || GET_CODE (PATTERN (NEXT_INSN (barrier))) == ADDR_DIFF_VEC))
    {
      b->end = NEXT_INSN (barrier);
      barrier = NEXT_INSN (b->end);
    }

  /* There had better have been a barrier there.  Delete it.  */
  if (barrier && GET_CODE (barrier) == BARRIER)
    flow_delete_insn (barrier);

  /* Move block and loop notes out of the chain so that we do not
     disturb their order.

     ??? A better solution would be to squeeze out all the non-nested notes
     and adjust the block trees appropriately.  Even better would be to have
     a tighter connection between block trees and rtl so that this is not
     necessary.  */
  squeeze_notes (&b->head, &b->end);

  /* Scramble the insn chain.  */
  reorder_insns (b->head, b->end, a->end);

  /* Now blocks A and B are contiguous.  Merge them.  */
  merge_blocks_nomove (a, b);

  if (rtl_dump_file)
    fprintf (rtl_dump_file, "Moved block %d after %d and merged.\n",
	     b->index, a->index);

  return 1;
}

/* Attempt to merge basic blocks that are potentially non-adjacent.
   Return true iff the attempt succeeded.  */
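
/* The strategies below are tried from cheapest to most expensive: merge
   in place when B already falls through to C; otherwise, and only when
   CLEANUP_EXPENSIVE is set, move B up before C or move C down after B,
   compensating with an extra jump when C's old fallthru successor still
   needs one.  */
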
static int
merge_blocks (e, b, c, mode)
     edge e;
     basic_block b, c;
     int mode;
{
  /* If C has a tail recursion label, do not merge.  There is no
     edge recorded from the call_placeholder back to this label, as
     that would make optimize_sibling_and_tail_recursive_calls more
     complex for no gain.  */
  if (GET_CODE (c->head) == CODE_LABEL
      && tail_recursion_label_p (c->head))
    return 0;

  /* If B has a fallthru edge to C, no need to move anything.  */
  if (e->flags & EDGE_FALLTHRU)
    {
      merge_blocks_nomove (b, c);

      if (rtl_dump_file)
	fprintf (rtl_dump_file, "Merged %d and %d without moving.\n",
		 b->index, c->index);

      return 1;
    }
  /* Otherwise we will need to move code around.  Do that only if expensive
     transformations are allowed.  */
  else if (mode & CLEANUP_EXPENSIVE)
    {
      edge tmp_edge, c_fallthru_edge;
      int c_has_outgoing_fallthru;
      int b_has_incoming_fallthru;

      /* Avoid overactive code motion, as the forwarder blocks should be
	 eliminated by edge redirection instead.  One exception might have
	 been if B is a forwarder block and C has no fallthru edge, but
	 that should be cleaned up by bb-reorder instead.  */
      if (forwarder_block_p (b) || forwarder_block_p (c))
	return 0;

      /* We must make sure to not munge nesting of lexical blocks,
	 and loop notes.  This is done by squeezing out all the notes
	 and leaving them there to lie.  Not ideal, but functional.  */

      for (tmp_edge = c->succ; tmp_edge; tmp_edge = tmp_edge->succ_next)
	if (tmp_edge->flags & EDGE_FALLTHRU)
	  break;
      c_has_outgoing_fallthru = (tmp_edge != NULL);
      c_fallthru_edge = tmp_edge;

      for (tmp_edge = b->pred; tmp_edge; tmp_edge = tmp_edge->pred_next)
	if (tmp_edge->flags & EDGE_FALLTHRU)
	  break;
      b_has_incoming_fallthru = (tmp_edge != NULL);

      /* If B does not have an incoming fallthru, then it can be moved
	 immediately before C without introducing or modifying jumps.
	 C cannot be the first block, so we do not have to worry about
	 accessing a non-existent block.  */
      if (! b_has_incoming_fallthru)
	return merge_blocks_move_predecessor_nojumps (b, c);

      /* Otherwise, we're going to try to move C after B.  If C does
	 not have an outgoing fallthru, then it can be moved
	 immediately after B without introducing or modifying jumps.  */
      if (! c_has_outgoing_fallthru)
	return merge_blocks_move_successor_nojumps (b, c);
      /* Otherwise, we'll need to insert an extra jump, and possibly
	 a new block to contain it.  We can't redirect to EXIT_BLOCK_PTR,
	 as we don't have explicit return instructions before epilogues
	 are generated, so give up on that case.  */

      if (c_fallthru_edge->dest != EXIT_BLOCK_PTR
	  && merge_blocks_move_successor_nojumps (b, c))
	{
	  basic_block target = c_fallthru_edge->dest;
	  rtx barrier;
	  basic_block new;

	  /* This is a dirty hack to avoid code duplication.

	     Set edge to point to wrong basic block, so
	     redirect_edge_and_branch_force will do the trick
	     and rewire edge back to the original location.  */
	  redirect_edge_succ (c_fallthru_edge, ENTRY_BLOCK_PTR);
	  new = redirect_edge_and_branch_force (c_fallthru_edge, target);

	  /* We've just created a barrier, but another barrier is
	     already present in the stream.  Avoid the duplicate.  */
	  barrier = next_nonnote_insn (new ? new->end : b->end);
	  if (GET_CODE (barrier) != BARRIER)
	    abort ();
	  flow_delete_insn (barrier);

	  return 1;
	}

      return 0;
    }

  return 0;
}

/* Look through the insns at the end of BB1 and BB2 and find the longest
   sequence of insns that are equivalent.  Store the first insns for that
   sequence in *F1 and *F2 and return the sequence length.

   To simplify callers of this function, if the blocks match exactly,
   store the head of the blocks in *F1 and *F2.  */
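
/* As a purely illustrative example, if both blocks end in

       r1 = r2 + r3                    r1 = r2 + r3
       mem[sp] = r1                    mem[sp] = r1
       goto L5                         goto L5

   then the two-insn tail is found (the simple jumps themselves are
   skipped), the function returns 2, and *F1/*F2 point at the two
   copies of "r1 = r2 + r3".  */
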
static int
flow_find_cross_jump (mode, bb1, bb2, f1, f2)
     int mode ATTRIBUTE_UNUSED;
     basic_block bb1, bb2;
     rtx *f1, *f2;
{
  rtx i1, i2, p1, p2, last1, last2, afterlast1, afterlast2;
  int ninsns = 0;

  /* Skip simple jumps at the end of the blocks.  Complex jumps still
     need to be compared for equivalence, which we'll do below.  */

  i1 = bb1->end;
  if (onlyjump_p (i1)
      || (returnjump_p (i1) && !side_effects_p (PATTERN (i1))))
    i1 = PREV_INSN (i1);

  i2 = bb2->end;
  if (onlyjump_p (i2)
      || (returnjump_p (i2) && !side_effects_p (PATTERN (i2))))
    i2 = PREV_INSN (i2);

  last1 = afterlast1 = last2 = afterlast2 = NULL_RTX;
  while (true)
    {
      /* Ignore notes.  */
      while (GET_CODE (i1) == NOTE && i1 != bb1->head)
	i1 = PREV_INSN (i1);

      while (GET_CODE (i2) == NOTE && i2 != bb2->head)
	i2 = PREV_INSN (i2);

      if (i1 == bb1->head || i2 == bb2->head)
	break;

      /* Verify that I1 and I2 are equivalent.  */

      if (GET_CODE (i1) != GET_CODE (i2))
	break;

      p1 = PATTERN (i1);
      p2 = PATTERN (i2);
      /* If this is a CALL_INSN, compare register usage information.
	 If we don't check this on stack register machines, the two
	 CALL_INSNs might be merged leaving reg-stack.c with mismatching
	 numbers of stack registers in the same basic block.
	 If we don't check this on machines with delay slots, a delay slot may
	 be filled that clobbers a parameter expected by the subroutine.

	 ??? We take the simple route for now and assume that if they're
	 equal, they were constructed identically.  */

      if (GET_CODE (i1) == CALL_INSN
	  && ! rtx_equal_p (CALL_INSN_FUNCTION_USAGE (i1),
			    CALL_INSN_FUNCTION_USAGE (i2)))
	break;
#ifdef STACK_REGS
      /* If cross_jump_death_matters is not 0, the insn's mode
	 indicates whether or not the insn contains any stack-like
	 regs.  */

      if ((mode & CLEANUP_POST_REGSTACK) && stack_regs_mentioned (i1))
	{
	  /* If register stack conversion has already been done, then
	     death notes must also be compared before it is certain that
	     the two instruction streams match.  */

	  rtx note;
	  HARD_REG_SET i1_regset, i2_regset;

	  CLEAR_HARD_REG_SET (i1_regset);
	  CLEAR_HARD_REG_SET (i2_regset);

	  for (note = REG_NOTES (i1); note; note = XEXP (note, 1))
	    if (REG_NOTE_KIND (note) == REG_DEAD
		&& STACK_REG_P (XEXP (note, 0)))
	      SET_HARD_REG_BIT (i1_regset, REGNO (XEXP (note, 0)));

	  for (note = REG_NOTES (i2); note; note = XEXP (note, 1))
	    if (REG_NOTE_KIND (note) == REG_DEAD
		&& STACK_REG_P (XEXP (note, 0)))
	      SET_HARD_REG_BIT (i2_regset, REGNO (XEXP (note, 0)));

	  GO_IF_HARD_REG_EQUAL (i1_regset, i2_regset, done);

	  break;

	done:
	  ;
	}
#endif
      if (GET_CODE (p1) != GET_CODE (p2))
	break;

      if (! rtx_renumbered_equal_p (p1, p2))
	{
	  /* The following code helps take care of G++ cleanups.  */
	  rtx equiv1 = find_reg_equal_equiv_note (i1);
	  rtx equiv2 = find_reg_equal_equiv_note (i2);

	  if (equiv1 && equiv2
	      /* If the equivalences are not to a constant, they may
		 reference pseudos that no longer exist, so we can't
		 use them.  */
	      && CONSTANT_P (XEXP (equiv1, 0))
	      && rtx_equal_p (XEXP (equiv1, 0), XEXP (equiv2, 0)))
	    {
	      rtx s1 = single_set (i1);
	      rtx s2 = single_set (i2);

	      if (s1 != 0 && s2 != 0
		  && rtx_renumbered_equal_p (SET_DEST (s1), SET_DEST (s2)))
		{
		  validate_change (i1, &SET_SRC (s1), XEXP (equiv1, 0), 1);
		  validate_change (i2, &SET_SRC (s2), XEXP (equiv2, 0), 1);
		  if (! rtx_renumbered_equal_p (p1, p2))
		    cancel_changes (0);
		  else if (apply_change_group ())
		    goto win;
		}
	    }

	  break;
	}
    win:
      /* Don't begin a cross-jump with a USE or CLOBBER insn.  */
      if (GET_CODE (p1) != USE && GET_CODE (p1) != CLOBBER)
	{
	  afterlast1 = last1, afterlast2 = last2;
	  last1 = i1, last2 = i2;
	  ninsns++;
	}

      i1 = PREV_INSN (i1);
      i2 = PREV_INSN (i2);
    }
#ifdef HAVE_cc0
  if (ninsns)
    {
      /* Don't allow the insn after a compare to be shared by
	 cross-jumping unless the compare is also shared.  */
      if (reg_mentioned_p (cc0_rtx, last1) && ! sets_cc0_p (last1))
	last1 = afterlast1, last2 = afterlast2, ninsns--;
    }
#endif
  /* Include preceding notes and labels in the cross-jump.  One,
     this may bring us to the head of the blocks as requested above.
     Two, it keeps line number notes as matched as may be.  */
  if (ninsns)
    {
      while (last1 != bb1->head && GET_CODE (PREV_INSN (last1)) == NOTE)
	last1 = PREV_INSN (last1);

      if (last1 != bb1->head && GET_CODE (PREV_INSN (last1)) == CODE_LABEL)
	last1 = PREV_INSN (last1);

      while (last2 != bb2->head && GET_CODE (PREV_INSN (last2)) == NOTE)
	last2 = PREV_INSN (last2);

      if (last2 != bb2->head && GET_CODE (PREV_INSN (last2)) == CODE_LABEL)
	last2 = PREV_INSN (last2);

      *f1 = last1;
      *f2 = last2;
    }

  return ninsns;
}

/* Return true iff the outgoing edges of BB1 and BB2 match, together with
   the branch instruction.  This means that if we commonize the control
   flow before the end of the basic block, the semantics remain unchanged.

   We may assume that there exists one edge with a common destination.  */
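
/* For conditional jumps, the conditions themselves must agree.  As an
   illustrative example, "if (a == b)" in BB1 matches "if (a == b)" in
   BB2 when the branch and fallthru destinations pair up directly, and
   it also matches "if (a != b)" when they pair up crossed, since
   reversing the second condition makes both ends of block behave the
   same way.  */
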
static bool
outgoing_edges_match (bb1, bb2)
     basic_block bb1, bb2;
{
  /* If BB1 has only one successor, we must be looking at an unconditional
     jump.  Which, by the assumption above, means that we only need to check
     that BB2 has one successor.  */
  if (bb1->succ && !bb1->succ->succ_next)
    return (bb2->succ && !bb2->succ->succ_next);
  /* Match conditional jumps - this may get tricky when fallthru and branch
     edges are crossed.  */
  if (bb1->succ
      && bb1->succ->succ_next
      && !bb1->succ->succ_next->succ_next
      && any_condjump_p (bb1->end))
    {
      edge b1, f1, b2, f2;
      bool reverse, match;
      rtx set1, set2, cond1, cond2;
      enum rtx_code code1, code2;

      if (!bb2->succ
	  || !bb2->succ->succ_next
	  || bb2->succ->succ_next->succ_next
	  || !any_condjump_p (bb2->end))
	return false;

      b1 = BRANCH_EDGE (bb1);
      b2 = BRANCH_EDGE (bb2);
      f1 = FALLTHRU_EDGE (bb1);
      f2 = FALLTHRU_EDGE (bb2);
      /* Get around possible forwarders on fallthru edges.  Other cases
	 should be optimized out already.  */
      if (forwarder_block_p (f1->dest))
	f1 = f1->dest->succ;

      if (forwarder_block_p (f2->dest))
	f2 = f2->dest->succ;

      /* To simplify use of this function, return false if there are
	 unneeded forwarder blocks.  These will get eliminated later
	 during cleanup_cfg.  */
      if (forwarder_block_p (f1->dest)
	  || forwarder_block_p (f2->dest)
	  || forwarder_block_p (b1->dest)
	  || forwarder_block_p (b2->dest))
	return false;

      if (f1->dest == f2->dest && b1->dest == b2->dest)
	reverse = false;
      else if (f1->dest == b2->dest && b1->dest == f2->dest)
	reverse = true;
      else
	return false;
      set1 = pc_set (bb1->end);
      set2 = pc_set (bb2->end);
      if ((XEXP (SET_SRC (set1), 1) == pc_rtx)
	  != (XEXP (SET_SRC (set2), 1) == pc_rtx))
	reverse = !reverse;

      cond1 = XEXP (SET_SRC (set1), 0);
      cond2 = XEXP (SET_SRC (set2), 0);
      code1 = GET_CODE (cond1);
      if (reverse)
	code2 = reversed_comparison_code (cond2, bb2->end);
      else
	code2 = GET_CODE (cond2);

      if (code2 == UNKNOWN)
	return false;

      /* Verify codes and operands match.  */
      match = ((code1 == code2
		&& rtx_renumbered_equal_p (XEXP (cond1, 0), XEXP (cond2, 0))
		&& rtx_renumbered_equal_p (XEXP (cond1, 1), XEXP (cond2, 1)))
	       || (code1 == swap_condition (code2)
		   && rtx_renumbered_equal_p (XEXP (cond1, 1),
					      XEXP (cond2, 0))
		   && rtx_renumbered_equal_p (XEXP (cond1, 0),
					      XEXP (cond2, 1))));
      /* If we return true, we will join the blocks.  Which means that
	 we will only have one branch prediction bit to work with.  Thus
	 we require the existing branches to have probabilities that are
	 roughly similar.  */
      /* ??? We should use bb->frequency to allow merging in infrequently
	 executed blocks, but at the moment it is not available when
	 cleanup_cfg is run.  */
      if (match && !optimize_size)
	{
	  rtx note1, note2;
	  int prob1, prob2;

	  note1 = find_reg_note (bb1->end, REG_BR_PROB, 0);
	  note2 = find_reg_note (bb2->end, REG_BR_PROB, 0);

	  if (note1 && note2)
	    {
	      prob1 = INTVAL (XEXP (note1, 0));
	      prob2 = INTVAL (XEXP (note2, 0));
	      if (reverse)
		prob2 = REG_BR_PROB_BASE - prob2;

	      /* Fail if the difference in probabilities is
		 greater than 5%.  */
	      if (abs (prob1 - prob2) > REG_BR_PROB_BASE / 20)
		return false;
	    }
	  else if (note1 || note2)
	    return false;
	}

      if (rtl_dump_file && match)
	fprintf (rtl_dump_file, "Conditionals in bb %i and %i match.\n",
		 bb1->index, bb2->index);

      return match;
    }
  /* ??? We can handle computed jumps too.  This may be important for
     inlined functions containing switch statements.  Also jumps w/o
     fallthru edges can be handled by simply matching whole insn.  */
  return false;
}

/* E1 and E2 are edges with the same destination block.  Search their
   predecessors for common code.  If found, redirect control flow from
   (maybe the middle of) E1->SRC to (maybe the middle of) E2->SRC.  */
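
/* Schematically, when the tails of SRC1 and SRC2 match:

       SRC1:  ...a... tail             SRC1:  ...a... goto L
       SRC2:  ...b... tail      =>     SRC2:  ...b...
                                       L:     tail

   SRC2 is split (if necessary) just before the common tail, and the
   copy of the tail in SRC1 is replaced by a jump to the split point.  */
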
static bool
try_crossjump_to_edge (mode, e1, e2)
     int mode;
     edge e1, e2;
{
  int nmatch;
  basic_block src1 = e1->src, src2 = e2->src;
  basic_block redirect_to;
  rtx newpos1, newpos2;
  edge s;
  rtx last;
  rtx label;
  rtx note;

  /* Search backward through forwarder blocks.  We don't need to worry
     about multiple entry or chained forwarders, as they will be optimized
     away.  We do this to look past the unconditional jump following a
     conditional jump that is required due to the current CFG shape.  */
  if (src1->pred
      && !src1->pred->pred_next
      && forwarder_block_p (src1))
    {
      e1 = src1->pred;
      src1 = e1->src;
    }

  if (src2->pred
      && !src2->pred->pred_next
      && forwarder_block_p (src2))
    {
      e2 = src2->pred;
      src2 = e2->src;
    }
  /* Nothing to do if we reach ENTRY, or a common source block.  */
  if (src1 == ENTRY_BLOCK_PTR || src2 == ENTRY_BLOCK_PTR)
    return false;

  if (src1 == src2)
    return false;

  /* Seeing more than one forwarder block would confuse us later...  */
  if (forwarder_block_p (e1->dest)
      && forwarder_block_p (e1->dest->succ->dest))
    return false;

  if (forwarder_block_p (e2->dest)
      && forwarder_block_p (e2->dest->succ->dest))
    return false;

  /* Likewise with dead code (possibly newly created by the other
     optimizations of cfg_cleanup).  */
  if (!src1->pred || !src2->pred)
    return false;

  /* Likewise with complex edges.
     ??? We should be able to handle most complex edges later with some
     nonobvious work.  */
  if (e1->flags & EDGE_COMPLEX)
    return false;

  /* Look for the common insn sequence, part the first ...  */
  if (!outgoing_edges_match (src1, src2))
    return false;
  /* ... and part the second.  */
  nmatch = flow_find_cross_jump (mode, src1, src2, &newpos1, &newpos2);
  if (!nmatch)
    return false;

  /* Avoid splitting if possible.  */
  if (newpos2 == src2->head)
    redirect_to = src2;
  else
    {
      if (rtl_dump_file)
	fprintf (rtl_dump_file, "Splitting bb %i before %i insns\n",
		 src2->index, nmatch);

      redirect_to = split_block (src2, PREV_INSN (newpos2))->dest;
    }

  if (rtl_dump_file)
    fprintf (rtl_dump_file,
	     "Cross jumping from bb %i to bb %i; %i common insns\n",
	     src1->index, src2->index, nmatch);
  redirect_to->count += src1->count;
  redirect_to->frequency += src1->frequency;

  /* Recompute the frequencies and counts of outgoing edges.  */
  for (s = redirect_to->succ; s; s = s->succ_next)
    {
      edge s2;
      basic_block d = s->dest;

      if (forwarder_block_p (d))
	d = d->succ->dest;

      for (s2 = src1->succ; ; s2 = s2->succ_next)
	{
	  basic_block d2 = s2->dest;
	  if (forwarder_block_p (d2))
	    d2 = d2->succ->dest;
	  if (d == d2)
	    break;
	}

      s->count += s2->count;

      /* Take care to update possible forwarder blocks.  We verified
	 that there is no more than one in the chain, so we can't run
	 into an infinite loop.  */
      if (forwarder_block_p (s->dest))
	{
	  s->dest->succ->count += s2->count;
	  s->dest->count += s2->count;
	  s->dest->frequency += EDGE_FREQUENCY (s);
	}

      if (forwarder_block_p (s2->dest))
	{
	  s2->dest->succ->count -= s2->count;
	  s2->dest->count -= s2->count;
	  s2->dest->frequency -= EDGE_FREQUENCY (s);
	}

      if (!redirect_to->frequency && !src1->frequency)
	s->probability = (s->probability + s2->probability) / 2;
      else
	s->probability =
	  ((s->probability * redirect_to->frequency +
	    s2->probability * src1->frequency)
	   / (redirect_to->frequency + src1->frequency));
    }
  note = find_reg_note (redirect_to->end, REG_BR_PROB, 0);
  if (note)
    XEXP (note, 0) = GEN_INT (BRANCH_EDGE (redirect_to)->probability);

  /* Edit SRC1 to go to REDIRECT_TO at NEWPOS1.  */

  /* Skip possible basic block header.  */
  if (GET_CODE (newpos1) == CODE_LABEL)
    newpos1 = NEXT_INSN (newpos1);

  if (GET_CODE (newpos1) == NOTE)
    newpos1 = NEXT_INSN (newpos1);

  last = src1->end;
  /* Emit the jump insn.  */
  label = block_label (redirect_to);
  src1->end = emit_jump_insn_before (gen_jump (label), newpos1);
  JUMP_LABEL (src1->end) = label;
  LABEL_NUSES (label)++;
  if (basic_block_for_insn)
    set_block_for_new_insns (src1->end, src1);

  /* Delete the now unreachable instructions.  */
  flow_delete_insn_chain (newpos1, last);

  /* Make sure there is a barrier after the new jump.  */
  last = next_nonnote_insn (src1->end);
  if (!last || GET_CODE (last) != BARRIER)
    emit_barrier_after (src1->end);

  /* Update the CFG.  */
  remove_edge (src1->succ);
  make_edge (NULL, src1, redirect_to, 0);
  src1->succ->probability = REG_BR_PROB_BASE;
  src1->succ->count = src1->count;

  return true;
}

/* Search the predecessors of BB for common insn sequences.  When found,
   share code between them by redirecting control flow.  Return true if
   any changes were made.  */
static bool
try_crossjump_bb (mode, bb)
     int mode;
     basic_block bb;
{
  edge e, e2, nexte2, nexte, fallthru;
  bool changed;

  /* Nothing to do if there are not at least two incoming edges.  */
  if (!bb->pred || !bb->pred->pred_next)
    return false;

  /* It is always cheapest to redirect a block that ends in a branch to
     a block that falls through into BB, as that adds no branches to the
     program.  We'll try that combination first.  */
  for (fallthru = bb->pred; fallthru; fallthru = fallthru->pred_next)
    if (fallthru->flags & EDGE_FALLTHRU)
      break;
  changed = false;
  for (e = bb->pred; e; e = nexte)
    {
      nexte = e->pred_next;

      /* Elide complex edges now, as neither try_crossjump_to_edge
	 nor outgoing_edges_match can handle them.  */
      if (e->flags & EDGE_COMPLEX)
	continue;

      /* As noted above, first try with the fallthru predecessor.  */
      if (fallthru)
	{
	  /* Don't combine the fallthru edge into anything else.
	     If there is a match, we'll do it the other way around.  */
	  if (e == fallthru)
	    continue;

	  if (try_crossjump_to_edge (mode, e, fallthru))
	    {
	      changed = true;
	      nexte = bb->pred;
	      continue;
	    }
	}
      /* Non-obvious work limiting check: Recognize that we're going
	 to call try_crossjump_bb on every basic block.  So if we have
	 two blocks with lots of outgoing edges (a switch) and they
	 share lots of common destinations, then we would do the
	 cross-jump check once for each common destination.

	 Now, if the blocks actually are cross-jump candidates, then
	 all of their destinations will be shared.  Which means that
	 we only need check them for cross-jump candidacy once.  We
	 can eliminate redundant checks of crossjump(A,B) by arbitrarily
	 choosing to do the check from the block for which the edge
	 in question is the first successor of A.  */
      if (e->src->succ != e)
	continue;
      for (e2 = bb->pred; e2; e2 = nexte2)
	{
	  nexte2 = e2->pred_next;

	  if (e2 == e)
	    continue;

	  /* We've already checked the fallthru edge above.  */
	  if (e2 == fallthru)
	    continue;

	  /* Again, neither try_crossjump_to_edge nor outgoing_edges_match
	     can handle complex edges.  */
	  if (e2->flags & EDGE_COMPLEX)
	    continue;

	  /* The "first successor" check above only prevents multiple
	     checks of crossjump(A,B).  In order to prevent redundant
	     checks of crossjump(B,A), require that A be the block
	     with the lowest index.  */
	  if (e->src->index > e2->src->index)
	    continue;

	  if (try_crossjump_to_edge (mode, e, e2))
	    {
	      changed = true;
	      nexte = bb->pred;
	      break;
	    }
	}
    }

  return changed;
}

/* Do simple CFG optimizations - basic block merging, simplifying of jump
   instructions, etc.  Return nonzero if changes were made.  */
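
/* Each block is visited with the following sub-passes applied in order:
   dead block removal, dead label removal, empty fallthru block removal,
   block merging, condjump simplification, edge forwarding and cross
   jumping.  The whole sweep is then repeated until an iteration makes
   no further changes.  */
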
static bool
try_optimize_cfg (mode)
     int mode;
{
  int i;
  bool changed_overall = false;
  bool changed;
  int iterations = 0;

  /* Attempt to merge blocks as made possible by edge removal.  If a block
     has only one successor, and the successor has only one predecessor,
     they may be combined.  */
  do
    {
      changed = false;
      iterations++;

      if (rtl_dump_file)
	fprintf (rtl_dump_file, "\n\ntry_optimize_cfg iteration %i\n\n",
		 iterations);
      for (i = 0; i < n_basic_blocks;)
	{
	  basic_block c, b = BASIC_BLOCK (i);
	  edge s;
	  bool changed_here = false;

	  /* Delete trivially dead basic blocks.  */
	  while (b->pred == NULL)
	    {
	      c = BASIC_BLOCK (b->index - 1);
	      if (rtl_dump_file)
		fprintf (rtl_dump_file, "Deleting block %i.\n", b->index);

	      flow_delete_block (b);
	      changed = true;
	      b = c;
	    }
	  /* Remove code labels no longer used.  Don't do this before
	     CALL_PLACEHOLDER is removed, as some branches may be hidden
	     within.  */
	  if (b->pred->pred_next == NULL
	      && (b->pred->flags & EDGE_FALLTHRU)
	      && !(b->pred->flags & EDGE_COMPLEX)
	      && GET_CODE (b->head) == CODE_LABEL
	      && (!(mode & CLEANUP_PRE_SIBCALL)
		  || !tail_recursion_label_p (b->head))
	      /* If the previous block ends with a condjump jumping to the
		 next BB, we can't delete the label.  */
	      && (b->pred->src == ENTRY_BLOCK_PTR
		  || !reg_mentioned_p (b->head, b->pred->src->end)))
	    {
	      rtx label = b->head;
	      b->head = NEXT_INSN (b->head);
	      flow_delete_insn_chain (label, label);
	      if (rtl_dump_file)
		fprintf (rtl_dump_file, "Deleted label in block %i.\n",
			 b->index);
	    }
	  /* If we fall through an empty block, we can remove it.  */
	  if (b->pred->pred_next == NULL
	      && (b->pred->flags & EDGE_FALLTHRU)
	      && GET_CODE (b->head) != CODE_LABEL
	      && forwarder_block_p (b)
	      /* Note that forwarder_block_p true ensures that there
		 is a successor for this block.  */
	      && (b->succ->flags & EDGE_FALLTHRU)
	      && n_basic_blocks > 1)
	    {
	      if (rtl_dump_file)
		fprintf (rtl_dump_file, "Deleting fallthru block %i.\n",
			 b->index);

	      c = BASIC_BLOCK (b->index ? b->index - 1 : 1);
	      redirect_edge_succ_nodup (b->pred, b->succ->dest);
	      flow_delete_block (b);
	      changed = true;
	      b = c;
	    }
	  /* Merge blocks.  Loop because chains of blocks might be
	     combinable.  */
	  while ((s = b->succ) != NULL
		 && s->succ_next == NULL
		 && !(s->flags & EDGE_COMPLEX)
		 && (c = s->dest) != EXIT_BLOCK_PTR
		 && c->pred->pred_next == NULL
		 /* If the jump insn has side effects,
		    we can't kill the edge.  */
		 && (GET_CODE (b->end) != JUMP_INSN
		     || onlyjump_p (b->end))
		 && merge_blocks (s, b, c, mode))
	    changed_here = true;
	  /* Simplify branch over branch.  */
	  if ((mode & CLEANUP_EXPENSIVE) && try_simplify_condjump (b))
	    changed_here = true;

	  /* If B has a single outgoing edge, but uses a non-trivial jump
	     instruction without side-effects, we can either delete the
	     jump entirely, or replace it with a simple unconditional jump.
	     Use redirect_edge_and_branch to do the dirty work.  */
	  if (b->succ
	      && ! b->succ->succ_next
	      && b->succ->dest != EXIT_BLOCK_PTR
	      && onlyjump_p (b->end)
	      && redirect_edge_and_branch (b->succ, b->succ->dest))
	    changed_here = true;

	  /* Simplify branch to branch.  */
	  if (try_forward_edges (mode, b))
	    changed_here = true;

	  /* Look for shared code between blocks.  */
	  if ((mode & CLEANUP_CROSSJUMP)
	      && try_crossjump_bb (mode, b))
	    changed_here = true;
	  /* Don't get confused by the index shift caused by deleting
	     blocks.  */
	  if (!changed_here)
	    i = b->index + 1;
	  else
	    changed = true;
	}

      if ((mode & CLEANUP_CROSSJUMP)
	  && try_crossjump_bb (mode, EXIT_BLOCK_PTR))
	changed = true;

#ifdef ENABLE_CHECKING
      if (changed)
	verify_flow_info ();
#endif

      changed_overall |= changed;
    }
  while (changed);

  return changed_overall;
}

/* Delete all unreachable basic blocks.  */

static bool
delete_unreachable_blocks ()
{
  int i;
  bool changed = false;

  find_unreachable_blocks ();

  /* Delete all unreachable basic blocks.  Count down so that we
     don't interfere with the block renumbering that happens in
     flow_delete_block.  */
  for (i = n_basic_blocks - 1; i >= 0; --i)
    {
      basic_block b = BASIC_BLOCK (i);

      if (!(b->flags & BB_REACHABLE))
	flow_delete_block (b), changed = true;
    }

  if (changed)
    tidy_fallthru_edges ();

  return changed;
}

/* Tidy the CFG by deleting unreachable code and whatnot.  MODE is a set
   of CLEANUP_* flags selecting which transformations to apply.  */
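
/* A typical invocation from a pass might look like

       cleanup_cfg (CLEANUP_EXPENSIVE | CLEANUP_CROSSJUMP);

   though this is for illustration only; each caller passes whichever
   CLEANUP_* flags are safe at its point in the compilation.  */
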
bool
cleanup_cfg (mode)
     int mode;
{
  int i;
  bool changed = false;

  timevar_push (TV_CLEANUP_CFG);
  changed = delete_unreachable_blocks ();
  if (try_optimize_cfg (mode))
    delete_unreachable_blocks (), changed = true;

  mark_critical_edges ();

  /* Kill the data we won't maintain.  */
  free_EXPR_LIST_list (&label_value_list);
  free_EXPR_LIST_list (&tail_recursion_label_list);
  timevar_pop (TV_CLEANUP_CFG);

  /* Clear bb->aux on all basic blocks.  */
  for (i = 0; i < n_basic_blocks; ++i)
    BASIC_BLOCK (i)->aux = NULL;

  return changed;
}