/* Control flow optimization code for GNU compiler.
   Copyright (C) 1987-2013 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
/* This file contains the control flow optimizer.  The main entry point is
   cleanup_cfg.  The following optimizations are performed:

   - Unreachable block removal
   - Edge forwarding (an edge to a forwarder block is forwarded to the
     block's successor.  Simplification of the branch instruction is
     performed by the underlying infrastructure, so the branch can be
     converted to a simplejump or eliminated)
   - Cross jumping (tail merging)
   - Conditional jump-around-simplejump simplification
   - Basic block merging.  */
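/* A brief illustration of the forwarder-block notion used throughout
   this file (an assumed sketch, not part of the original sources): a
   forwarder block contains nothing but an unconditional jump, so every
   edge entering it can be redirected straight to its lone successor:

       bb1: if (x) goto bb3;          bb1: if (x) goto bb4;
       ...                       =>   ...
       bb3: goto bb4;                 bb3: goto bb4;

   Once bb3 has lost all its predecessors, unreachable block removal
   deletes it.  */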
#include "coretypes.h"
#include "hard-reg-set.h"
#include "insn-config.h"
#include "diagnostic-core.h"
#include "function.h" /* Inline functions in emit-rtl.h need crtl.  */
#include "tree-pass.h"

#define FORWARDER_BLOCK_P(BB) ((BB)->flags & BB_FORWARDER_BLOCK)
/* Set to true when we are running the first pass of the try_optimize_cfg
   loop.  */
static bool first_pass;

/* Set to true if crossjumps occurred in the latest run of try_optimize_cfg.  */
static bool crossjumps_occured;

/* Set to true if we couldn't run an optimization due to stale liveness
   information; we should run df_analyze to enable more opportunities.  */
static bool block_was_dirty;
static bool try_crossjump_to_edge (int, edge, edge, enum replace_direction);
static bool try_crossjump_bb (int, basic_block);
static bool outgoing_edges_match (int, basic_block, basic_block);
static enum replace_direction old_insns_match_p (int, rtx, rtx);
static void merge_blocks_move_predecessor_nojumps (basic_block, basic_block);
static void merge_blocks_move_successor_nojumps (basic_block, basic_block);
static bool try_optimize_cfg (int);
static bool try_simplify_condjump (basic_block);
static bool try_forward_edges (int, basic_block);
static edge thread_jump (edge, basic_block);
static bool mark_effect (rtx, bitmap);
static void notice_new_block (basic_block);
static void update_forwarder_flag (basic_block);
static int mentions_nonequal_regs (rtx *, void *);
static void merge_memattrs (rtx, rtx);
/* Set flags for a newly created block.  */

notice_new_block (basic_block bb)

  if (forwarder_block_p (bb))
    bb->flags |= BB_FORWARDER_BLOCK;

/* Recompute the forwarder flag after a block has been modified.  */

update_forwarder_flag (basic_block bb)

  if (forwarder_block_p (bb))
    bb->flags |= BB_FORWARDER_BLOCK;
  else
    bb->flags &= ~BB_FORWARDER_BLOCK;
/* Simplify a conditional jump around an unconditional jump.
   Return true if something changed.  */

try_simplify_condjump (basic_block cbranch_block)

  basic_block jump_block, jump_dest_block, cbranch_dest_block;
  edge cbranch_jump_edge, cbranch_fallthru_edge;

  /* Verify that there are exactly two successors.  */
  if (EDGE_COUNT (cbranch_block->succs) != 2)
    return false;

  /* Verify that we've got a normal conditional branch at the end
     of the block.  */
  cbranch_insn = BB_END (cbranch_block);
  if (!any_condjump_p (cbranch_insn))
    return false;

  cbranch_fallthru_edge = FALLTHRU_EDGE (cbranch_block);
  cbranch_jump_edge = BRANCH_EDGE (cbranch_block);

  /* The next block must not have multiple predecessors, must not
     be the last block in the function, and must contain just the
     unconditional jump.  */
  jump_block = cbranch_fallthru_edge->dest;
  if (!single_pred_p (jump_block)
      || jump_block->next_bb == EXIT_BLOCK_PTR
      || !FORWARDER_BLOCK_P (jump_block))
    return false;

  jump_dest_block = single_succ (jump_block);
  /* If we are partitioning hot/cold basic blocks, we don't want to
     mess up unconditional or indirect jumps that cross between hot
     and cold sections.

     Basic block partitioning may result in some jumps that appear to
     be optimizable (or blocks that appear to be mergeable), but which really
     must be left untouched (they are required to make it safely across
     partition boundaries).  See the comments at the top of
     bb-reorder.c:partition_hot_cold_basic_blocks for complete details.  */

  if (BB_PARTITION (jump_block) != BB_PARTITION (jump_dest_block)
      || (cbranch_jump_edge->flags & EDGE_CROSSING))
    return false;

  /* The conditional branch must target the block after the
     unconditional branch.  */
  cbranch_dest_block = cbranch_jump_edge->dest;

  if (cbranch_dest_block == EXIT_BLOCK_PTR
      || !can_fallthru (jump_block, cbranch_dest_block))
    return false;

  /* Invert the conditional branch.  */
  if (!invert_jump (cbranch_insn, block_label (jump_dest_block), 0))
    return false;

  if (dump_file)
    fprintf (dump_file, "Simplifying condjump %i around jump %i\n",
             INSN_UID (cbranch_insn), INSN_UID (BB_END (jump_block)));
  /* Success.  Update the CFG to match.  Note that after this point
     the edge variable names appear backwards; the redirection is done
     this way to preserve edge profile data.  */
  cbranch_jump_edge = redirect_edge_succ_nodup (cbranch_jump_edge,
                                                cbranch_dest_block);
  cbranch_fallthru_edge = redirect_edge_succ_nodup (cbranch_fallthru_edge,
                                                    jump_dest_block);
  cbranch_jump_edge->flags |= EDGE_FALLTHRU;
  cbranch_fallthru_edge->flags &= ~EDGE_FALLTHRU;
  update_br_prob_note (cbranch_block);

  /* Delete the block with the unconditional jump, and clean up the mess.  */
  delete_basic_block (jump_block);
  tidy_fallthru_edge (cbranch_jump_edge);
  update_forwarder_flag (cbranch_block);
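/* For illustration (an assumed sketch, not part of the original
   sources), the transformation above rewrites

       cbranch_block:  if (cond) goto L2;
       jump_block:     goto L3;
       L2: ...

   into

       cbranch_block:  if (!cond) goto L3;
       L2: ...

   The inverted branch now falls through to L2, and the block holding
   the unconditional jump is deleted.  */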
/* Attempt to prove that an operation is a NOOP using CSElib, or mark its
   effect on a register.  Used by jump threading.  */

mark_effect (rtx exp, regset nonequal)

  switch (GET_CODE (exp))

    /* In case we do clobber the register, mark it as equal, as we know the
       value is dead so it doesn't have to match.  */
    case CLOBBER:
      if (REG_P (XEXP (exp, 0)))

          dest = XEXP (exp, 0);
          regno = REGNO (dest);
          if (HARD_REGISTER_NUM_P (regno))
            bitmap_clear_range (nonequal, regno,
                                hard_regno_nregs[regno][GET_MODE (dest)]);
          else
            bitmap_clear_bit (nonequal, regno);

    case SET:
      if (rtx_equal_for_cselib_p (SET_DEST (exp), SET_SRC (exp)))
        return false;
      dest = SET_DEST (exp);

      regno = REGNO (dest);
      if (HARD_REGISTER_NUM_P (regno))
        bitmap_set_range (nonequal, regno,
                          hard_regno_nregs[regno][GET_MODE (dest)]);
      else
        bitmap_set_bit (nonequal, regno);
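/* Example (an illustrative sketch, not part of the original sources):
   after cselib has processed earlier insns, a copy
   "(set (reg 100) (reg 101))" whose source and destination carry the
   same cselib value is proved to be a no-op and NONEQUAL stays empty;
   a set from an unknown value marks reg 100 in NONEQUAL instead, and a
   later "(clobber (reg 100))" clears the mark again, since a dead
   value does not have to match.  */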
/* Return nonzero if X is a register set in regset DATA.
   Called via for_each_rtx.  */

mentions_nonequal_regs (rtx *x, void *data)

  regset nonequal = (regset) data;
  if (REG_P (*x))

      regno = REGNO (*x);
      if (REGNO_REG_SET_P (nonequal, regno))
        return 1;

      if (regno < FIRST_PSEUDO_REGISTER)

          int n = hard_regno_nregs[regno][GET_MODE (*x)];
          while (--n > 0)
            if (REGNO_REG_SET_P (nonequal, regno + n))
              return 1;

  return 0;
/* Attempt to prove that the basic block B will have no side effects and
   will always continue in the same edge if reached via E.  Return the edge
   if it exists, NULL otherwise.  */

thread_jump (edge e, basic_block b)

  rtx set1, set2, cond1, cond2, insn;
  enum rtx_code code1, code2, reversed_code2;
  bool reverse1 = false;
  unsigned i;
  regset nonequal;
  bool failed = false;
  reg_set_iterator rsi;

  if (b->flags & BB_NONTHREADABLE_BLOCK)
    return NULL;

  /* At the moment, we handle only conditional jumps, but later we may
     want to extend this code to tablejumps and others.  */
  if (EDGE_COUNT (e->src->succs) != 2)
    return NULL;
  if (EDGE_COUNT (b->succs) != 2)

      b->flags |= BB_NONTHREADABLE_BLOCK;
      return NULL;

  /* The second branch must end with an onlyjump, as we will eliminate
     the jump.  */
  if (!any_condjump_p (BB_END (e->src)))
    return NULL;
  if (!any_condjump_p (BB_END (b)) || !onlyjump_p (BB_END (b)))

      b->flags |= BB_NONTHREADABLE_BLOCK;
      return NULL;

  set1 = pc_set (BB_END (e->src));
  set2 = pc_set (BB_END (b));
  if (((e->flags & EDGE_FALLTHRU) != 0)
      != (XEXP (SET_SRC (set1), 1) == pc_rtx))
    reverse1 = true;

  cond1 = XEXP (SET_SRC (set1), 0);
  cond2 = XEXP (SET_SRC (set2), 0);
  if (reverse1)
    code1 = reversed_comparison_code (cond1, BB_END (e->src));
  else
    code1 = GET_CODE (cond1);

  code2 = GET_CODE (cond2);
  reversed_code2 = reversed_comparison_code (cond2, BB_END (b));

  if (!comparison_dominates_p (code1, code2)
      && !comparison_dominates_p (code1, reversed_code2))
    return NULL;

  /* Ensure that the comparison operators are equivalent.
     ??? This is far too pessimistic.  We should allow swapped operands,
     different CCmodes, or, for example, interval comparisons that
     dominate even when the operands are not equivalent.  */
  if (!rtx_equal_p (XEXP (cond1, 0), XEXP (cond2, 0))
      || !rtx_equal_p (XEXP (cond1, 1), XEXP (cond2, 1)))
    return NULL;

  /* Short circuit cases where block B contains some side effects, as we can't
     safely bypass it.  */
  for (insn = NEXT_INSN (BB_HEAD (b)); insn != NEXT_INSN (BB_END (b));
       insn = NEXT_INSN (insn))
    if (INSN_P (insn) && side_effects_p (PATTERN (insn)))

        b->flags |= BB_NONTHREADABLE_BLOCK;
        return NULL;
  /* First process all values computed in the source basic block.  */
  for (insn = NEXT_INSN (BB_HEAD (e->src));
       insn != NEXT_INSN (BB_END (e->src));
       insn = NEXT_INSN (insn))
    if (INSN_P (insn))
      cselib_process_insn (insn);

  nonequal = BITMAP_ALLOC (NULL);
  CLEAR_REG_SET (nonequal);

  /* Now assume that we've continued by the edge E to B and continue
     processing as if it were the same basic block.
     Our goal is to prove that the whole block is a NOOP.  */

  for (insn = NEXT_INSN (BB_HEAD (b));
       insn != NEXT_INSN (BB_END (b)) && !failed;
       insn = NEXT_INSN (insn))

      if (INSN_P (insn))

          rtx pat = PATTERN (insn);

          if (GET_CODE (pat) == PARALLEL)
            for (i = 0; i < (unsigned) XVECLEN (pat, 0); i++)
              failed |= mark_effect (XVECEXP (pat, 0, i), nonequal);
          else
            failed |= mark_effect (pat, nonequal);

      cselib_process_insn (insn);

  /* Later we should clear nonequal of dead registers.  So far we don't
     have liveness information in cfg_cleanup.  */
  if (failed)

      b->flags |= BB_NONTHREADABLE_BLOCK;
      goto failed_exit;

  /* cond2 must not mention any register that is not equal to the
     former block.  */
  if (for_each_rtx (&cond2, mentions_nonequal_regs, nonequal))
    goto failed_exit;

  EXECUTE_IF_SET_IN_REG_SET (nonequal, 0, i, rsi)
    goto failed_exit;

  BITMAP_FREE (nonequal);

  if ((comparison_dominates_p (code1, code2) != 0)
      != (XEXP (SET_SRC (set2), 1) == pc_rtx))
    return BRANCH_EDGE (b);
  else
    return FALLTHRU_EDGE (b);

failed_exit:
  BITMAP_FREE (nonequal);
  return NULL;
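/* Illustration of the threading above (an assumed sketch, not part of
   the original sources): suppose E->SRC ends in "if (x == 0) goto B"
   and B contains no side effects and ends in "if (x >= 0) goto C".
   Since x == 0 dominates x >= 0 and B computes nothing, reaching B via
   the taken edge always continues to C, so the edge can be redirected
   from B straight to C.  */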
/* Attempt to forward edges leaving basic block B.
   Return true if successful.  */

try_forward_edges (int mode, basic_block b)

  bool changed = false;
  edge e, *threaded_edges = NULL;

  /* If we are partitioning hot/cold basic blocks, we don't want to
     mess up unconditional or indirect jumps that cross between hot
     and cold sections.

     Basic block partitioning may result in some jumps that appear to
     be optimizable (or blocks that appear to be mergeable), but which really
     must be left untouched (they are required to make it safely across
     partition boundaries).  See the comments at the top of
     bb-reorder.c:partition_hot_cold_basic_blocks for complete details.  */

  if (find_reg_note (BB_END (b), REG_CROSSING_JUMP, NULL_RTX))
    return false;

  for (ei = ei_start (b->succs); (e = ei_safe_edge (ei)); )

      basic_block target, first;
      int counter, goto_locus;
      bool threaded = false;
      int nthreaded_edges = 0;
      bool may_thread = first_pass || (b->flags & BB_MODIFIED) != 0;

      /* Skip complex edges because we don't know how to update them.

         Still handle fallthru edges, as we may succeed in forwarding a
         fallthru edge to the same place as the branch edge of a
         conditional branch and turning the conditional branch into an
         unconditional one.  */
      if (e->flags & EDGE_COMPLEX)

      target = first = e->dest;
      counter = NUM_FIXED_BLOCKS;
      goto_locus = e->goto_locus;

      /* If we are partitioning hot/cold basic_blocks, we don't want to mess
         up jumps that cross between hot/cold sections.

         Basic block partitioning may result in some jumps that appear
         to be optimizable (or blocks that appear to be mergeable), but which
         really must be left untouched (they are required to make it safely
         across partition boundaries).  See the comments at the top of
         bb-reorder.c:partition_hot_cold_basic_blocks for complete
         details.  */

      if (first != EXIT_BLOCK_PTR
          && find_reg_note (BB_END (first), REG_CROSSING_JUMP, NULL_RTX))
      while (counter < n_basic_blocks)

          basic_block new_target = NULL;
          bool new_target_threaded = false;
          may_thread |= (target->flags & BB_MODIFIED) != 0;

          if (FORWARDER_BLOCK_P (target)
              && !(single_succ_edge (target)->flags & EDGE_CROSSING)
              && single_succ (target) != EXIT_BLOCK_PTR)

              /* Bypass trivial infinite loops.  */
              new_target = single_succ (target);
              if (target == new_target)
                counter = n_basic_blocks;
              else if (!optimize)

                  /* When not optimizing, ensure that edges or forwarder
                     blocks with different locus are not optimized out.  */
                  int new_locus = single_succ_edge (target)->goto_locus;
                  int locus = goto_locus;

                  if (new_locus != UNKNOWN_LOCATION
                      && locus != UNKNOWN_LOCATION
                      && new_locus != locus)
                    new_target = NULL;
                  else

                      if (new_locus != UNKNOWN_LOCATION)
                        locus = new_locus;

                      last = BB_END (target);
                      if (DEBUG_INSN_P (last))
                        last = prev_nondebug_insn (last);

                      new_locus = last && INSN_P (last)
                                  ? INSN_LOCATION (last) : 0;

                      if (new_locus != UNKNOWN_LOCATION
                          && locus != UNKNOWN_LOCATION
                          && new_locus != locus)
                        new_target = NULL;
                      else

                          if (new_locus != UNKNOWN_LOCATION)
                            locus = new_locus;

          /* Allow to thread only over one edge at a time to simplify updating
             of probabilities.  */
          else if ((mode & CLEANUP_THREADING) && may_thread)

              edge t = thread_jump (e, target);
              if (t)

                  if (!threaded_edges)
                    threaded_edges = XNEWVEC (edge, n_basic_blocks);
                  else

                      /* Detect an infinite loop across blocks not
                         including the start block.  */
                      for (i = 0; i < nthreaded_edges; ++i)
                        if (threaded_edges[i] == t)
                          break;
                      if (i < nthreaded_edges)

                          counter = n_basic_blocks;

                  /* Detect an infinite loop across the start block.  */
                  if (t->dest == b)
                    break;

                  gcc_assert (nthreaded_edges < n_basic_blocks - NUM_FIXED_BLOCKS);
                  threaded_edges[nthreaded_edges++] = t;

                  new_target = t->dest;
                  new_target_threaded = true;

          threaded |= new_target_threaded;

      if (counter >= n_basic_blocks)

          if (dump_file)
            fprintf (dump_file, "Infinite loop in BB %i.\n",
                     target->index);

      else if (target == first)
        ; /* We didn't do anything.  */
          /* Save the values now, as the edge may get removed.  */
          gcov_type edge_count = e->count;
          int edge_probability = e->probability;

          e->goto_locus = goto_locus;

          /* Don't force if target is exit block.  */
          if (threaded && target != EXIT_BLOCK_PTR)

              notice_new_block (redirect_edge_and_branch_force (e, target));
              if (dump_file)
                fprintf (dump_file, "Conditionals threaded.\n");

          else if (!redirect_edge_and_branch (e, target))

              if (dump_file)
                fprintf (dump_file,
                         "Forwarding edge %i->%i to %i failed.\n",
                         b->index, e->dest->index, target->index);

          /* We successfully forwarded the edge.  Now update profile
             data: for each edge we traversed in the chain, remove
             the original edge's execution count.  */
          edge_frequency = ((edge_probability * b->frequency
                             + REG_BR_PROB_BASE / 2)
                            / REG_BR_PROB_BASE);

          do

              if (!single_succ_p (first))

                  gcc_assert (n < nthreaded_edges);
                  t = threaded_edges [n++];
                  gcc_assert (t->src == first);
                  update_bb_profile_for_threading (first, edge_frequency,
                                                   edge_count, t);
                  update_br_prob_note (first);

              else

                  first->count -= edge_count;
                  if (first->count < 0)
                    first->count = 0;
                  first->frequency -= edge_frequency;
                  if (first->frequency < 0)
                    first->frequency = 0;
                  /* It is possible that as the result of
                     threading we've removed the edge as it is
                     threaded to the fallthru edge.  Avoid
                     getting out of sync.  */
                  if (n < nthreaded_edges
                      && first == threaded_edges [n]->src)
                    n++;
                  t = single_succ_edge (first);

              t->count -= edge_count;

              first = t->dest;
          while (first != target);

  if (threaded_edges)
    free (threaded_edges);
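/* Illustration (an assumed sketch, not part of the original sources):
   given the chain

       b --> f1: goto f2;  -->  f2: goto t;  -->  t: ...

   the loop above advances TARGET from f1 through f2 to t and then
   redirects b's edge straight to t.  COUNTER is bounded by
   n_basic_blocks, so a cycle of forwarder blocks cannot make the walk
   loop forever.  */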
/* Blocks A and B are to be merged into a single block.  A has no incoming
   fallthru edge, so it can be moved before B without adding or modifying
   any jumps (aside from the jump from A to B).  */

merge_blocks_move_predecessor_nojumps (basic_block a, basic_block b)

  /* If we are partitioning hot/cold basic blocks, we don't want to
     mess up unconditional or indirect jumps that cross between hot
     and cold sections.

     Basic block partitioning may result in some jumps that appear to
     be optimizable (or blocks that appear to be mergeable), but which really
     must be left untouched (they are required to make it safely across
     partition boundaries).  See the comments at the top of
     bb-reorder.c:partition_hot_cold_basic_blocks for complete details.  */

  if (BB_PARTITION (a) != BB_PARTITION (b))
    return;

  barrier = next_nonnote_insn (BB_END (a));
  gcc_assert (BARRIER_P (barrier));
  delete_insn (barrier);

  /* Scramble the insn chain.  */
  if (BB_END (a) != PREV_INSN (BB_HEAD (b)))
    reorder_insns_nobb (BB_HEAD (a), BB_END (a), PREV_INSN (BB_HEAD (b)));

  if (dump_file)
    fprintf (dump_file, "Moved block %d before %d and merged.\n",
             a->index, b->index);

  /* Swap the records for the two blocks around.  */

  unlink_block (a);
  link_block (a, b->prev_bb);

  /* Now blocks A and B are contiguous.  Merge them.  */
  merge_blocks (a, b);
/* Blocks A and B are to be merged into a single block.  B has no outgoing
   fallthru edge, so it can be moved after A without adding or modifying
   any jumps (aside from the jump from A to B).  */

merge_blocks_move_successor_nojumps (basic_block a, basic_block b)

  rtx barrier, real_b_end;

  /* If we are partitioning hot/cold basic blocks, we don't want to
     mess up unconditional or indirect jumps that cross between hot
     and cold sections.

     Basic block partitioning may result in some jumps that appear to
     be optimizable (or blocks that appear to be mergeable), but which really
     must be left untouched (they are required to make it safely across
     partition boundaries).  See the comments at the top of
     bb-reorder.c:partition_hot_cold_basic_blocks for complete details.  */

  if (BB_PARTITION (a) != BB_PARTITION (b))
    return;

  real_b_end = BB_END (b);

  /* If there is a jump table following block B, temporarily add the jump table
     to block B so that it will also be moved to the correct location.  */
  if (tablejump_p (BB_END (b), &label, &table)
      && prev_active_insn (label) == BB_END (b))
    BB_END (b) = table;

  /* There had better have been a barrier there.  Delete it.  */
  barrier = NEXT_INSN (BB_END (b));
  if (barrier && BARRIER_P (barrier))
    delete_insn (barrier);

  /* Scramble the insn chain.  */
  reorder_insns_nobb (BB_HEAD (b), BB_END (b), BB_END (a));

  /* Restore the real end of b.  */
  BB_END (b) = real_b_end;

  if (dump_file)
    fprintf (dump_file, "Moved block %d after %d and merged.\n",
             b->index, a->index);

  /* Now blocks A and B are contiguous.  Merge them.  */
  merge_blocks (a, b);
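/* For illustration (an assumed sketch, not part of the original
   sources): when A ends in "goto B" and B cannot fall through out of
   itself, the routine above places B's insns textually right after A,
   turning the jump from A to B into a fallthru, after which
   merge_blocks () fuses the two block records into one.  */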
/* Attempt to merge basic blocks that are potentially non-adjacent.
   Return NULL iff the attempt failed, otherwise return the basic block
   where cleanup_cfg should continue.  Because the merging commonly
   moves a basic block away or introduces another optimization
   possibility, return the basic block just before B so cleanup_cfg
   doesn't get confused.

   It may be a good idea to return the basic block before C in the case
   C has been moved after B and originally appeared earlier in the
   insn sequence, but we have no information available about the
   relative ordering of these two.  Hopefully it is not too common.  */

merge_blocks_move (edge e, basic_block b, basic_block c, int mode)

  /* If we are partitioning hot/cold basic blocks, we don't want to
     mess up unconditional or indirect jumps that cross between hot
     and cold sections.

     Basic block partitioning may result in some jumps that appear to
     be optimizable (or blocks that appear to be mergeable), but which really
     must be left untouched (they are required to make it safely across
     partition boundaries).  See the comments at the top of
     bb-reorder.c:partition_hot_cold_basic_blocks for complete details.  */

  if (BB_PARTITION (b) != BB_PARTITION (c))
    return NULL;

  /* If B has a fallthru edge to C, no need to move anything.  */
  if (e->flags & EDGE_FALLTHRU)

      int b_index = b->index, c_index = c->index;

      /* Protect the loop latches.  */
      if (current_loops && c->loop_father->latch == c)
        return NULL;

      merge_blocks (b, c);
      update_forwarder_flag (b);

      if (dump_file)
        fprintf (dump_file, "Merged %d and %d without moving.\n",
                 b_index, c_index);

      return b->prev_bb == ENTRY_BLOCK_PTR ? b : b->prev_bb;

  /* Otherwise we will need to move code around.  Do that only if expensive
     transformations are allowed.  */
  else if (mode & CLEANUP_EXPENSIVE)

      edge tmp_edge, b_fallthru_edge;
      bool c_has_outgoing_fallthru;
      bool b_has_incoming_fallthru;

      /* Avoid overactive code motion, as the forwarder blocks should be
         eliminated by edge redirection instead.  An exception might be
         if B is a forwarder block and C has no fallthru edge, but
         that should be cleaned up by bb-reorder instead.  */
      if (FORWARDER_BLOCK_P (b) || FORWARDER_BLOCK_P (c))
        return NULL;

      /* We must make sure to not munge nesting of lexical blocks,
         and loop notes.  This is done by squeezing out all the notes
         and leaving them there to lie.  Not ideal, but functional.  */

      tmp_edge = find_fallthru_edge (c->succs);
      c_has_outgoing_fallthru = (tmp_edge != NULL);

      tmp_edge = find_fallthru_edge (b->preds);
      b_has_incoming_fallthru = (tmp_edge != NULL);
      b_fallthru_edge = tmp_edge;
      next = b->prev_bb;
      if (next == c)
        next = next->prev_bb;

      /* Otherwise, we're going to try to move C after B.  If C does
         not have an outgoing fallthru, then it can be moved
         immediately after B without introducing or modifying jumps.  */
      if (! c_has_outgoing_fallthru)

          merge_blocks_move_successor_nojumps (b, c);
          return next == ENTRY_BLOCK_PTR ? next->next_bb : next;

      /* If B does not have an incoming fallthru, then it can be moved
         immediately before C without introducing or modifying jumps.
         C cannot be the first block, so we do not have to worry about
         accessing a non-existent block.  */

      if (b_has_incoming_fallthru)

          if (b_fallthru_edge->src == ENTRY_BLOCK_PTR)
            return NULL;
          bb = force_nonfallthru (b_fallthru_edge);
          if (bb)
            notice_new_block (bb);

      merge_blocks_move_predecessor_nojumps (b, c);
      return next == ENTRY_BLOCK_PTR ? next->next_bb : next;
/* Remove the memory attributes of MEM expressions X and Y
   if they are not equal.  */

merge_memattrs (rtx x, rtx y)

  if (x == 0 || y == 0)
    return;

  if (code != GET_CODE (y))
    return;
  if (GET_MODE (x) != GET_MODE (y))
    return;

  if (code == MEM && MEM_ATTRS (x) != MEM_ATTRS (y))

      else if (! MEM_ATTRS (y))

          HOST_WIDE_INT mem_size;

          if (MEM_ALIAS_SET (x) != MEM_ALIAS_SET (y))

              set_mem_alias_set (x, 0);
              set_mem_alias_set (y, 0);

          if (! mem_expr_equal_p (MEM_EXPR (x), MEM_EXPR (y)))

              clear_mem_offset (x);
              clear_mem_offset (y);

          else if (MEM_OFFSET_KNOWN_P (x) != MEM_OFFSET_KNOWN_P (y)
                   || (MEM_OFFSET_KNOWN_P (x)
                       && MEM_OFFSET (x) != MEM_OFFSET (y)))

              clear_mem_offset (x);
              clear_mem_offset (y);

          if (MEM_SIZE_KNOWN_P (x) && MEM_SIZE_KNOWN_P (y))

              mem_size = MAX (MEM_SIZE (x), MEM_SIZE (y));
              set_mem_size (x, mem_size);
              set_mem_size (y, mem_size);

          set_mem_align (x, MIN (MEM_ALIGN (x), MEM_ALIGN (y)));
          set_mem_align (y, MEM_ALIGN (x));

      if (MEM_READONLY_P (x) != MEM_READONLY_P (y))

          MEM_READONLY_P (x) = 0;
          MEM_READONLY_P (y) = 0;

      if (MEM_NOTRAP_P (x) != MEM_NOTRAP_P (y))

          MEM_NOTRAP_P (x) = 0;
          MEM_NOTRAP_P (y) = 0;

      if (MEM_VOLATILE_P (x) != MEM_VOLATILE_P (y))

          MEM_VOLATILE_P (x) = 1;
          MEM_VOLATILE_P (y) = 1;

  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)

      /* Two vectors must have the same length.  */
      if (XVECLEN (x, i) != XVECLEN (y, i))
        return;

      for (j = 0; j < XVECLEN (x, i); j++)
        merge_memattrs (XVECEXP (x, i, j), XVECEXP (y, i, j));

      merge_memattrs (XEXP (x, i), XEXP (y, i));
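/* Example (an illustrative sketch, not part of the original sources):
   when cross jumping matches two MEMs for the same address whose alias
   sets differ, the code above resets both alias sets to 0 (the
   conservative "anything" set), keeps the MAX of the known sizes and
   the MIN of the alignments, so the surviving insn carries only memory
   attributes that are valid for both of the original accesses.  */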
/* Check whether patterns P1 and P2 are equivalent, apart from the possibly
   different single sets S1 and S2.  */

equal_different_set_p (rtx p1, rtx s1, rtx p2, rtx s2)

  if (p1 == s1 && p2 == s2)
    return true;

  if (GET_CODE (p1) != PARALLEL || GET_CODE (p2) != PARALLEL)
    return false;

  if (XVECLEN (p1, 0) != XVECLEN (p2, 0))
    return false;

  for (i = 0; i < XVECLEN (p1, 0); i++)

      e1 = XVECEXP (p1, 0, i);
      e2 = XVECEXP (p2, 0, i);
      if (e1 == s1 && e2 == s2)
        continue;
      if (reload_completed
          ? rtx_renumbered_equal_p (e1, e2) : rtx_equal_p (e1, e2))
        continue;

      return false;

  return true;
/* Examine register notes on I1 and I2 and return:
   - dir_forward if I1 can be replaced by I2, or
   - dir_backward if I2 can be replaced by I1, or
   - dir_both if both are the case.  */

static enum replace_direction
can_replace_by (rtx i1, rtx i2)

  rtx s1, s2, d1, d2, src1, src2, note1, note2;
  bool c1, c2;

  /* Check for 2 sets.  */
  s1 = single_set (i1);
  s2 = single_set (i2);
  if (s1 == NULL_RTX || s2 == NULL_RTX)
    return dir_none;

  /* Check that the 2 sets set the same dest.  */
  d1 = SET_DEST (s1);
  d2 = SET_DEST (s2);
  if (!(reload_completed
        ? rtx_renumbered_equal_p (d1, d2) : rtx_equal_p (d1, d2)))
    return dir_none;

  /* Find identical reg_equiv or reg_equal notes, which imply that the 2 sets
     set dest to the same value.  */
  note1 = find_reg_equal_equiv_note (i1);
  note2 = find_reg_equal_equiv_note (i2);
  if (!note1 || !note2 || !rtx_equal_p (XEXP (note1, 0), XEXP (note2, 0))
      || !CONST_INT_P (XEXP (note1, 0)))
    return dir_none;

  if (!equal_different_set_p (PATTERN (i1), s1, PATTERN (i2), s2))
    return dir_none;

  /* Although the 2 sets set dest to the same value, we cannot replace
       (set (dest) (const_int))
     by
       (set (dest) (reg))
     because we don't know if the reg is live and has the same value at the
     location of replacement.  */
  src1 = SET_SRC (s1);
  src2 = SET_SRC (s2);
  c1 = CONST_INT_P (src1);
  c2 = CONST_INT_P (src2);
  if (c1 && c2)
    return dir_both;
  else if (c2)
    return dir_forward;
  else if (c1)
    return dir_backward;

  return dir_none;
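/* Example (an illustrative sketch, not part of the original sources):
   let i1 be "(set (reg 100) (reg 101))" and i2 be
   "(set (reg 100) (const_int 7))", both carrying a REG_EQUAL note of
   (const_int 7).  The two sets give reg 100 the same value, but only
   i2's source is safe at any program point, so the result is
   dir_forward: i1 may be replaced by i2, not the other way around.  */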
/* Merges directions A and B.  */

static enum replace_direction
merge_dir (enum replace_direction a, enum replace_direction b)

  /* Implements the following table (bo = dir_both, fw = dir_forward,
     bw = dir_backward, no = dir_none):

        |bo fw bw no
     bo |bo fw bw no
     fw |fw fw no no
     bw |bw no bw no
     no |no no no no  */
/* Examine I1 and I2 and return:
   - dir_forward if I1 can be replaced by I2, or
   - dir_backward if I2 can be replaced by I1, or
   - dir_both if both are the case.  */

static enum replace_direction
old_insns_match_p (int mode ATTRIBUTE_UNUSED, rtx i1, rtx i2)

  rtx p1, p2;

  /* Verify that I1 and I2 are equivalent.  */
  if (GET_CODE (i1) != GET_CODE (i2))
    return dir_none;

  /* __builtin_unreachable() may lead to empty blocks (ending with
     NOTE_INSN_BASIC_BLOCK).  They may be crossjumped.  */
  if (NOTE_INSN_BASIC_BLOCK_P (i1) && NOTE_INSN_BASIC_BLOCK_P (i2))
    return dir_both;

  /* ??? Do not allow cross-jumping between different stack levels.  */
  p1 = find_reg_note (i1, REG_ARGS_SIZE, NULL);
  p2 = find_reg_note (i2, REG_ARGS_SIZE, NULL);
  if (p1 && p2)

      p1 = XEXP (p1, 0);
      p2 = XEXP (p2, 0);
      if (!rtx_equal_p (p1, p2))
        return dir_none;

      /* ??? Worse, this adjustment had better be constant lest we
         have differing incoming stack levels.  */
      if (!frame_pointer_needed
          && find_args_size_adjust (i1) == HOST_WIDE_INT_MIN)
        return dir_none;

  p1 = PATTERN (i1);
  p2 = PATTERN (i2);
  if (GET_CODE (p1) != GET_CODE (p2))
    return dir_none;

  /* If this is a CALL_INSN, compare register usage information.
     If we don't check this on stack register machines, the two
     CALL_INSNs might be merged leaving reg-stack.c with mismatching
     numbers of stack registers in the same basic block.
     If we don't check this on machines with delay slots, a delay slot may
     be filled that clobbers a parameter expected by the subroutine.

     ??? We take the simple route for now and assume that if they're
     equal, they were constructed identically.

     Also check for identical exception regions.  */

  if (CALL_P (i1))

      /* Ensure the same EH region.  */
      rtx n1 = find_reg_note (i1, REG_EH_REGION, 0);
      rtx n2 = find_reg_note (i2, REG_EH_REGION, 0);

      if (n1 && (!n2 || XEXP (n1, 0) != XEXP (n2, 0)))
        return dir_none;

      if (!rtx_equal_p (CALL_INSN_FUNCTION_USAGE (i1),
                        CALL_INSN_FUNCTION_USAGE (i2))
          || SIBLING_CALL_P (i1) != SIBLING_CALL_P (i2))
        return dir_none;

      /* For address sanitizer, never crossjump __asan_report_* builtins,
         otherwise errors might be reported on incorrect lines.  */

          rtx call = get_call_rtx_from (i1);
          if (call && GET_CODE (XEXP (XEXP (call, 0), 0)) == SYMBOL_REF)

              rtx symbol = XEXP (XEXP (call, 0), 0);
              if (SYMBOL_REF_DECL (symbol)
                  && TREE_CODE (SYMBOL_REF_DECL (symbol)) == FUNCTION_DECL)

                  if ((DECL_BUILT_IN_CLASS (SYMBOL_REF_DECL (symbol))
                       == BUILT_IN_NORMAL)
                      && DECL_FUNCTION_CODE (SYMBOL_REF_DECL (symbol))
                         >= BUILT_IN_ASAN_REPORT_LOAD1
                      && DECL_FUNCTION_CODE (SYMBOL_REF_DECL (symbol))
                         <= BUILT_IN_ASAN_REPORT_STORE16)
                    return dir_none;

  /* If cross_jump_death_matters is not 0, the insn's mode
     indicates whether or not the insn contains any stack-like
     regs.  */

  if ((mode & CLEANUP_POST_REGSTACK) && stack_regs_mentioned (i1))

      /* If register stack conversion has already been done, then
         death notes must also be compared before it is certain that
         the two instruction streams match.  */

      rtx note;
      HARD_REG_SET i1_regset, i2_regset;

      CLEAR_HARD_REG_SET (i1_regset);
      CLEAR_HARD_REG_SET (i2_regset);

      for (note = REG_NOTES (i1); note; note = XEXP (note, 1))
        if (REG_NOTE_KIND (note) == REG_DEAD && STACK_REG_P (XEXP (note, 0)))
          SET_HARD_REG_BIT (i1_regset, REGNO (XEXP (note, 0)));

      for (note = REG_NOTES (i2); note; note = XEXP (note, 1))
        if (REG_NOTE_KIND (note) == REG_DEAD && STACK_REG_P (XEXP (note, 0)))
          SET_HARD_REG_BIT (i2_regset, REGNO (XEXP (note, 0)));

      if (!hard_reg_set_equal_p (i1_regset, i2_regset))
        return dir_none;

  if (reload_completed
      ? rtx_renumbered_equal_p (p1, p2) : rtx_equal_p (p1, p2))
    return dir_both;

  return can_replace_by (i1, i2);
/* When comparing insns I1 and I2 in flow_find_cross_jump or
   flow_find_head_matching_sequence, ensure the notes match.  */

merge_notes (rtx i1, rtx i2)

  /* If the merged insns have different REG_EQUAL notes, then
     remove them.  */
  rtx equiv1 = find_reg_equal_equiv_note (i1);
  rtx equiv2 = find_reg_equal_equiv_note (i2);

  if (equiv1 && !equiv2)
    remove_note (i1, equiv1);
  else if (!equiv1 && equiv2)
    remove_note (i2, equiv2);
  else if (equiv1 && equiv2
           && !rtx_equal_p (XEXP (equiv1, 0), XEXP (equiv2, 0)))

      remove_note (i1, equiv1);
      remove_note (i2, equiv2);
/* Walk from I1 in BB1 backward till the next non-debug insn, and return the
   resulting insn in I1 and the corresponding bb in BB1.  At the head of a
   bb, if there is a predecessor bb that reaches this bb via fallthru, and
   FOLLOW_FALLTHRU, walk further in the predecessor bb and register this in
   DID_FALLTHRU.  Otherwise, stop at the head of the bb.  */

walk_to_nondebug_insn (rtx *i1, basic_block *bb1, bool follow_fallthru,
                       bool *did_fallthru)

  edge fallthru;

  *did_fallthru = false;

  while (!NONDEBUG_INSN_P (*i1))

      if (*i1 != BB_HEAD (*bb1))

          *i1 = PREV_INSN (*i1);
          continue;

      if (!follow_fallthru)
        return;

      fallthru = find_fallthru_edge ((*bb1)->preds);
      if (!fallthru || fallthru->src == ENTRY_BLOCK_PTR_FOR_FUNCTION (cfun)
          || !single_succ_p (fallthru->src))
        return;

      *bb1 = fallthru->src;
      *i1 = BB_END (*bb1);
      *did_fallthru = true;
/* Look through the insns at the end of BB1 and BB2 and find the longest
   sequence of insns that are either equivalent, or allow forward or backward
   replacement.  Store the first insns for that sequence in *F1 and *F2 and
   return the sequence length.

   DIR_P indicates the allowed replacement direction on function entry, and
   the actual replacement direction on function exit.  If NULL, only equivalent
   sequences are allowed.

   To simplify callers of this function, if the blocks match exactly,
   store the head of the blocks in *F1 and *F2.  */

flow_find_cross_jump (basic_block bb1, basic_block bb2, rtx *f1, rtx *f2,
                      enum replace_direction *dir_p)

  rtx i1, i2, last1, last2, afterlast1, afterlast2;
  enum replace_direction dir, last_dir, afterlast_dir;
  bool follow_fallthru, did_fallthru;

  afterlast_dir = dir;
  last_dir = afterlast_dir;

  /* Skip simple jumps at the end of the blocks.  Complex jumps still
     need to be compared for equivalence, which we'll do below.  */

  i1 = BB_END (bb1);
  last1 = afterlast1 = last2 = afterlast2 = NULL_RTX;
  if (onlyjump_p (i1)
      || (returnjump_p (i1) && !side_effects_p (PATTERN (i1))))

      last1 = i1;
      i1 = PREV_INSN (i1);

  i2 = BB_END (bb2);
  if (onlyjump_p (i2)
      || (returnjump_p (i2) && !side_effects_p (PATTERN (i2))))

      last2 = i2;
      /* Count everything except for unconditional jumps as insns.  */
      if (!simplejump_p (i2) && !returnjump_p (i2) && last1)
        ninsns++;
      i2 = PREV_INSN (i2);

  while (true)

      /* In the following example, we can replace all jumps to C by jumps to A.

         This removes 4 duplicate insns.
         [bb A] insn1            [bb C] insn1
                insn2                   insn2
         [bb B] insn3                   insn3
                insn4                   insn4
         [bb C] insn5                   insn5

         We could also replace all jumps to A by jumps to C, but that leaves B
         alive, and removes only 2 duplicate insns.  In a subsequent crossjump
         step, all jumps to B would be replaced with jumps to the middle of C,
         achieving the same result with more effort.
         So we allow only the first possibility, which means that we don't allow
         fallthru in the block that's being replaced.  */

      follow_fallthru = dir_p && dir != dir_forward;
      walk_to_nondebug_insn (&i1, &bb1, follow_fallthru, &did_fallthru);
      if (did_fallthru)
        dir = dir_backward;

      follow_fallthru = dir_p && dir != dir_backward;
      walk_to_nondebug_insn (&i2, &bb2, follow_fallthru, &did_fallthru);
      if (did_fallthru)
        dir = dir_forward;

      if (i1 == BB_HEAD (bb1) || i2 == BB_HEAD (bb2))
        break;

      dir = merge_dir (dir, old_insns_match_p (0, i1, i2));
      if (dir == dir_none || (!dir_p && dir != dir_both))
        break;

      merge_memattrs (i1, i2);

      /* Don't begin a cross-jump with a NOTE insn.  */
      if (INSN_P (i1))

          merge_notes (i1, i2);

          afterlast1 = last1, afterlast2 = last2;
          last1 = i1, last2 = i2;
          afterlast_dir = last_dir;
          last_dir = dir;
          p1 = PATTERN (i1);
          if (!(GET_CODE (p1) == USE || GET_CODE (p1) == CLOBBER))
            ninsns++;

      i1 = PREV_INSN (i1);
      i2 = PREV_INSN (i2);

  /* Don't allow the insn after a compare to be shared by
     cross-jumping unless the compare is also shared.  */
  if (ninsns && reg_mentioned_p (cc0_rtx, last1) && ! sets_cc0_p (last1))
    last1 = afterlast1, last2 = afterlast2, last_dir = afterlast_dir, ninsns--;

  /* Include preceding notes and labels in the cross-jump.  One,
     this may bring us to the head of the blocks as requested above.
     Two, it keeps line number notes as matched as may be.  */
  if (ninsns)

      bb1 = BLOCK_FOR_INSN (last1);
      while (last1 != BB_HEAD (bb1) && !NONDEBUG_INSN_P (PREV_INSN (last1)))
        last1 = PREV_INSN (last1);

      if (last1 != BB_HEAD (bb1) && LABEL_P (PREV_INSN (last1)))
        last1 = PREV_INSN (last1);

      bb2 = BLOCK_FOR_INSN (last2);
      while (last2 != BB_HEAD (bb2) && !NONDEBUG_INSN_P (PREV_INSN (last2)))
        last2 = PREV_INSN (last2);

      if (last2 != BB_HEAD (bb2) && LABEL_P (PREV_INSN (last2)))
        last2 = PREV_INSN (last2);

      *f1 = last1;
      *f2 = last2;
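/* Illustration (an assumed sketch, not part of the original sources):
   if BB1 and BB2 both end in the same three insns followed by a jump
   to a common destination, the backward scan above pairs them up insn
   by insn, returns 3, and leaves *F1 and *F2 pointing at the first
   insns of the two matched tails.  */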
/* Like flow_find_cross_jump, except start looking for a matching sequence from
   the head of the two blocks.  Do not include jumps at the end.
   If STOP_AFTER is nonzero, stop after finding that many matching
   insns.  */

flow_find_head_matching_sequence (basic_block bb1, basic_block bb2, rtx *f1,
                                  rtx *f2, int stop_after)

  rtx i1, i2, last1, last2, beforelast1, beforelast2;
  int ninsns = 0;
  edge e;
  edge_iterator ei;
  int nehedges1 = 0, nehedges2 = 0;

  FOR_EACH_EDGE (e, ei, bb1->succs)
    if (e->flags & EDGE_EH)
      nehedges1++;
  FOR_EACH_EDGE (e, ei, bb2->succs)
    if (e->flags & EDGE_EH)
      nehedges2++;

  i1 = BB_HEAD (bb1);
  i2 = BB_HEAD (bb2);
  last1 = beforelast1 = last2 = beforelast2 = NULL_RTX;

  while (true)

      /* Ignore notes, except NOTE_INSN_EPILOGUE_BEG.  */
      while (!NONDEBUG_INSN_P (i1) && i1 != BB_END (bb1))

          if (NOTE_P (i1) && NOTE_KIND (i1) == NOTE_INSN_EPILOGUE_BEG)
            break;
          i1 = NEXT_INSN (i1);

      while (!NONDEBUG_INSN_P (i2) && i2 != BB_END (bb2))

          if (NOTE_P (i2) && NOTE_KIND (i2) == NOTE_INSN_EPILOGUE_BEG)
            break;
          i2 = NEXT_INSN (i2);

      if ((i1 == BB_END (bb1) && !NONDEBUG_INSN_P (i1))
          || (i2 == BB_END (bb2) && !NONDEBUG_INSN_P (i2)))
        break;

      if (NOTE_P (i1) || NOTE_P (i2)
          || JUMP_P (i1) || JUMP_P (i2))
        break;

      /* A sanity check to make sure we're not merging insns with different
         effects on EH.  If only one of them ends a basic block, it shouldn't
         have an EH edge; if both end a basic block, there should be the same
         number of EH edges.  */
      if ((i1 == BB_END (bb1) && i2 != BB_END (bb2)
           && nehedges1 > 0)
          || (i2 == BB_END (bb2) && i1 != BB_END (bb1)
              && nehedges2 > 0)
          || (i1 == BB_END (bb1) && i2 == BB_END (bb2)
              && nehedges1 != nehedges2))
        break;

      if (old_insns_match_p (0, i1, i2) != dir_both)
        break;

      merge_memattrs (i1, i2);

      /* Don't begin a cross-jump with a NOTE insn.  */
      if (INSN_P (i1))

          merge_notes (i1, i2);

          beforelast1 = last1, beforelast2 = last2;
          last1 = i1, last2 = i2;
          ninsns++;

      if (i1 == BB_END (bb1) || i2 == BB_END (bb2)
          || (stop_after > 0 && ninsns == stop_after))
        break;

      i1 = NEXT_INSN (i1);
      i2 = NEXT_INSN (i2);

  /* Don't allow a compare to be shared by cross-jumping unless the insn
     after the compare is also shared.  */
  if (ninsns && reg_mentioned_p (cc0_rtx, last1) && sets_cc0_p (last1))
    last1 = beforelast1, last2 = beforelast2, ninsns--;

  if (ninsns)

      *f1 = last1;
      *f2 = last2;

  return ninsns;
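/* Illustration (an assumed sketch, not part of the original sources):
   when the two successors of a conditional jump both start with the
   same insn sequence, this function reports the length of that common
   head; try_head_merge_bb () then decides whether the sequence can be
   moved up across the basic block boundary, before the jump.  */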
/* Return true iff the outgoing edges of BB1 and BB2 match, together with
   the branch instruction.  This means that if we commonize the control
   flow before the end of the basic block, the semantics remain unchanged.

   We may assume that there exists one edge with a common destination.  */

outgoing_edges_match (int mode, basic_block bb1, basic_block bb2)

  int nehedges1 = 0, nehedges2 = 0;
  edge fallthru1 = 0, fallthru2 = 0;
  edge e1, e2;
  edge_iterator ei;

  /* If we performed shrink-wrapping, edges to the EXIT_BLOCK_PTR can
     only be distinguished for JUMP_INSNs.  The two paths may differ in
     whether they went through the prologue.  Sibcalls are fine, we know
     that we either didn't need or inserted an epilogue before them.  */
  if (crtl->shrink_wrapped
      && single_succ_p (bb1) && single_succ (bb1) == EXIT_BLOCK_PTR
      && !JUMP_P (BB_END (bb1))
      && !(CALL_P (BB_END (bb1)) && SIBLING_CALL_P (BB_END (bb1))))
    return false;

  /* If BB1 has only one successor, we may be looking at either an
     unconditional jump, or a fake edge to exit.  */
  if (single_succ_p (bb1)
      && (single_succ_edge (bb1)->flags & (EDGE_COMPLEX | EDGE_FAKE)) == 0
      && (!JUMP_P (BB_END (bb1)) || simplejump_p (BB_END (bb1))))
    return (single_succ_p (bb2)
            && (single_succ_edge (bb2)->flags
                & (EDGE_COMPLEX | EDGE_FAKE)) == 0
            && (!JUMP_P (BB_END (bb2)) || simplejump_p (BB_END (bb2))));

  /* Match conditional jumps - this may get tricky when fallthru and branch
     edges are crossed.  */
  if (EDGE_COUNT (bb1->succs) == 2
      && any_condjump_p (BB_END (bb1))
      && onlyjump_p (BB_END (bb1)))

      edge b1, f1, b2, f2;
      bool reverse, match;
      rtx set1, set2, cond1, cond2;
      enum rtx_code code1, code2;

      if (EDGE_COUNT (bb2->succs) != 2
          || !any_condjump_p (BB_END (bb2))
          || !onlyjump_p (BB_END (bb2)))
        return false;

      b1 = BRANCH_EDGE (bb1);
      b2 = BRANCH_EDGE (bb2);
      f1 = FALLTHRU_EDGE (bb1);
      f2 = FALLTHRU_EDGE (bb2);

      /* Get around possible forwarders on fallthru edges.  Other cases
         should be optimized out already.  */
      if (FORWARDER_BLOCK_P (f1->dest))
        f1 = single_succ_edge (f1->dest);

      if (FORWARDER_BLOCK_P (f2->dest))
        f2 = single_succ_edge (f2->dest);

      /* To simplify use of this function, return false if there are
         unneeded forwarder blocks.  These will get eliminated later
         during cleanup_cfg.  */
      if (FORWARDER_BLOCK_P (f1->dest)
          || FORWARDER_BLOCK_P (f2->dest)
          || FORWARDER_BLOCK_P (b1->dest)
          || FORWARDER_BLOCK_P (b2->dest))
        return false;

      if (f1->dest == f2->dest && b1->dest == b2->dest)
        reverse = false;
      else if (f1->dest == b2->dest && b1->dest == f2->dest)
        reverse = true;
      else
        return false;

      set1 = pc_set (BB_END (bb1));
      set2 = pc_set (BB_END (bb2));
      if ((XEXP (SET_SRC (set1), 1) == pc_rtx)
          != (XEXP (SET_SRC (set2), 1) == pc_rtx))
        reverse = !reverse;

      cond1 = XEXP (SET_SRC (set1), 0);
      cond2 = XEXP (SET_SRC (set2), 0);
      code1 = GET_CODE (cond1);
      if (reverse)
        code2 = reversed_comparison_code (cond2, BB_END (bb2));
      else
        code2 = GET_CODE (cond2);

      if (code2 == UNKNOWN)
        return false;

      /* Verify codes and operands match.  */
      match = ((code1 == code2
                && rtx_renumbered_equal_p (XEXP (cond1, 0), XEXP (cond2, 0))
                && rtx_renumbered_equal_p (XEXP (cond1, 1), XEXP (cond2, 1)))
               || (code1 == swap_condition (code2)
                   && rtx_renumbered_equal_p (XEXP (cond1, 1),
                                              XEXP (cond2, 0))
                   && rtx_renumbered_equal_p (XEXP (cond1, 0),
                                              XEXP (cond2, 1))));

      /* If we return true, we will join the blocks.  Which means that
         we will only have one branch prediction bit to work with.  Thus
         we require the existing branches to have probabilities that are
         reasonably close.  */
      if (match
          && optimize_bb_for_speed_p (bb1)
          && optimize_bb_for_speed_p (bb2))

          int prob2;

          if (b1->dest == b2->dest)
            prob2 = b2->probability;
          else
            /* Do not use f2 probability as f2 may be forwarded.  */
            prob2 = REG_BR_PROB_BASE - b2->probability;

          /* Fail if the difference in probabilities is greater than 50%.
             This rules out two well-predicted branches with opposite
             outcomes.  */
          if (abs (b1->probability - prob2) > REG_BR_PROB_BASE / 2)

              if (dump_file)
                fprintf (dump_file,
                         "Outcomes of branch in bb %i and %i differ too much (%i %i)\n",
                         bb1->index, bb2->index, b1->probability, prob2);

              return false;

      if (dump_file && match)
        fprintf (dump_file, "Conditionals in bb %i and %i match.\n",
                 bb1->index, bb2->index);

      return match;
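/* Example (an illustrative sketch, not part of the original sources):
   a block ending in "if (a < b) goto X" with fallthru to Y matches a
   block ending in "if (b > a) goto X" via the swapped-operand test,
   and also matches "if (a >= b) goto Y" with fallthru to X via the
   reversed-condition test, provided the branch probabilities are
   reasonably close.  */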
  /* Generic case - we are seeing a computed jump, table jump or trapping
     instruction.  */

  /* Check whether there are tablejumps in the end of BB1 and BB2.
     Return true if they are identical.  */

    rtx label1, label2;
    rtx table1, table2;

    if (tablejump_p (BB_END (bb1), &label1, &table1)
        && tablejump_p (BB_END (bb2), &label2, &table2)
        && GET_CODE (PATTERN (table1)) == GET_CODE (PATTERN (table2)))

        /* The labels should never be the same rtx.  If they really are the
           same, the jump tables are the same too.  So disable crossjumping
           of blocks BB1 and BB2 because when deleting the common insns in
           the end of BB1 by delete_basic_block () the jump table would be
           deleted too.  */
        /* If LABEL2 is referenced in BB1->END, do not do anything
           because we would lose information when replacing
           LABEL1 by LABEL2 and then LABEL2 by LABEL1 in BB1->END.  */
        if (label1 != label2 && !rtx_referenced_p (label2, BB_END (bb1)))

            /* Set IDENTICAL to true when the tables are identical.  */
            bool identical = false;
            rtx p1, p2;

            p1 = PATTERN (table1);
            p2 = PATTERN (table2);
            if (GET_CODE (p1) == ADDR_VEC && rtx_equal_p (p1, p2))
              identical = true;
            else if (GET_CODE (p1) == ADDR_DIFF_VEC
                     && (XVECLEN (p1, 1) == XVECLEN (p2, 1))
                     && rtx_equal_p (XEXP (p1, 2), XEXP (p2, 2))
                     && rtx_equal_p (XEXP (p1, 3), XEXP (p2, 3)))

                identical = true;
                for (i = XVECLEN (p1, 1) - 1; i >= 0 && identical; i--)
                  if (!rtx_equal_p (XVECEXP (p1, 1, i), XVECEXP (p2, 1, i)))
                    identical = false;

            if (identical)

                replace_label_data rr;
                bool match;

                /* Temporarily replace references to LABEL1 with LABEL2
                   in BB1->END so that we could compare the instructions.  */
                rr.r1 = label1;
                rr.r2 = label2;
                rr.update_label_nuses = false;
                for_each_rtx (&BB_END (bb1), replace_label, &rr);

                match = (old_insns_match_p (mode, BB_END (bb1), BB_END (bb2))
                         == dir_both);
                if (dump_file && match)
                  fprintf (dump_file,
                           "Tablejumps in bb %i and %i match.\n",
                           bb1->index, bb2->index);

                /* Set the original label in BB1->END because when deleting
                   a block whose end is a tablejump, the tablejump referenced
                   from the instruction is deleted too.  */
                rr.r1 = label2;
                rr.r2 = label1;
                for_each_rtx (&BB_END (bb1), replace_label, &rr);

                return match;
    rtx last1 = BB_END (bb1);
    rtx last2 = BB_END (bb2);
    if (DEBUG_INSN_P (last1))
      last1 = prev_nondebug_insn (last1);
    if (DEBUG_INSN_P (last2))
      last2 = prev_nondebug_insn (last2);

    /* First ensure that the instructions match.  There may be many outgoing
       edges so this test is generally cheaper.  */
    if (old_insns_match_p (mode, last1, last2) != dir_both)
      return false;

  /* Search the outgoing edges, ensure that the counts do match, find possible
     fallthru and exception handling edges since these need more
     validation.  */
  if (EDGE_COUNT (bb1->succs) != EDGE_COUNT (bb2->succs))
    return false;

  bool nonfakeedges = false;
  FOR_EACH_EDGE (e1, ei, bb1->succs)

      e2 = EDGE_SUCC (bb2, ei.index);

      if ((e1->flags & EDGE_FAKE) == 0)
        nonfakeedges = true;

      if (e1->flags & EDGE_EH)
        nehedges1++;
      if (e2->flags & EDGE_EH)
        nehedges2++;

      if (e1->flags & EDGE_FALLTHRU)
        fallthru1 = e1;
      if (e2->flags & EDGE_FALLTHRU)
        fallthru2 = e2;

  /* If the number of edges of various types does not match, fail.  */
  if (nehedges1 != nehedges2
      || (fallthru1 != 0) != (fallthru2 != 0))
    return false;

  /* If !ACCUMULATE_OUTGOING_ARGS, bb1 (and bb2) have no successors
     and the last real insn doesn't have a REG_ARGS_SIZE note, don't
     attempt to optimize, as the two basic blocks might have different
     REG_ARGS_SIZE depths.  For noreturn calls and unconditional
     traps there should be REG_ARGS_SIZE notes; they could be missing
     for __builtin_unreachable () uses though.  */
  if (!nonfakeedges
      && !ACCUMULATE_OUTGOING_ARGS
      && (!INSN_P (last1)
          || !find_reg_note (last1, REG_ARGS_SIZE, NULL)))
    return false;
  /* Fallthru edges must be forwarded to the same destination.  */
  if (fallthru1)

      basic_block d1 = (forwarder_block_p (fallthru1->dest)
                        ? single_succ (fallthru1->dest): fallthru1->dest);
      basic_block d2 = (forwarder_block_p (fallthru2->dest)
                        ? single_succ (fallthru2->dest): fallthru2->dest);

      if (d1 != d2)
        return false;

  /* Ensure the same EH region.  */

    rtx n1 = find_reg_note (BB_END (bb1), REG_EH_REGION, 0);
    rtx n2 = find_reg_note (BB_END (bb2), REG_EH_REGION, 0);

    if (n1 && (!n2 || XEXP (n1, 0) != XEXP (n2, 0)))
      return false;

  /* The same checks as in try_crossjump_to_edge.  It is required for the RTL
     version of sequence abstraction.  */
  FOR_EACH_EDGE (e1, ei, bb2->succs)

      edge e2;
      edge_iterator ei;
      basic_block d1 = e1->dest;

      if (FORWARDER_BLOCK_P (d1))
        d1 = EDGE_SUCC (d1, 0)->dest;

      FOR_EACH_EDGE (e2, ei, bb1->succs)

          basic_block d2 = e2->dest;
          if (FORWARDER_BLOCK_P (d2))
            d2 = EDGE_SUCC (d2, 0)->dest;
          if (d1 == d2)
            break;

      if (!e2)
        return false;

  return true;
/* Return true if basic block BB has a preserve label.  */

block_has_preserve_label (basic_block bb)

  return (bb
          && block_label (bb)
          && LABEL_PRESERVE_P (block_label (bb)));
/* E1 and E2 are edges with the same destination block.  Search their
   predecessors for common code.  If found, redirect control flow from
   (maybe the middle of) E1->SRC to (maybe the middle of) E2->SRC (dir_forward),
   or the other way around (dir_backward).  DIR specifies the allowed
   replacement direction.  */

try_crossjump_to_edge (int mode, edge e1, edge e2,
                       enum replace_direction dir)

  int nmatch;
  basic_block src1 = e1->src, src2 = e2->src;
  basic_block redirect_to, redirect_from, to_remove;
  basic_block osrc1, osrc2, redirect_edges_to, tmp;
  rtx newpos1, newpos2;

  newpos1 = newpos2 = NULL_RTX;

  /* If we have partitioned hot/cold basic blocks, it is a bad idea
     to try this optimization.

     Basic block partitioning may result in some jumps that appear to
     be optimizable (or blocks that appear to be mergeable), but which really
     must be left untouched (they are required to make it safely across
     partition boundaries).  See the comments at the top of
     bb-reorder.c:partition_hot_cold_basic_blocks for complete details.  */

  if (flag_reorder_blocks_and_partition && reload_completed)
    return false;

  /* Search backward through forwarder blocks.  We don't need to worry
     about multiple entry or chained forwarders, as they will be optimized
     away.  We do this to look past the unconditional jump following a
     conditional jump that is required due to the current CFG shape.  */
  if (single_pred_p (src1)
      && FORWARDER_BLOCK_P (src1))
    e1 = single_pred_edge (src1), src1 = e1->src;

  if (single_pred_p (src2)
      && FORWARDER_BLOCK_P (src2))
    e2 = single_pred_edge (src2), src2 = e2->src;

  /* Nothing to do if we reach ENTRY, or a common source block.  */
  if (src1 == ENTRY_BLOCK_PTR || src2 == ENTRY_BLOCK_PTR)
    return false;
  if (src1 == src2)
    return false;

  /* Seeing more than one forwarder block would confuse us later...  */
  if (FORWARDER_BLOCK_P (e1->dest)
      && FORWARDER_BLOCK_P (single_succ (e1->dest)))
    return false;

  if (FORWARDER_BLOCK_P (e2->dest)
      && FORWARDER_BLOCK_P (single_succ (e2->dest)))
    return false;

  /* Likewise with dead code (possibly newly created by the other optimizations
     of cfg_cleanup).  */
  if (EDGE_COUNT (src1->preds) == 0 || EDGE_COUNT (src2->preds) == 0)
    return false;
  /* Look for the common insn sequence, part the first ...  */
  if (!outgoing_edges_match (mode, src1, src2))
    return false;

  /* ... and part the second.  */
  nmatch = flow_find_cross_jump (src1, src2, &newpos1, &newpos2, &dir);

  osrc1 = src1;
  osrc2 = src2;
  if (newpos1 != NULL_RTX)
    src1 = BLOCK_FOR_INSN (newpos1);
  if (newpos2 != NULL_RTX)
    src2 = BLOCK_FOR_INSN (newpos2);

  if (dir == dir_backward)

#define SWAP(T, X, Y) do { T tmp = (X); (X) = (Y); (Y) = tmp; } while (0)
      SWAP (basic_block, osrc1, osrc2);
      SWAP (basic_block, src1, src2);
      SWAP (edge, e1, e2);
      SWAP (rtx, newpos1, newpos2);
#undef SWAP

  /* Don't proceed with the crossjump unless we found a sufficient number
     of matching instructions or the 'from' block was totally matched
     (such that its predecessors will hopefully be redirected and the
     block removed).  */
  if ((nmatch < PARAM_VALUE (PARAM_MIN_CROSSJUMP_INSNS))
      && (newpos1 != BB_HEAD (src1)))
    return false;

  /* Avoid deleting a preserve label when redirecting ABNORMAL edges.  */
  if (block_has_preserve_label (e1->dest)
      && (e1->flags & EDGE_ABNORMAL))
    return false;

  /* Here we know that the insns in the end of SRC1 which are common with SRC2
     will be deleted.
     If we have tablejumps in the end of SRC1 and SRC2
     they have been already compared for equivalence in outgoing_edges_match ()
     so replace the references to TABLE1 by references to TABLE2.  */

      rtx label1, label2;
      rtx table1, table2;

      if (tablejump_p (BB_END (osrc1), &label1, &table1)
          && tablejump_p (BB_END (osrc2), &label2, &table2)
          && label1 != label2)

          replace_label_data rr;
          rtx insn;

          /* Replace references to LABEL1 with LABEL2.  */
          rr.r1 = label1;
          rr.r2 = label2;
          rr.update_label_nuses = true;
          for (insn = get_insns (); insn; insn = NEXT_INSN (insn))

              /* Do not replace the label in SRC1->END because when deleting
                 a block whose end is a tablejump, the tablejump referenced
                 from the instruction is deleted too.  */
              if (insn != BB_END (osrc1))
                for_each_rtx (&insn, replace_label, &rr);

  /* Avoid splitting if possible.  We must always split when SRC2 has
     EH predecessor edges, or we may end up with basic blocks with both
     normal and EH predecessor edges.  */
  if (newpos2 == BB_HEAD (src2)
      && !(EDGE_PRED (src2, 0)->flags & EDGE_EH))
    redirect_to = src2;
  else

      if (newpos2 == BB_HEAD (src2))

          /* Skip possible basic block header.  */
          if (LABEL_P (newpos2))
            newpos2 = NEXT_INSN (newpos2);
          while (DEBUG_INSN_P (newpos2))
            newpos2 = NEXT_INSN (newpos2);
          if (NOTE_P (newpos2))
            newpos2 = NEXT_INSN (newpos2);
          while (DEBUG_INSN_P (newpos2))
            newpos2 = NEXT_INSN (newpos2);

      if (dump_file)
        fprintf (dump_file, "Splitting bb %i before %i insns\n",
                 src2->index, nmatch);
      redirect_to = split_block (src2, PREV_INSN (newpos2))->dest;
2019 "Cross jumping from bb %i to bb %i; %i common insns\n",
2020 src1->index, src2->index, nmatch);
2022 /* We may have some registers visible through the block. */
2023 df_set_bb_dirty (redirect_to);
2026 redirect_edges_to = redirect_to;
2028 redirect_edges_to = osrc2;
2030 /* Recompute the frequencies and counts of outgoing edges. */
2031 FOR_EACH_EDGE (s, ei, redirect_edges_to->succs)
2035 basic_block d = s->dest;
2037 if (FORWARDER_BLOCK_P (d))
2038 d = single_succ (d);
2040 FOR_EACH_EDGE (s2, ei, src1->succs)
2042 basic_block d2 = s2->dest;
2043 if (FORWARDER_BLOCK_P (d2))
2044 d2 = single_succ (d2);
2049 s->count += s2->count;
2051 /* Take care to update possible forwarder blocks. We verified
2052 that there is no more than one in the chain, so we can't run
2053 into infinite loop. */
2054 if (FORWARDER_BLOCK_P (s->dest))
2056 single_succ_edge (s->dest)->count += s2->count;
2057 s->dest->count += s2->count;
2058 s->dest->frequency += EDGE_FREQUENCY (s);
2061 if (FORWARDER_BLOCK_P (s2->dest))
2063 single_succ_edge (s2->dest)->count -= s2->count;
2064 if (single_succ_edge (s2->dest)->count < 0)
2065 single_succ_edge (s2->dest)->count = 0;
2066 s2->dest->count -= s2->count;
2067 s2->dest->frequency -= EDGE_FREQUENCY (s);
2068 if (s2->dest->frequency < 0)
2069 s2->dest->frequency = 0;
2070 if (s2->dest->count < 0)
2071 s2->dest->count = 0;
2074 if (!redirect_edges_to->frequency && !src1->frequency)
2075 s->probability = (s->probability + s2->probability) / 2;
2078 = ((s->probability * redirect_edges_to->frequency +
2079 s2->probability * src1->frequency)
2080 / (redirect_edges_to->frequency + src1->frequency));
2083 /* Adjust count and frequency for the block. An earlier jump
2084 threading pass may have left the profile in an inconsistent
2085 state (see update_bb_profile_for_threading) so we must be
2086 prepared for overflows. */
2090 tmp->count += src1->count;
2091 tmp->frequency += src1->frequency;
2092 if (tmp->frequency > BB_FREQ_MAX)
2093 tmp->frequency = BB_FREQ_MAX;
2094 if (tmp == redirect_edges_to)
2096 tmp = find_fallthru_edge (tmp->succs)->dest;
2099 update_br_prob_note (redirect_edges_to);
2101 /* Edit SRC1 to go to REDIRECT_TO at NEWPOS1. */
2103 /* Skip possible basic block header. */
2104 if (LABEL_P (newpos1))
2105 newpos1 = NEXT_INSN (newpos1);
2107 while (DEBUG_INSN_P (newpos1))
2108 newpos1 = NEXT_INSN (newpos1);
2110 if (NOTE_INSN_BASIC_BLOCK_P (newpos1))
2111 newpos1 = NEXT_INSN (newpos1);
2113 while (DEBUG_INSN_P (newpos1))
2114 newpos1 = NEXT_INSN (newpos1);
2116 redirect_from = split_block (src1, PREV_INSN (newpos1))->src;
2117 to_remove = single_succ (redirect_from);
2119 redirect_edge_and_branch_force (single_succ_edge (redirect_from), redirect_to);
2120 delete_basic_block (to_remove);
2122 update_forwarder_flag (redirect_from);
2123 if (redirect_to != src2)
2124 update_forwarder_flag (src2);
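/* Illustration (an assumed sketch, not part of the original sources):
   when the tails of SRC1 and SRC2 share their last few insns, SRC2 is
   split right before its copy of the match (unless the whole block
   matched), SRC1 is split at NEWPOS1, and the head of SRC1 is rerouted
   into the split-off part of SRC2; the now-unreachable tail of SRC1 is
   deleted.  */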
/* Search the predecessors of BB for common insn sequences.  When found,
   share code between them by redirecting control flow.  Return true if
   any changes were made.  */

try_crossjump_bb (int mode, basic_block bb)

  edge e, e2, fallthru;
  unsigned max, ix, ix2;

  /* Nothing to do if there are not at least two incoming edges.  */
  if (EDGE_COUNT (bb->preds) < 2)
    return false;

  /* Don't crossjump if this block ends in a computed jump,
     unless we are optimizing for size.  */
  if (optimize_bb_for_size_p (bb)
      && bb != EXIT_BLOCK_PTR
      && computed_jump_p (BB_END (bb)))
    return false;

  /* If we are partitioning hot/cold basic blocks, we don't want to
     mess up unconditional or indirect jumps that cross between hot
     and cold sections.

     Basic block partitioning may result in some jumps that appear to
     be optimizable (or blocks that appear to be mergeable), but which really
     must be left untouched (they are required to make it safely across
     partition boundaries).  See the comments at the top of
     bb-reorder.c:partition_hot_cold_basic_blocks for complete details.  */

  if (BB_PARTITION (EDGE_PRED (bb, 0)->src) !=
      BB_PARTITION (EDGE_PRED (bb, 1)->src)
      || (EDGE_PRED (bb, 0)->flags & EDGE_CROSSING))
    return false;

  /* It is always cheapest to redirect a block that ends in a branch to
     a block that falls through into BB, as that adds no branches to the
     program.  We'll try that combination first.  */

  max = PARAM_VALUE (PARAM_MAX_CROSSJUMP_EDGES);

  if (EDGE_COUNT (bb->preds) > max)
    return false;

  fallthru = find_fallthru_edge (bb->preds);

  changed = false;
  for (ix = 0; ix < EDGE_COUNT (bb->preds);)

      e = EDGE_PRED (bb, ix);
      ix++;

      /* As noted above, first try with the fallthru predecessor (or, a
         fallthru predecessor if we are in cfglayout mode).  */
      if (fallthru)

          /* Don't combine the fallthru edge into anything else.
             If there is a match, we'll do it the other way around.  */
          if (e == fallthru)
            continue;
          /* If nothing changed since the last attempt, there is nothing
             we can do.  */
          if (!first_pass
              && !((e->src->flags & BB_MODIFIED)
                   || (fallthru->src->flags & BB_MODIFIED)))
            continue;

          if (try_crossjump_to_edge (mode, e, fallthru, dir_forward))

              changed = true;
              ix = 0;
              continue;

      /* Non-obvious work limiting check: Recognize that we're going
         to call try_crossjump_bb on every basic block.  So if we have
         two blocks with lots of outgoing edges (a switch) and they
         share lots of common destinations, then we would do the
         cross-jump check once for each common destination.

         Now, if the blocks actually are cross-jump candidates, then
         all of their destinations will be shared.  Which means that
         we only need check them for cross-jump candidacy once.  We
         can eliminate redundant checks of crossjump(A,B) by arbitrarily
         choosing to do the check from the block for which the edge
         in question is the first successor of A.  */
      if (EDGE_SUCC (e->src, 0) != e)
        continue;

      for (ix2 = 0; ix2 < EDGE_COUNT (bb->preds); ix2++)

          e2 = EDGE_PRED (bb, ix2);

          if (e2 == e)
            continue;

          /* We've already checked the fallthru edge above.  */
          if (e2 == fallthru)
            continue;

          /* The "first successor" check above only prevents multiple
             checks of crossjump(A,B).  In order to prevent redundant
             checks of crossjump(B,A), require that A be the block
             with the lowest index.  */
          if (e->src->index > e2->src->index)
            continue;

          /* If nothing changed since the last attempt, there is nothing
             we can do.  */
          if (!first_pass
              && !((e->src->flags & BB_MODIFIED)
                   || (e2->src->flags & BB_MODIFIED)))
            continue;

          /* Both e and e2 are not fallthru edges, so we can crossjump in
             either direction.  */
          if (try_crossjump_to_edge (mode, e, e2, dir_both))

              changed = true;
              ix = 0;
              break;

  if (changed)
    crossjumps_occured = true;

  return changed;
/* Search the successors of BB for common insn sequences.  When found,
   share code between them by moving the sequence across the basic block
   boundary.  Return true if any changes were made.  */
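/* Schematically, head merging turns

	BB: if (cond) goto B;    A: set r1; insn2; ...   B: set r1; insn2; ...

   into

	BB: set r1; insn2; if (cond) goto B;    A: ...   B: ...

   by hoisting the insn sequence that is identical at the head of every
   successor up into BB, ahead of the final jump.  */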
static bool
try_head_merge_bb (basic_block bb)
{
  basic_block final_dest_bb = NULL;
  int max_match = INT_MAX;
  edge e0;
  rtx *headptr, *currptr, *nextptr;
  bool changed, moveall;
  unsigned ix;
  rtx e0_last_head, cond, move_before;
  unsigned nedges = EDGE_COUNT (bb->succs);
  rtx jump = BB_END (bb);
  regset live, live_union;
  /* Nothing to do if there are not at least two outgoing edges.  */
  if (nedges < 2)
    return false;
  /* Don't crossjump if this block ends in a computed jump,
     unless we are optimizing for size.  */
  if (optimize_bb_for_size_p (bb)
      && bb != EXIT_BLOCK_PTR
      && computed_jump_p (BB_END (bb)))
    return false;
  cond = get_condition (jump, &move_before, true, false);
  if (cond == NULL_RTX)
    {
#ifdef HAVE_cc0
      if (reg_mentioned_p (cc0_rtx, jump))
	move_before = prev_nonnote_nondebug_insn (jump);
      else
#endif
	move_before = jump;
    }
  for (ix = 0; ix < nedges; ix++)
    if (EDGE_SUCC (bb, ix)->dest == EXIT_BLOCK_PTR)
      return false;
  for (ix = 0; ix < nedges; ix++)
    {
      edge e = EDGE_SUCC (bb, ix);
      basic_block other_bb = e->dest;

      if (df_get_bb_dirty (other_bb))
	{
	  block_was_dirty = true;
	  return false;
	}

      if (e->flags & EDGE_ABNORMAL)
	return false;
      /* Normally, all destination blocks must only be reachable from this
	 block, i.e. they must have one incoming edge.

	 There is one special case we can handle, that of multiple consecutive
	 jumps where the first jumps to one of the targets of the second jump.
	 This happens frequently in switch statements for default labels.
	 The structure is as follows:
	 FINAL_DEST_BB
	 ....
	 if (cond) jump A;
	 fall through
	 BB
	 jump with targets A, B, C, D...
	 A
	 has two incoming edges, from FINAL_DEST_BB and BB

	 In this case, we can try to move the insns through BB and into
	 FINAL_DEST_BB.  */
      if (EDGE_COUNT (other_bb->preds) != 1)
	{
	  edge incoming_edge, incoming_bb_other_edge;
	  edge_iterator ei;

	  if (final_dest_bb != NULL
	      || EDGE_COUNT (other_bb->preds) != 2)
	    return false;

	  /* We must be able to move the insns across the whole block.  */
	  move_before = BB_HEAD (bb);
	  while (!NONDEBUG_INSN_P (move_before))
	    move_before = NEXT_INSN (move_before);

	  if (EDGE_COUNT (bb->preds) != 1)
	    return false;
	  incoming_edge = EDGE_PRED (bb, 0);
	  final_dest_bb = incoming_edge->src;
	  if (EDGE_COUNT (final_dest_bb->succs) != 2)
	    return false;
	  FOR_EACH_EDGE (incoming_bb_other_edge, ei, final_dest_bb->succs)
	    if (incoming_bb_other_edge != incoming_edge)
	      {
		if (incoming_bb_other_edge->dest != other_bb)
		  return false;
	      }
	}
    }
  e0 = EDGE_SUCC (bb, 0);
  e0_last_head = NULL_RTX;
  changed = false;

  for (ix = 1; ix < nedges; ix++)
    {
      edge e = EDGE_SUCC (bb, ix);
      rtx e0_last, e_last;
      int nmatch;

      nmatch = flow_find_head_matching_sequence (e0->dest, e->dest,
						 &e0_last, &e_last, 0);
      if (nmatch == 0)
	return false;

      if (nmatch < max_match)
	{
	  max_match = nmatch;
	  e0_last_head = e0_last;
	}
    }
  /* If we matched an entire block, we probably have to avoid moving the
     last insn.  */
  if (max_match > 0
      && e0_last_head == BB_END (e0->dest)
      && (find_reg_note (e0_last_head, REG_EH_REGION, 0)
	  || control_flow_insn_p (e0_last_head)))
    {
      max_match--;
      if (max_match == 0)
	return false;
      do
	e0_last_head = prev_real_insn (e0_last_head);
      while (DEBUG_INSN_P (e0_last_head));
    }

  if (max_match == 0)
    return false;
  /* We must find a union of the live registers at each of the end points.  */
  live = BITMAP_ALLOC (NULL);
  live_union = BITMAP_ALLOC (NULL);

  currptr = XNEWVEC (rtx, nedges);
  headptr = XNEWVEC (rtx, nedges);
  nextptr = XNEWVEC (rtx, nedges);
  for (ix = 0; ix < nedges; ix++)
    {
      int j;
      basic_block merge_bb = EDGE_SUCC (bb, ix)->dest;
      rtx head = BB_HEAD (merge_bb);

      while (!NONDEBUG_INSN_P (head))
	head = NEXT_INSN (head);
      headptr[ix] = head;
      currptr[ix] = head;

      /* Compute the end point and live information.  */
      for (j = 1; j < max_match; j++)
	do
	  head = NEXT_INSN (head);
	while (!NONDEBUG_INSN_P (head));
      simulate_backwards_to_point (merge_bb, live, head);
      IOR_REG_SET (live_union, live);
    }
  /* If we're moving across two blocks, verify the validity of the
     first move, then adjust the target and let the loop below deal
     with the final move.  */
  if (final_dest_bb != NULL)
    {
      rtx move_upto;

      moveall = can_move_insns_across (currptr[0], e0_last_head, move_before,
				       jump, e0->dest, live_union,
				       NULL, &move_upto);
      if (!moveall)
	{
	  if (move_upto == NULL_RTX)
	    goto out;

	  while (e0_last_head != move_upto)
	    {
	      df_simulate_one_insn_backwards (e0->dest, e0_last_head,
					      live_union);
	      e0_last_head = PREV_INSN (e0_last_head);
	    }
	}
      if (e0_last_head == NULL_RTX)
	goto out;

      jump = BB_END (final_dest_bb);
      cond = get_condition (jump, &move_before, true, false);
      if (cond == NULL_RTX)
	{
#ifdef HAVE_cc0
	  if (reg_mentioned_p (cc0_rtx, jump))
	    move_before = prev_nonnote_nondebug_insn (jump);
	  else
#endif
	    move_before = jump;
	}
    }
  do
    {
      rtx move_upto;

      moveall = can_move_insns_across (currptr[0], e0_last_head,
				       move_before, jump, e0->dest, live_union,
				       NULL, &move_upto);
      if (!moveall && move_upto == NULL_RTX)
	{
	  if (jump == move_before)
	    break;

	  /* Try again, using a different insertion point.  */
	  move_before = jump;

#ifdef HAVE_cc0
	  /* Don't try moving before a cc0 user, as that may invalidate
	     the cc0.  */
	  if (reg_mentioned_p (cc0_rtx, jump))
	    break;
#endif

	  continue;
	}
      if (final_dest_bb && !moveall)
	/* We haven't checked whether a partial move would be OK for the first
	   move, so we have to fail this case.  */
	break;

      changed = true;
      for (;;)
	{
	  if (currptr[0] == move_upto)
	    break;
	  for (ix = 0; ix < nedges; ix++)
	    {
	      rtx curr = currptr[ix];
	      do
		curr = NEXT_INSN (curr);
	      while (!NONDEBUG_INSN_P (curr));
	      currptr[ix] = curr;
	    }
	}
      /* If we can't currently move all of the identical insns, remember
	 each insn after the range that we'll merge.  */
      if (!moveall)
	for (ix = 0; ix < nedges; ix++)
	  {
	    rtx curr = currptr[ix];
	    do
	      curr = NEXT_INSN (curr);
	    while (!NONDEBUG_INSN_P (curr));
	    nextptr[ix] = curr;
	  }
      reorder_insns (headptr[0], currptr[0], PREV_INSN (move_before));
      df_set_bb_dirty (EDGE_SUCC (bb, 0)->dest);
      if (final_dest_bb != NULL)
	df_set_bb_dirty (final_dest_bb);
      df_set_bb_dirty (bb);
      for (ix = 1; ix < nedges; ix++)
	{
	  df_set_bb_dirty (EDGE_SUCC (bb, ix)->dest);
	  delete_insn_chain (headptr[ix], currptr[ix], false);
	}
      if (!moveall)
	{
	  if (jump == move_before)
	    break;

	  /* For the unmerged insns, try a different insertion point.  */
	  move_before = jump;

#ifdef HAVE_cc0
	  /* Don't try moving before a cc0 user, as that may invalidate
	     the cc0.  */
	  if (reg_mentioned_p (cc0_rtx, jump))
	    break;
#endif

	  for (ix = 0; ix < nedges; ix++)
	    currptr[ix] = headptr[ix] = nextptr[ix];
	}
    }
  while (!moveall);
 out:
  free (currptr);
  free (headptr);
  free (nextptr);

  crossjumps_occured |= changed;

  return changed;
}
/* Return true if BB contains just a bb note, or a bb note followed
   by only DEBUG_INSNs.  */
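/* For example, a block whose entire insn stream is

	NOTE_INSN_BASIC_BLOCK
	(debug_insn ...)
	(debug_insn ...)

   is trivially empty: nothing in it can generate code.  */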
static bool
trivially_empty_bb_p (basic_block bb)
{
  rtx insn = BB_END (bb);

  while (1)
    {
      if (insn == BB_HEAD (bb))
	return true;
      if (!DEBUG_INSN_P (insn))
	return false;
      insn = PREV_INSN (insn);
    }
}
/* Do simple CFG optimizations - basic block merging, simplification of
   jump instructions, etc.  Return nonzero if changes were made.  */
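/* In outline: each iteration walks every basic block and attempts, in
   order, to delete trivially dead blocks, remove unused labels, remove
   empty fallthru blocks, merge a block with its single successor,
   simplify a branch over a branch, replace a non-trivial jump by a
   simple one, forward edges through forwarder blocks, cross jump, and
   head-merge.  Iteration stops once a whole pass makes no change.  */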
static bool
try_optimize_cfg (int mode)
{
  bool changed_overall = false;
  bool changed;
  int iterations = 0;
  basic_block bb, b, next;

  if (mode & (CLEANUP_CROSSJUMP | CLEANUP_THREADING))
    clear_bb_flags ();

  crossjumps_occured = false;

  FOR_EACH_BB (bb)
    update_forwarder_flag (bb);
  if (! targetm.cannot_modify_jumps_p ())
    {
      first_pass = true;
      /* Attempt to merge blocks as made possible by edge removal.  If
	 a block has only one successor, and the successor has only
	 one predecessor, they may be combined.  */
      do
	{
	  block_was_dirty = false;
	  changed = false;
	  iterations++;

	  if (dump_file)
	    fprintf (dump_file,
		     "\n\ntry_optimize_cfg iteration %i\n\n",
		     iterations);

	  for (b = ENTRY_BLOCK_PTR->next_bb; b != EXIT_BLOCK_PTR;)
	    {
	      basic_block c;
	      edge s;
	      bool changed_here = false;
	      /* Delete trivially dead basic blocks.  This is either
		 blocks with no predecessors, or empty blocks with no
		 successors.  However if the empty block with no
		 successors is the successor of the ENTRY_BLOCK, it is
		 kept.  This ensures that the ENTRY_BLOCK will have a
		 successor which is a precondition for many RTL
		 passes.  Empty blocks may result from expanding
		 __builtin_unreachable ().  */
	      if (EDGE_COUNT (b->preds) == 0
		  || (EDGE_COUNT (b->succs) == 0
		      && trivially_empty_bb_p (b)
		      && single_succ_edge (ENTRY_BLOCK_PTR)->dest != b))
		{
		  c = b->prev_bb;
		  if (EDGE_COUNT (b->preds) > 0)
		    {
		      edge e;
		      edge_iterator ei;

		      if (current_ir_type () == IR_RTL_CFGLAYOUT)
			{
			  if (BB_FOOTER (b)
			      && BARRIER_P (BB_FOOTER (b)))
			    FOR_EACH_EDGE (e, ei, b->preds)
			      if ((e->flags & EDGE_FALLTHRU)
				  && BB_FOOTER (e->src) == NULL)
				{
				  if (BB_FOOTER (b))
				    {
				      BB_FOOTER (e->src) = BB_FOOTER (b);
				      BB_FOOTER (b) = NULL;
				    }
				  else
				    {
				      start_sequence ();
				      BB_FOOTER (e->src) = emit_barrier ();
				      end_sequence ();
				    }
				}
			}
		      else
			{
			  rtx last = get_last_bb_insn (b);
			  if (last && BARRIER_P (last))
			    FOR_EACH_EDGE (e, ei, b->preds)
			      if ((e->flags & EDGE_FALLTHRU))
				emit_barrier_after (BB_END (e->src));
			}
		    }
		  delete_basic_block (b);
		  changed = true;
		  /* Avoid trying to remove ENTRY_BLOCK_PTR.  */
		  b = (c == ENTRY_BLOCK_PTR ? c->next_bb : c);
		  continue;
		}
	      /* Remove code labels no longer used.  */
	      if (single_pred_p (b)
		  && (single_pred_edge (b)->flags & EDGE_FALLTHRU)
		  && !(single_pred_edge (b)->flags & EDGE_COMPLEX)
		  && LABEL_P (BB_HEAD (b))
		  /* If the previous block ends with a branch to this
		     block, we can't delete the label.  Normally this
		     is a condjump that is yet to be simplified, but
		     if CASE_DROPS_THRU, this can be a tablejump with
		     some element going to the same place as the
		     default (fallthru).  */
		  && (single_pred (b) == ENTRY_BLOCK_PTR
		      || !JUMP_P (BB_END (single_pred (b)))
		      || ! label_is_jump_target_p (BB_HEAD (b),
						   BB_END (single_pred (b)))))
		{
		  delete_insn (BB_HEAD (b));
		  if (dump_file)
		    fprintf (dump_file, "Deleted label in block %i.\n",
			     b->index);
		}
	      /* If we fall through an empty block, we can remove it.  */
	      if (!(mode & (CLEANUP_CFGLAYOUT | CLEANUP_NO_INSN_DEL))
		  && single_pred_p (b)
		  && (single_pred_edge (b)->flags & EDGE_FALLTHRU)
		  && !LABEL_P (BB_HEAD (b))
		  && FORWARDER_BLOCK_P (b)
		  /* Note that forwarder_block_p true ensures that
		     there is a successor for this block.  */
		  && (single_succ_edge (b)->flags & EDGE_FALLTHRU)
		  && n_basic_blocks > NUM_FIXED_BLOCKS + 1)
		{
		  if (dump_file)
		    fprintf (dump_file,
			     "Deleting fallthru block %i.\n",
			     b->index);

		  c = b->prev_bb == ENTRY_BLOCK_PTR ? b->next_bb : b->prev_bb;
		  redirect_edge_succ_nodup (single_pred_edge (b),
					    single_succ (b));
		  delete_basic_block (b);
		  changed = true;
		  b = c;
		  continue;
		}
	      /* Merge B with its single successor, if any.  */
	      if (single_succ_p (b)
		  && (s = single_succ_edge (b))
		  && !(s->flags & EDGE_COMPLEX)
		  && (c = s->dest) != EXIT_BLOCK_PTR
		  && single_pred_p (c)
		  && b != c)
		{
		  /* When not in cfg_layout mode use code aware of reordering
		     INSNs.  This code possibly creates new basic blocks, so
		     it does not fit the merge_blocks interface and is kept
		     here in the hope that it will become useless once more
		     of the compiler is transformed to use cfg_layout mode.  */

		  if ((mode & CLEANUP_CFGLAYOUT)
		      && can_merge_blocks_p (b, c))
		    {
		      merge_blocks (b, c);
		      update_forwarder_flag (b);
		      changed_here = true;
		    }
		  else if (!(mode & CLEANUP_CFGLAYOUT)
			   /* If the jump insn has side effects,
			      we can't kill the edge.  */
			   && (!JUMP_P (BB_END (b))
			       || (reload_completed
				   ? simplejump_p (BB_END (b))
				   : (onlyjump_p (BB_END (b))
				      && !tablejump_p (BB_END (b),
						       NULL, NULL))))
			   && (next = merge_blocks_move (s, b, c, mode)))
		    {
		      b = next;
		      changed_here = true;
		    }
		}
	      /* Simplify branch over branch.  */
	      if ((mode & CLEANUP_EXPENSIVE)
		  && !(mode & CLEANUP_CFGLAYOUT)
		  && try_simplify_condjump (b))
		changed_here = true;
	      /* If B has a single outgoing edge, but uses a
		 non-trivial jump instruction without side-effects, we
		 can either delete the jump entirely, or replace it
		 with a simple unconditional jump.  */
	      if (single_succ_p (b)
		  && single_succ (b) != EXIT_BLOCK_PTR
		  && onlyjump_p (BB_END (b))
		  && !find_reg_note (BB_END (b), REG_CROSSING_JUMP, NULL_RTX)
		  && try_redirect_by_replacing_jump (single_succ_edge (b),
						     single_succ (b),
						     (mode & CLEANUP_CFGLAYOUT) != 0))
		{
		  update_forwarder_flag (b);
		  changed_here = true;
		}
	      /* Simplify branch to branch.  */
	      if (try_forward_edges (mode, b))
		{
		  update_forwarder_flag (b);
		  changed_here = true;
		}
	      /* Look for shared code between blocks.  */
	      if ((mode & CLEANUP_CROSSJUMP)
		  && try_crossjump_bb (mode, b))
		changed_here = true;

	      if ((mode & CLEANUP_CROSSJUMP)
		  /* This can lengthen register lifetimes.  Do it only after
		     reload.  */
		  && reload_completed
		  && try_head_merge_bb (b))
		changed_here = true;

	      /* Don't get confused by the index shift caused by
		 deleting blocks.  */
	      if (!changed_here)
		b = b->next_bb;
	      else
		changed = true;
	    }

	  if ((mode & CLEANUP_CROSSJUMP)
	      && try_crossjump_bb (mode, EXIT_BLOCK_PTR))
	    changed = true;
	  if (block_was_dirty)
	    {
	      /* This should only be set by head-merging.  */
	      gcc_assert (mode & CLEANUP_CROSSJUMP);
	      df_analyze ();
	    }

#ifdef ENABLE_CHECKING
	  if (changed)
	    verify_flow_info ();
#endif

	  changed_overall |= changed;
	  first_pass = false;
	}
      while (changed);
    }

  FOR_ALL_BB (b)
    b->flags &= ~(BB_FORWARDER_BLOCK | BB_NONTHREADABLE_BLOCK);

  return changed_overall;
}
/* Delete all unreachable basic blocks.  */

bool
delete_unreachable_blocks (void)
{
  bool changed = false;
  basic_block b, prev_bb;

  find_unreachable_blocks ();
  /* When we're in GIMPLE mode and there may be debug insns, we should
     delete blocks in reverse dominator order, so as to get a chance
     to substitute all released DEFs into debug stmts.  If we don't
     have dominators information, walking blocks backward gets us a
     better chance of retaining most debug information than
     otherwise.  */
  if (MAY_HAVE_DEBUG_INSNS && current_ir_type () == IR_GIMPLE
      && dom_info_available_p (CDI_DOMINATORS))
    {
      for (b = EXIT_BLOCK_PTR->prev_bb; b != ENTRY_BLOCK_PTR; b = prev_bb)
	{
	  prev_bb = b->prev_bb;

	  if (!(b->flags & BB_REACHABLE))
	    {
	      /* Speed up the removal of blocks that don't dominate
		 others.  Walking backwards, this should be the common
		 case.  */
	      if (!first_dom_son (CDI_DOMINATORS, b))
		delete_basic_block (b);
	      else
		{
		  vec<basic_block> h
		    = get_all_dominated_blocks (CDI_DOMINATORS, b);

		  while (h.length ())
		    {
		      b = h.pop ();

		      prev_bb = b->prev_bb;

		      gcc_assert (!(b->flags & BB_REACHABLE));

		      delete_basic_block (b);
		    }

		  h.release ();
		}

	      changed = true;
	    }
	}
    }
  else
    {
      for (b = EXIT_BLOCK_PTR->prev_bb; b != ENTRY_BLOCK_PTR; b = prev_bb)
	{
	  prev_bb = b->prev_bb;

	  if (!(b->flags & BB_REACHABLE))
	    {
	      delete_basic_block (b);
	      changed = true;
	    }
	}
    }

  if (changed)
    tidy_fallthru_edges ();
  return changed;
}
/* Delete any jump tables never referenced.  We can't delete them at the
   time of removing the tablejump insn, as they are referenced by the
   preceding insns computing the destination, so we delay deleting them
   and garbage collect them once life information is computed.  */
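/* Schematically, a dead jump table sits between two blocks as

	(code_label N)
	(jump_insn (addr_vec [ref ...]))

   where the label's only remaining uses are the preserved ones; both
   the label and the table insn are deleted below.  */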
void
delete_dead_jumptables (void)
{
  basic_block bb;

  /* A dead jump table does not belong to any basic block.  Scan insns
     between two adjacent basic blocks.  */
  FOR_EACH_BB (bb)
    {
      rtx insn, next;

      for (insn = NEXT_INSN (BB_END (bb));
	   insn && !NOTE_INSN_BASIC_BLOCK_P (insn);
	   insn = next)
	{
	  next = NEXT_INSN (insn);
	  if (LABEL_P (insn)
	      && LABEL_NUSES (insn) == LABEL_PRESERVE_P (insn)
	      && JUMP_TABLE_DATA_P (next))
	    {
	      rtx label = insn, jump = next;

	      if (dump_file)
		fprintf (dump_file, "Dead jumptable %i removed\n",
			 INSN_UID (insn));

	      next = NEXT_INSN (next);
	      delete_insn (jump);
	      delete_insn (label);
	    }
	}
    }
}
/* Tidy the CFG by deleting unreachable code and whatnot.  */

bool
cleanup_cfg (int mode)
{
  bool changed = false;
  /* Set the cfglayout mode flag here.  We could update all the callers
     but that is just inconvenient, especially given that we eventually
     want to have cfglayout mode as the default.  */
  if (current_ir_type () == IR_RTL_CFGLAYOUT)
    mode |= CLEANUP_CFGLAYOUT;

  timevar_push (TV_CLEANUP_CFG);
  if (delete_unreachable_blocks ())
    {
      changed = true;
      /* We've possibly created trivially dead code.  Clean it up right
	 now to introduce more opportunities for try_optimize_cfg.  */
      if (!(mode & (CLEANUP_NO_INSN_DEL))
	  && !reload_completed)
	delete_trivially_dead_insns (get_insns (), max_reg_num ());
    }

  compact_blocks ();
  /* To tail-merge blocks ending in the same noreturn function (e.g.
     a call to abort) we have to insert fake edges to exit.  Do this
     here once.  The fake edges do not interfere with any other CFG
     cleanups.  */
  if (mode & CLEANUP_CROSSJUMP)
    add_noreturn_fake_exit_edges ();

  if (!dbg_cnt (cfg_cleanup))
    return changed;

  while (try_optimize_cfg (mode))
    {
      delete_unreachable_blocks (), changed = true;
      if (!(mode & CLEANUP_NO_INSN_DEL))
	{
	  /* Try to remove some trivially dead insns when doing an expensive
	     cleanup.  But delete_trivially_dead_insns doesn't work after
	     reload (it only handles pseudos) and run_fast_dce is too costly
	     to run in every iteration.

	     For effective cross jumping, we really want to run a fast DCE to
	     clean up any dead conditions, or they get in the way of performing
	     useful tail merges.

	     Other transformations in cleanup_cfg are not so sensitive to dead
	     code, so delete_trivially_dead_insns or even doing nothing at all
	     is good enough.  */
	  if ((mode & CLEANUP_EXPENSIVE) && !reload_completed
	      && !delete_trivially_dead_insns (get_insns (), max_reg_num ()))
	    break;
	  if ((mode & CLEANUP_CROSSJUMP) && crossjumps_occured)
	    run_fast_dce ();
	}
      else
	break;
    }
  if (mode & CLEANUP_CROSSJUMP)
    remove_fake_exit_edges ();

  /* Don't call delete_dead_jumptables in cfglayout mode, because
     that function assumes that jump tables are in the insns stream.
     But we also don't _have_ to delete dead jumptables in cfglayout
     mode because we shouldn't even be looking at things that are
     not in a basic block.  Dead jumptables are cleaned up when
     going out of cfglayout mode.  */
  if (!(mode & CLEANUP_CFGLAYOUT))
    delete_dead_jumptables ();
  /* ???  We probably do this way too often.  */
  if (current_loops
      && (changed
	  || (mode & CLEANUP_CFG_CHANGED)))
    {
      timevar_push (TV_REPAIR_LOOPS);
      /* The above doesn't preserve dominance info if available.  */
      gcc_assert (!dom_info_available_p (CDI_DOMINATORS));
      calculate_dominance_info (CDI_DOMINATORS);
      fix_loop_structure (NULL);
      free_dominance_info (CDI_DOMINATORS);
      timevar_pop (TV_REPAIR_LOOPS);
    }

  timevar_pop (TV_CLEANUP_CFG);

  return changed;
}
static unsigned int
execute_jump (void)
{
  delete_trivially_dead_insns (get_insns (), max_reg_num ());
  if (dump_file)
    dump_flow_info (dump_file, dump_flags);
  cleanup_cfg ((optimize ? CLEANUP_EXPENSIVE : 0)
	       | (flag_thread_jumps ? CLEANUP_THREADING : 0));
  return 0;
}
struct rtl_opt_pass pass_jump =
{
 {
  RTL_PASS,
  "jump",				/* name */
  OPTGROUP_NONE,			/* optinfo_flags */
  NULL,					/* gate */
  execute_jump,				/* execute */
  NULL,					/* sub */
  NULL,					/* next */
  0,					/* static_pass_number */
  TV_JUMP,				/* tv_id */
  0,					/* properties_required */
  0,					/* properties_provided */
  0,					/* properties_destroyed */
  TODO_ggc_collect,			/* todo_flags_start */
  TODO_verify_rtl_sharing,		/* todo_flags_finish */
 }
};
static unsigned int
execute_jump2 (void)
{
  cleanup_cfg (flag_crossjumping ? CLEANUP_CROSSJUMP : 0);
  return 0;
}
struct rtl_opt_pass pass_jump2 =
{
 {
  RTL_PASS,
  "jump2",				/* name */
  OPTGROUP_NONE,			/* optinfo_flags */
  NULL,					/* gate */
  execute_jump2,			/* execute */
  NULL,					/* sub */
  NULL,					/* next */
  0,					/* static_pass_number */
  TV_JUMP,				/* tv_id */
  0,					/* properties_required */
  0,					/* properties_provided */
  0,					/* properties_destroyed */
  TODO_ggc_collect,			/* todo_flags_start */
  TODO_verify_rtl_sharing,		/* todo_flags_finish */
 }
};