1 /* Loop unrolling and peeling.
2 Copyright (C) 2002-2013 Free Software Foundation, Inc.
4 This file is part of GCC.
6 GCC is free software; you can redistribute it and/or modify it under
7 the terms of the GNU General Public License as published by the Free
8 Software Foundation; either version 3, or (at your option) any later
11 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
12 WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
16 You should have received a copy of the GNU General Public License
17 along with GCC; see the file COPYING3. If not see
18 <http://www.gnu.org/licenses/>. */
22 #include "coretypes.h"
26 #include "hard-reg-set.h"
28 #include "basic-block.h"
32 #include "hash-table.h"
37 /* This pass performs loop unrolling and peeling. We only perform these
38 optimizations on innermost loops (with a single exception) because
39 the impact on performance is greatest here, and we want to avoid
40 unnecessary code size growth. The gain comes from greater sequentiality
41 of code, better code to optimize for further passes, and in some cases
42 from fewer tests of exit conditions. The main problem is code growth,
43 which impacts performance negatively due to cache effects.
47 -- complete peeling of once-rolling loops; this is the above-mentioned
48 exception, as it cancels the loop completely and
49 does not cause code growth
50 -- complete peeling of loops that roll a (small) constant number of times.
51 -- simple peeling of the first iterations of loops that do not roll much
52 (according to profile feedback)
53 -- unrolling of loops that roll a constant number of times; this is almost
54 always a win, as we get rid of exit condition tests (illustrated below).
55 -- unrolling of loops that roll a number of times that we can compute
56 at runtime; we also get rid of exit condition tests here, but there
57 is an extra expense for calculating the number of iterations
58 -- simple unrolling of remaining loops; this is performed only if we
59 are asked to, as the gain is questionable in this case and often
60 it may even slow down the code
61 For more detailed descriptions of each of those, see comments at
62 appropriate function below.
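As a small illustration (a sketch of the idea, not code taken from this
pass): unrolling by a factor of four a loop that rolls a constant number
of times turns

  for (i = 0; i < 100; i++)
    body;

into roughly

  for (i = 0; i < 100; i += 4)
    { body; body; body; body; }

so only every fourth exit test remains, whereas peeling instead copies a
few leading iterations in front of the loop and leaves the loop itself
in place.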
64 There are a number of parameters (defined and described in params.def) that
65 control how much we unroll/peel.
67 ??? A great problem is that we don't have a good way to determine
68 how many times we should unroll a loop; the experiments I have made
69 showed that this choice may affect performance on the order of several percent.
72 /* Information about induction variables to split. */
76 rtx insn; /* The insn in which the induction variable occurs. */
77 rtx orig_var; /* The variable (register) for the IV before split. */
78 rtx base_var; /* The variable on which the values in the further
79 iterations are based. */
80 rtx step; /* Step of the induction variable. */
81 struct iv_to_split *next; /* Next entry in walking order. */
83 unsigned loc[3]; /* Location where the definition of the induction
84 variable occurs in the insn. For example if
85 N_LOC is 2, the expression is located at
86 XEXP (XEXP (single_set, loc[0]), loc[1]). */
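/* Put differently (an illustrative summary of how the fields above are used
   by split_iv below): in copy number DELTA of the unrolled body, a use of
   the induction variable is rewritten as BASE_VAR + DELTA * STEP, so the
   copies no longer form a serial dependence chain on ORIG_VAR. */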
89 /* Information about accumulators to expand. */
93 rtx insn; /* The insn in which the variable expansion occurs. */
94 rtx reg; /* The accumulator which is expanded. */
95 vec<rtx> var_expansions; /* The copies of the accumulator which is expanded. */
96 struct var_to_expand *next; /* Next entry in walking order. */
97 enum rtx_code op; /* The type of the accumulation - addition, subtraction
99 int expansion_count; /* Count the number of expansions generated so far. */
100 int reuse_expansion; /* The expansion we intend to reuse to expand
101 the accumulator. If REUSE_EXPANSION is 0 reuse
102 the original accumulator. Else use
103 var_expansions[REUSE_EXPANSION - 1]. */
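/* Put differently (an illustrative summary; the sample accumulator below is
   hypothetical): for an accumulator such as "sum += a[i]", each selected
   copy of the unrolled body gets its own register from VAR_EXPANSIONS; the
   copies are initialized in the preheader and combined back into REG at the
   loop exit (see insert_var_expansion_initialization and
   combine_var_copies_in_loop_exit below), which breaks the dependence
   between consecutive unrolled iterations. */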
106 /* Hashtable helper for iv_to_split. */
108 struct iv_split_hasher : typed_free_remove <iv_to_split>
110 typedef iv_to_split value_type;
111 typedef iv_to_split compare_type;
112 static inline hashval_t hash (const value_type *);
113 static inline bool equal (const value_type *, const compare_type *);
117 /* A hash function for information about insns to split. */
120 iv_split_hasher::hash (const value_type *ivts)
122 return (hashval_t) INSN_UID (ivts->insn);
125 /* An equality function for information about insns to split. */
128 iv_split_hasher::equal (const value_type *i1, const compare_type *i2)
130 return i1->insn == i2->insn;
133 /* Hashtable helper for var_to_expand. */
135 struct var_expand_hasher : typed_free_remove <var_to_expand>
137 typedef var_to_expand value_type;
138 typedef var_to_expand compare_type;
139 static inline hashval_t hash (const value_type *);
140 static inline bool equal (const value_type *, const compare_type *);
143 /* Return a hash for VES. */
146 var_expand_hasher::hash (const value_type *ves)
148 return (hashval_t) INSN_UID (ves->insn);
151 /* Return true if I1 and I2 refer to the same instruction. */
154 var_expand_hasher::equal (const value_type *i1, const compare_type *i2)
156 return i1->insn == i2->insn;
159 /* Information about optimization applied in
160 the unrolled loop. */
164 hash_table <iv_split_hasher> insns_to_split; /* A hashtable of insns to
166 struct iv_to_split *iv_to_split_head; /* The first iv to split. */
167 struct iv_to_split **iv_to_split_tail; /* Pointer to the tail of the list. */
168 hash_table <var_expand_hasher> insns_with_var_to_expand; /* A hashtable of
169 insns with accumulators to expand. */
170 struct var_to_expand *var_to_expand_head; /* The first var to expand. */
171 struct var_to_expand **var_to_expand_tail; /* Pointer to the tail of the list. */
172 unsigned first_new_block; /* The first basic block that was
174 basic_block loop_exit; /* The loop exit basic block. */
175 basic_block loop_preheader; /* The loop preheader basic block. */
178 static void decide_unrolling_and_peeling (int);
179 static void peel_loops_completely (int);
180 static void decide_peel_simple (struct loop *, int);
181 static void decide_peel_once_rolling (struct loop *, int);
182 static void decide_peel_completely (struct loop *, int);
183 static void decide_unroll_stupid (struct loop *, int);
184 static void decide_unroll_constant_iterations (struct loop *, int);
185 static void decide_unroll_runtime_iterations (struct loop *, int);
186 static void peel_loop_simple (struct loop *);
187 static void peel_loop_completely (struct loop *);
188 static void unroll_loop_stupid (struct loop *);
189 static void unroll_loop_constant_iterations (struct loop *);
190 static void unroll_loop_runtime_iterations (struct loop *);
191 static struct opt_info *analyze_insns_in_loop (struct loop *);
192 static void opt_info_start_duplication (struct opt_info *);
193 static void apply_opt_in_copies (struct opt_info *, unsigned, bool, bool);
194 static void free_opt_info (struct opt_info *);
195 static struct var_to_expand *analyze_insn_to_expand_var (struct loop*, rtx);
196 static bool referenced_in_one_insn_in_loop_p (struct loop *, rtx, int *);
197 static struct iv_to_split *analyze_iv_to_split_insn (rtx);
198 static void expand_var_during_unrolling (struct var_to_expand *, rtx);
199 static void insert_var_expansion_initialization (struct var_to_expand *,
201 static void combine_var_copies_in_loop_exit (struct var_to_expand *,
203 static rtx get_expansion (struct var_to_expand *);
205 /* Emit a message summarizing the unroll or peel that will be
206 performed for LOOP, along with the loop's location LOCUS, if
207 appropriate given the dump or -fopt-info settings. */
210 report_unroll_peel (struct loop *loop, location_t locus)
212 struct niter_desc *desc;
214 int report_flags = MSG_OPTIMIZED_LOCATIONS | TDF_RTL | TDF_DETAILS;
216 if (loop->lpt_decision.decision == LPT_NONE)
219 if (!dump_enabled_p ())
222 /* In the special case where the loop never iterated, emit
223 a different message so that we don't report an unroll by 0.
224 This matches the equivalent message emitted during tree unrolling. */
225 if (loop->lpt_decision.decision == LPT_PEEL_COMPLETELY
226 && !loop->lpt_decision.times)
228 dump_printf_loc (report_flags, locus,
229 "loop turned into non-loop; it never loops.\n");
233 desc = get_simple_loop_desc (loop);
235 if (desc->const_iter)
236 niters = desc->niter;
237 else if (loop->header->count)
238 niters = expected_loop_iterations (loop);
240 if (loop->lpt_decision.decision == LPT_PEEL_COMPLETELY)
241 dump_printf_loc (report_flags, locus,
242 "loop with %d iterations completely unrolled",
243 loop->lpt_decision.times + 1);
245 dump_printf_loc (report_flags, locus,
247 (loop->lpt_decision.decision == LPT_PEEL_SIMPLE
248 ? "peeled" : "unrolled"),
249 loop->lpt_decision.times);
251 dump_printf (report_flags,
252 " (header execution count %d",
253 (int)loop->header->count);
254 if (loop->lpt_decision.decision == LPT_PEEL_COMPLETELY)
255 dump_printf (report_flags,
256 "%s%s iterations %d)",
257 profile_info ? ", " : " (",
258 desc->const_iter ? "const" : "average",
260 else if (profile_info)
261 dump_printf (report_flags, ")");
263 dump_printf (report_flags, "\n");
266 /* Unroll and/or peel (depending on FLAGS) LOOPS. */
268 unroll_and_peel_loops (int flags)
271 bool changed = false;
273 /* First perform complete loop peeling (it is almost surely a win,
274 and it greatly affects the parameters for further decisions). */
275 peel_loops_completely (flags);
277 /* Now decide the rest of the unrolling and peeling. */
278 decide_unrolling_and_peeling (flags);
280 /* Scan the loops, inner ones first. */
281 FOR_EACH_LOOP (loop, LI_FROM_INNERMOST)
283 /* And perform the appropriate transformations. */
284 switch (loop->lpt_decision.decision)
286 case LPT_PEEL_COMPLETELY:
289 case LPT_PEEL_SIMPLE:
290 peel_loop_simple (loop);
293 case LPT_UNROLL_CONSTANT:
294 unroll_loop_constant_iterations (loop);
297 case LPT_UNROLL_RUNTIME:
298 unroll_loop_runtime_iterations (loop);
301 case LPT_UNROLL_STUPID:
302 unroll_loop_stupid (loop);
314 calculate_dominance_info (CDI_DOMINATORS);
315 fix_loop_structure (NULL);
321 /* Check whether the exit of LOOP is at the end of the loop body. */
324 loop_exit_at_end_p (struct loop *loop)
326 struct niter_desc *desc = get_simple_loop_desc (loop);
329 if (desc->in_edge->dest != loop->latch)
332 /* Check that the latch is empty. */
333 FOR_BB_INSNS (loop->latch, insn)
335 if (NONDEBUG_INSN_P (insn))
342 /* Depending on FLAGS, check whether to peel loops completely and do so. */
344 peel_loops_completely (int flags)
347 bool changed = false;
349 /* Scan the loops, the inner ones first. */
350 FOR_EACH_LOOP (loop, LI_FROM_INNERMOST)
352 loop->lpt_decision.decision = LPT_NONE;
353 location_t locus = get_loop_location (loop);
355 if (dump_enabled_p ())
356 dump_printf_loc (TDF_RTL, locus,
357 ";; *** Considering loop %d at BB %d for "
358 "complete peeling ***\n",
359 loop->num, loop->header->index);
361 loop->ninsns = num_loop_insns (loop);
363 decide_peel_once_rolling (loop, flags);
364 if (loop->lpt_decision.decision == LPT_NONE)
365 decide_peel_completely (loop, flags);
367 if (loop->lpt_decision.decision == LPT_PEEL_COMPLETELY)
369 report_unroll_peel (loop, locus);
370 peel_loop_completely (loop);
377 calculate_dominance_info (CDI_DOMINATORS);
378 fix_loop_structure (NULL);
382 /* Decide whether to unroll or peel loops (depending on FLAGS) and how much. */
384 decide_unrolling_and_peeling (int flags)
388 /* Scan the loops, inner ones first. */
389 FOR_EACH_LOOP (loop, LI_FROM_INNERMOST)
391 loop->lpt_decision.decision = LPT_NONE;
392 location_t locus = get_loop_location (loop);
394 if (dump_enabled_p ())
395 dump_printf_loc (TDF_RTL, locus,
396 ";; *** Considering loop %d at BB %d for "
397 "unrolling and peeling ***\n",
398 loop->num, loop->header->index);
400 /* Do not peel cold areas. */
401 if (optimize_loop_for_size_p (loop))
404 fprintf (dump_file, ";; Not considering loop, cold area\n");
408 /* Can the loop be manipulated? */
409 if (!can_duplicate_loop_p (loop))
413 ";; Not considering loop, cannot duplicate\n");
417 /* Skip non-innermost loops. */
421 fprintf (dump_file, ";; Not considering loop, is not innermost\n");
425 loop->ninsns = num_loop_insns (loop);
426 loop->av_ninsns = average_num_loop_insns (loop);
428 /* Try transformations one by one in decreasing order of
431 decide_unroll_constant_iterations (loop, flags);
432 if (loop->lpt_decision.decision == LPT_NONE)
433 decide_unroll_runtime_iterations (loop, flags);
434 if (loop->lpt_decision.decision == LPT_NONE)
435 decide_unroll_stupid (loop, flags);
436 if (loop->lpt_decision.decision == LPT_NONE)
437 decide_peel_simple (loop, flags);
439 report_unroll_peel (loop, locus);
443 /* Decide whether the LOOP is once rolling and suitable for complete
446 decide_peel_once_rolling (struct loop *loop, int flags ATTRIBUTE_UNUSED)
448 struct niter_desc *desc;
451 fprintf (dump_file, "\n;; Considering peeling once rolling loop\n");
453 /* Is the loop small enough? */
454 if ((unsigned) PARAM_VALUE (PARAM_MAX_ONCE_PEELED_INSNS) < loop->ninsns)
457 fprintf (dump_file, ";; Not considering loop, is too big\n");
461 /* Check for simple loops. */
462 desc = get_simple_loop_desc (loop);
464 /* Check number of iterations. */
470 && get_max_loop_iterations_int (loop) != 0))
474 ";; Unable to prove that the loop rolls exactly once\n");
479 loop->lpt_decision.decision = LPT_PEEL_COMPLETELY;
482 /* Decide whether the LOOP is suitable for complete peeling. */
484 decide_peel_completely (struct loop *loop, int flags ATTRIBUTE_UNUSED)
487 struct niter_desc *desc;
490 fprintf (dump_file, "\n;; Considering peeling completely\n");
492 /* Skip non-innermost loops. */
496 fprintf (dump_file, ";; Not considering loop, is not innermost\n");
500 /* Do not peel cold areas. */
501 if (optimize_loop_for_size_p (loop))
504 fprintf (dump_file, ";; Not considering loop, cold area\n");
508 /* Can the loop be manipulated? */
509 if (!can_duplicate_loop_p (loop))
513 ";; Not considering loop, cannot duplicate\n");
517 /* npeel = number of iterations to peel. */
518 npeel = PARAM_VALUE (PARAM_MAX_COMPLETELY_PEELED_INSNS) / loop->ninsns;
519 if (npeel > (unsigned) PARAM_VALUE (PARAM_MAX_COMPLETELY_PEEL_TIMES))
520 npeel = PARAM_VALUE (PARAM_MAX_COMPLETELY_PEEL_TIMES);
522 /* Is the loop small enough? */
526 fprintf (dump_file, ";; Not considering loop, is too big\n");
530 /* Check for simple loops. */
531 desc = get_simple_loop_desc (loop);
533 /* Check number of iterations. */
541 ";; Unable to prove that the loop iterates constant times\n");
545 if (desc->niter > npeel - 1)
550 ";; Not peeling loop completely, rolls too much (");
551 fprintf (dump_file, HOST_WIDEST_INT_PRINT_DEC, desc->niter);
552 fprintf (dump_file, " iterations > %d [maximum peelings])\n", npeel);
558 loop->lpt_decision.decision = LPT_PEEL_COMPLETELY;
561 /* Peel all iterations of LOOP, remove the exit edges and cancel the loop
562 completely. The transformation does this:
564 for (i = 0; i < 4; i++)
576 peel_loop_completely (struct loop *loop)
579 unsigned HOST_WIDE_INT npeel;
582 struct niter_desc *desc = get_simple_loop_desc (loop);
583 struct opt_info *opt_info = NULL;
591 wont_exit = sbitmap_alloc (npeel + 1);
592 bitmap_ones (wont_exit);
593 bitmap_clear_bit (wont_exit, 0);
594 if (desc->noloop_assumptions)
595 bitmap_clear_bit (wont_exit, 1);
597 auto_vec<edge> remove_edges;
598 if (flag_split_ivs_in_unroller)
599 opt_info = analyze_insns_in_loop (loop);
601 opt_info_start_duplication (opt_info);
602 ok = duplicate_loop_to_header_edge (loop, loop_preheader_edge (loop),
604 wont_exit, desc->out_edge,
606 DLTHE_FLAG_UPDATE_FREQ
607 | DLTHE_FLAG_COMPLETTE_PEEL
609 ? DLTHE_RECORD_COPY_NUMBER : 0));
616 apply_opt_in_copies (opt_info, npeel, false, true);
617 free_opt_info (opt_info);
620 /* Remove the exit edges. */
621 FOR_EACH_VEC_ELT (remove_edges, i, ein)
626 free_simple_loop_desc (loop);
628 /* Now remove the unreachable part of the last iteration and cancel
633 fprintf (dump_file, ";; Peeled loop completely, %d times\n", (int) npeel);
636 /* Decide whether to unroll LOOP iterating a constant number of times
640 decide_unroll_constant_iterations (struct loop *loop, int flags)
642 unsigned nunroll, nunroll_by_av, best_copies, best_unroll = 0, n_copies, i;
643 struct niter_desc *desc;
644 widest_int iterations;
646 if (!(flags & UAP_UNROLL))
648 /* We were not asked to, just return silently. */
654 "\n;; Considering unrolling loop with constant "
655 "number of iterations\n");
657 /* nunroll = total number of copies of the original loop body in the
658 unrolled loop (i.e. if it is 2, we have to duplicate the loop body once). */
659 nunroll = PARAM_VALUE (PARAM_MAX_UNROLLED_INSNS) / loop->ninsns;
661 = PARAM_VALUE (PARAM_MAX_AVERAGE_UNROLLED_INSNS) / loop->av_ninsns;
662 if (nunroll > nunroll_by_av)
663 nunroll = nunroll_by_av;
664 if (nunroll > (unsigned) PARAM_VALUE (PARAM_MAX_UNROLL_TIMES))
665 nunroll = PARAM_VALUE (PARAM_MAX_UNROLL_TIMES);
667 if (targetm.loop_unroll_adjust)
668 nunroll = targetm.loop_unroll_adjust (nunroll, loop);
670 /* Skip big loops. */
674 fprintf (dump_file, ";; Not considering loop, is too big\n");
678 /* Check for simple loops. */
679 desc = get_simple_loop_desc (loop);
681 /* Check number of iterations. */
682 if (!desc->simple_p || !desc->const_iter || desc->assumptions)
686 ";; Unable to prove that the loop iterates constant times\n");
690 /* Check whether the loop rolls enough to consider.
691 Consult also loop bounds and profile; in case the loop has more
692 than one exit it may well loop less than the determined maximal number
694 if (desc->niter < 2 * nunroll
695 || ((get_estimated_loop_iterations (loop, &iterations)
696 || get_max_loop_iterations (loop, &iterations))
697 && wi::ltu_p (iterations, 2 * nunroll)))
700 fprintf (dump_file, ";; Not unrolling loop, doesn't roll\n");
704 /* Success; now compute the number of iterations to unroll. We alter
705 nunroll so that as few copies of the loop body as possible are
706 necessary, while still not decreasing the number of unrollings
707 too much (at most by 1). */
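/* A worked example of the search below (illustrative numbers only): with
   desc->niter == 100 and i == 7, i.e. eight copies in the unrolled body and
   the exit not at the end, exit_mod == 100 % 8 == 4 and n_copies == 4 + 7 + 1
   == 12 -- four peeled copies plus the eight copies of the unrolled loop. */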
708 best_copies = 2 * nunroll + 10;
711 if (i - 1 >= desc->niter)
714 for (; i >= nunroll - 1; i--)
716 unsigned exit_mod = desc->niter % (i + 1);
718 if (!loop_exit_at_end_p (loop))
719 n_copies = exit_mod + i + 1;
720 else if (exit_mod != (unsigned) i
721 || desc->noloop_assumptions != NULL_RTX)
722 n_copies = exit_mod + i + 2;
726 if (n_copies < best_copies)
728 best_copies = n_copies;
733 loop->lpt_decision.decision = LPT_UNROLL_CONSTANT;
734 loop->lpt_decision.times = best_unroll;
737 /* Unroll LOOP with a constant number of iterations LOOP->LPT_DECISION.TIMES times.
738 The transformation does this:
740 for (i = 0; i < 102; i++)
743 ==> (LOOP->LPT_DECISION.TIMES == 3)
757 unroll_loop_constant_iterations (struct loop *loop)
759 unsigned HOST_WIDE_INT niter;
764 unsigned max_unroll = loop->lpt_decision.times;
765 struct niter_desc *desc = get_simple_loop_desc (loop);
766 bool exit_at_end = loop_exit_at_end_p (loop);
767 struct opt_info *opt_info = NULL;
772 /* Should not get here (such a loop should be peeled instead). */
773 gcc_assert (niter > max_unroll + 1);
775 exit_mod = niter % (max_unroll + 1);
777 wont_exit = sbitmap_alloc (max_unroll + 1);
778 bitmap_ones (wont_exit);
780 auto_vec<edge> remove_edges;
781 if (flag_split_ivs_in_unroller
782 || flag_variable_expansion_in_unroller)
783 opt_info = analyze_insns_in_loop (loop);
787 /* The exit is not at the end of the loop; leave the exit test
788 in the first copy, so that loops that start with a test
789 of the exit condition have a continuous body after unrolling. */
792 fprintf (dump_file, ";; Condition at beginning of loop.\n");
794 /* Peel exit_mod iterations. */
795 bitmap_clear_bit (wont_exit, 0);
796 if (desc->noloop_assumptions)
797 bitmap_clear_bit (wont_exit, 1);
801 opt_info_start_duplication (opt_info);
802 ok = duplicate_loop_to_header_edge (loop, loop_preheader_edge (loop),
804 wont_exit, desc->out_edge,
806 DLTHE_FLAG_UPDATE_FREQ
807 | (opt_info && exit_mod > 1
808 ? DLTHE_RECORD_COPY_NUMBER
812 if (opt_info && exit_mod > 1)
813 apply_opt_in_copies (opt_info, exit_mod, false, false);
815 desc->noloop_assumptions = NULL_RTX;
816 desc->niter -= exit_mod;
817 loop->nb_iterations_upper_bound -= exit_mod;
818 if (loop->any_estimate
819 && wi::leu_p (exit_mod, loop->nb_iterations_estimate))
820 loop->nb_iterations_estimate -= exit_mod;
822 loop->any_estimate = false;
825 bitmap_set_bit (wont_exit, 1);
829 /* Leave the exit test in the last copy, for the same reason as above,
830 if the loop tests the condition at the end of the loop body. */
833 fprintf (dump_file, ";; Condition at end of loop.\n");
835 /* We know that niter >= max_unroll + 2; so we do not need to care about
836 the case where we would exit before reaching the loop. So just peel
837 exit_mod + 1 iterations. */
838 if (exit_mod != max_unroll
839 || desc->noloop_assumptions)
841 bitmap_clear_bit (wont_exit, 0);
842 if (desc->noloop_assumptions)
843 bitmap_clear_bit (wont_exit, 1);
845 opt_info_start_duplication (opt_info);
846 ok = duplicate_loop_to_header_edge (loop, loop_preheader_edge (loop),
848 wont_exit, desc->out_edge,
850 DLTHE_FLAG_UPDATE_FREQ
851 | (opt_info && exit_mod > 0
852 ? DLTHE_RECORD_COPY_NUMBER
856 if (opt_info && exit_mod > 0)
857 apply_opt_in_copies (opt_info, exit_mod + 1, false, false);
859 desc->niter -= exit_mod + 1;
860 loop->nb_iterations_upper_bound -= exit_mod + 1;
861 if (loop->any_estimate
862 && wi::leu_p (exit_mod + 1, loop->nb_iterations_estimate))
863 loop->nb_iterations_estimate -= exit_mod + 1;
865 loop->any_estimate = false;
866 desc->noloop_assumptions = NULL_RTX;
868 bitmap_set_bit (wont_exit, 0);
869 bitmap_set_bit (wont_exit, 1);
872 bitmap_clear_bit (wont_exit, max_unroll);
875 /* Now unroll the loop. */
877 opt_info_start_duplication (opt_info);
878 ok = duplicate_loop_to_header_edge (loop, loop_latch_edge (loop),
880 wont_exit, desc->out_edge,
882 DLTHE_FLAG_UPDATE_FREQ
884 ? DLTHE_RECORD_COPY_NUMBER
890 apply_opt_in_copies (opt_info, max_unroll, true, true);
891 free_opt_info (opt_info);
898 basic_block exit_block = get_bb_copy (desc->in_edge->src);
899 /* Find the new in and out edges; they are in the last copy we have made. */
901 if (EDGE_SUCC (exit_block, 0)->dest == desc->out_edge->dest)
903 desc->out_edge = EDGE_SUCC (exit_block, 0);
904 desc->in_edge = EDGE_SUCC (exit_block, 1);
908 desc->out_edge = EDGE_SUCC (exit_block, 1);
909 desc->in_edge = EDGE_SUCC (exit_block, 0);
913 desc->niter /= max_unroll + 1;
914 loop->nb_iterations_upper_bound
915 = wi::udiv_trunc (loop->nb_iterations_upper_bound, max_unroll + 1);
916 if (loop->any_estimate)
917 loop->nb_iterations_estimate
918 = wi::udiv_trunc (loop->nb_iterations_estimate, max_unroll + 1);
919 desc->niter_expr = GEN_INT (desc->niter);
921 /* Remove the edges. */
922 FOR_EACH_VEC_ELT (remove_edges, i, e)
927 ";; Unrolled loop %d times, constant # of iterations %i insns\n",
928 max_unroll, num_loop_insns (loop));
931 /* Decide whether to unroll LOOP iterating a runtime-computable number of times
934 decide_unroll_runtime_iterations (struct loop *loop, int flags)
936 unsigned nunroll, nunroll_by_av, i;
937 struct niter_desc *desc;
938 widest_int iterations;
940 if (!(flags & UAP_UNROLL))
942 /* We were not asked to, just return silently. */
948 "\n;; Considering unrolling loop with runtime "
949 "computable number of iterations\n");
951 /* nunroll = total number of copies of the original loop body in the
952 unrolled loop (i.e. if it is 2, we have to duplicate the loop body once). */
953 nunroll = PARAM_VALUE (PARAM_MAX_UNROLLED_INSNS) / loop->ninsns;
954 nunroll_by_av = PARAM_VALUE (PARAM_MAX_AVERAGE_UNROLLED_INSNS) / loop->av_ninsns;
955 if (nunroll > nunroll_by_av)
956 nunroll = nunroll_by_av;
957 if (nunroll > (unsigned) PARAM_VALUE (PARAM_MAX_UNROLL_TIMES))
958 nunroll = PARAM_VALUE (PARAM_MAX_UNROLL_TIMES);
960 if (targetm.loop_unroll_adjust)
961 nunroll = targetm.loop_unroll_adjust (nunroll, loop);
963 /* Skip big loops. */
967 fprintf (dump_file, ";; Not considering loop, is too big\n");
971 /* Check for simple loops. */
972 desc = get_simple_loop_desc (loop);
974 /* Check simpleness. */
975 if (!desc->simple_p || desc->assumptions)
979 ";; Unable to prove that the number of iterations "
980 "can be counted in runtime\n");
984 if (desc->const_iter)
987 fprintf (dump_file, ";; Loop iterates constant times\n");
991 /* Check whether the loop rolls. */
992 if ((get_estimated_loop_iterations (loop, &iterations)
993 || get_max_loop_iterations (loop, &iterations))
994 && wi::ltu_p (iterations, 2 * nunroll))
997 fprintf (dump_file, ";; Not unrolling loop, doesn't roll\n");
1001 /* Success; now force nunroll to be a power of 2, as we are unable to
1002 cope with overflows in the computation of the number of iterations. */
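/* For instance (illustrative numbers only): if nunroll == 6 at this point,
   the loop below stops with i == 4, so the unroll factor is 4 and
   lpt_decision.times == 3; the runtime iteration count can then be reduced
   modulo 4 simply by ANDing it with 3. */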
1003 for (i = 1; 2 * i <= nunroll; i *= 2)
1006 loop->lpt_decision.decision = LPT_UNROLL_RUNTIME;
1007 loop->lpt_decision.times = i - 1;
1010 /* Splits edge E and inserts the sequence of instructions INSNS on it, and
1011 returns the newly created block. If INSNS is NULL_RTX, nothing is changed
1012 and NULL is returned instead. */
1015 split_edge_and_insert (edge e, rtx insns)
1021 bb = split_edge (e);
1022 emit_insn_after (insns, BB_END (bb));
1024 /* ??? We used to assume that INSNS can contain control flow insns, and
1025 that we had to try to find sub basic blocks in BB to maintain a valid
1026 CFG. For this purpose we used to set the BB_SUPERBLOCK flag on BB
1027 and call break_superblocks when going out of cfglayout mode. But it
1028 turns out that this never happens; and that if it does ever happen,
1029 the TODO_verify_flow at the end of the RTL loop passes would fail.
1031 There are two reasons why we expected we could have control flow insns
1032 in INSNS. The first is when a comparison has to be done in parts, and
1033 the second is when the number of iterations is computed for loops with
1034 the number of iterations known at runtime. In both cases, test cases
1035 to get control flow in INSNS appear to be impossible to construct:
1037 * If do_compare_rtx_and_jump needs several branches to do comparison
1038 in a mode that needs comparison by parts, we cannot analyze the
1039 number of iterations of the loop, and we never get to unrolling it.
1041 * The code in expand_divmod that was suspected to cause creation of
1042 branching code seems to be only accessed for signed division. The
1043 divisions used by # of iterations analysis are always unsigned.
1044 Problems might arise on architectures that emit branching code
1045 for some operations that may appear in the unroller (especially
1046 for division), but we have no such architectures.
1048 Considering all this, it was decided that we should for now assume
1049 that INSNS can in theory contain control flow insns, but in practice
1050 it never does. So we don't handle the theoretical case, and should
1051 a real failure ever show up, we have a pretty good clue for how to
1057 /* Unroll LOOP, for which we are able to count the number of iterations at
1058 runtime, LOOP->LPT_DECISION.TIMES times. The transformation does this (with
1059 some extra care for the case n < 0):
1061 for (i = 0; i < n; i++)
1064 ==> (LOOP->LPT_DECISION.TIMES == 3)
1089 unroll_loop_runtime_iterations (struct loop *loop)
1091 rtx old_niter, niter, init_code, branch_code, tmp;
1093 basic_block preheader, *body, swtch, ezc_swtch;
1098 bool extra_zero_check, last_may_exit;
1099 unsigned max_unroll = loop->lpt_decision.times;
1100 struct niter_desc *desc = get_simple_loop_desc (loop);
1101 bool exit_at_end = loop_exit_at_end_p (loop);
1102 struct opt_info *opt_info = NULL;
1105 if (flag_split_ivs_in_unroller
1106 || flag_variable_expansion_in_unroller)
1107 opt_info = analyze_insns_in_loop (loop);
1109 /* Remember blocks whose dominators will have to be updated. */
1110 auto_vec<basic_block> dom_bbs;
1112 body = get_loop_body (loop);
1113 for (i = 0; i < loop->num_nodes; i++)
1115 vec<basic_block> ldom;
1118 ldom = get_dominated_by (CDI_DOMINATORS, body[i]);
1119 FOR_EACH_VEC_ELT (ldom, j, bb)
1120 if (!flow_bb_inside_loop_p (loop, bb))
1121 dom_bbs.safe_push (bb);
1129 /* Leave the exit in the first copy (for an explanation why, see the comment
1130 in unroll_loop_constant_iterations). */
1132 n_peel = max_unroll - 1;
1133 extra_zero_check = true;
1134 last_may_exit = false;
1138 /* Leave the exit in the last copy (for an explanation why, see the comment
1139 in unroll_loop_constant_iterations). */
1140 may_exit_copy = max_unroll;
1141 n_peel = max_unroll;
1142 extra_zero_check = false;
1143 last_may_exit = true;
1146 /* Get expression for number of iterations. */
1148 old_niter = niter = gen_reg_rtx (desc->mode);
1149 tmp = force_operand (copy_rtx (desc->niter_expr), niter);
1151 emit_move_insn (niter, tmp);
1153 /* Compute the modulo by ANDing the iteration count with max_unroll; we use
1154 the fact that the number of unrollings is a power of two, and thus this
1155 is correct even if there is overflow in the computation. */
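/* A small worked case (hypothetical values): with max_unroll == 3 the unroll
   factor is 4, which divides 2^32; so even if the true count 2^32 + 2 wraps
   to 2 in a 32-bit mode, 2 & 3 == 2 is still the correct remainder. */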
1156 niter = expand_simple_binop (desc->mode, AND,
1157 niter, gen_int_mode (max_unroll, desc->mode),
1158 NULL_RTX, 0, OPTAB_LIB_WIDEN);
1160 init_code = get_insns ();
1162 unshare_all_rtl_in_chain (init_code);
1164 /* Precondition the loop. */
1165 split_edge_and_insert (loop_preheader_edge (loop), init_code);
1167 auto_vec<edge> remove_edges;
1169 wont_exit = sbitmap_alloc (max_unroll + 2);
1171 /* Peel the first copy of the loop body (almost always we must leave the exit
1172 test here; the only exception is when we have an extra zero check and the
1173 number of iterations is reliable). Also record the place of the (possible) extra
1175 bitmap_clear (wont_exit);
1176 if (extra_zero_check
1177 && !desc->noloop_assumptions)
1178 bitmap_set_bit (wont_exit, 1);
1179 ezc_swtch = loop_preheader_edge (loop)->src;
1180 ok = duplicate_loop_to_header_edge (loop, loop_preheader_edge (loop),
1181 1, wont_exit, desc->out_edge,
1183 DLTHE_FLAG_UPDATE_FREQ);
1186 /* Record the place where switch will be built for preconditioning. */
1187 swtch = split_edge (loop_preheader_edge (loop));
1189 for (i = 0; i < n_peel; i++)
1191 /* Peel the copy. */
1192 bitmap_clear (wont_exit);
1193 if (i != n_peel - 1 || !last_may_exit)
1194 bitmap_set_bit (wont_exit, 1);
1195 ok = duplicate_loop_to_header_edge (loop, loop_preheader_edge (loop),
1196 1, wont_exit, desc->out_edge,
1198 DLTHE_FLAG_UPDATE_FREQ);
1201 /* Create item for switch. */
1202 j = n_peel - i - (extra_zero_check ? 0 : 1);
1203 p = REG_BR_PROB_BASE / (i + 2);
1205 preheader = split_edge (loop_preheader_edge (loop));
1206 branch_code = compare_and_jump_seq (copy_rtx (niter), GEN_INT (j), EQ,
1207 block_label (preheader), p,
1210 /* We rely on the fact that the compare and jump cannot be optimized out,
1211 and hence the cfg we create is correct. */
1212 gcc_assert (branch_code != NULL_RTX);
1214 swtch = split_edge_and_insert (single_pred_edge (swtch), branch_code);
1215 set_immediate_dominator (CDI_DOMINATORS, preheader, swtch);
1216 single_pred_edge (swtch)->probability = REG_BR_PROB_BASE - p;
1217 e = make_edge (swtch, preheader,
1218 single_succ_edge (swtch)->flags & EDGE_IRREDUCIBLE_LOOP);
1219 e->count = RDIV (preheader->count * REG_BR_PROB_BASE, p);
1223 if (extra_zero_check)
1225 /* Add branch for zero iterations. */
1226 p = REG_BR_PROB_BASE / (max_unroll + 1);
1228 preheader = split_edge (loop_preheader_edge (loop));
1229 branch_code = compare_and_jump_seq (copy_rtx (niter), const0_rtx, EQ,
1230 block_label (preheader), p,
1232 gcc_assert (branch_code != NULL_RTX);
1234 swtch = split_edge_and_insert (single_succ_edge (swtch), branch_code);
1235 set_immediate_dominator (CDI_DOMINATORS, preheader, swtch);
1236 single_succ_edge (swtch)->probability = REG_BR_PROB_BASE - p;
1237 e = make_edge (swtch, preheader,
1238 single_succ_edge (swtch)->flags & EDGE_IRREDUCIBLE_LOOP);
1239 e->count = RDIV (preheader->count * REG_BR_PROB_BASE, p);
1243 /* Recount dominators for outer blocks. */
1244 iterate_fix_dominators (CDI_DOMINATORS, dom_bbs, false);
1246 /* And unroll loop. */
1248 bitmap_ones (wont_exit);
1249 bitmap_clear_bit (wont_exit, may_exit_copy);
1250 opt_info_start_duplication (opt_info);
1252 ok = duplicate_loop_to_header_edge (loop, loop_latch_edge (loop),
1254 wont_exit, desc->out_edge,
1256 DLTHE_FLAG_UPDATE_FREQ
1258 ? DLTHE_RECORD_COPY_NUMBER
1264 apply_opt_in_copies (opt_info, max_unroll, true, true);
1265 free_opt_info (opt_info);
1272 basic_block exit_block = get_bb_copy (desc->in_edge->src);
1273 /* Find the new in and out edges; they are in the last copy we have
1276 if (EDGE_SUCC (exit_block, 0)->dest == desc->out_edge->dest)
1278 desc->out_edge = EDGE_SUCC (exit_block, 0);
1279 desc->in_edge = EDGE_SUCC (exit_block, 1);
1283 desc->out_edge = EDGE_SUCC (exit_block, 1);
1284 desc->in_edge = EDGE_SUCC (exit_block, 0);
1288 /* Remove the edges. */
1289 FOR_EACH_VEC_ELT (remove_edges, i, e)
1292 /* We must be careful when updating the number of iterations due to
1293 preconditioning and the fact that the value must be valid at the entry
1294 of the loop. After passing through the above code, we see that
1295 the correct new number of iterations is this: */
1296 gcc_assert (!desc->const_iter);
1298 simplify_gen_binary (UDIV, desc->mode, old_niter,
1299 gen_int_mode (max_unroll + 1, desc->mode));
1300 loop->nb_iterations_upper_bound
1301 = wi::udiv_trunc (loop->nb_iterations_upper_bound, max_unroll + 1);
1302 if (loop->any_estimate)
1303 loop->nb_iterations_estimate
1304 = wi::udiv_trunc (loop->nb_iterations_estimate, max_unroll + 1);
1308 simplify_gen_binary (MINUS, desc->mode, desc->niter_expr, const1_rtx);
1309 desc->noloop_assumptions = NULL_RTX;
1310 --loop->nb_iterations_upper_bound;
1311 if (loop->any_estimate
1312 && loop->nb_iterations_estimate != 0)
1313 --loop->nb_iterations_estimate;
1315 loop->any_estimate = false;
1320 ";; Unrolled loop %d times, counting # of iterations "
1321 "in runtime, %i insns\n",
1322 max_unroll, num_loop_insns (loop));
1325 /* Decide whether to simply peel LOOP and how much. */
1327 decide_peel_simple (struct loop *loop, int flags)
1330 widest_int iterations;
1332 if (!(flags & UAP_PEEL))
1334 /* We were not asked to, just return silently. */
1339 fprintf (dump_file, "\n;; Considering simply peeling loop\n");
1341 /* npeel = number of iterations to peel. */
1342 npeel = PARAM_VALUE (PARAM_MAX_PEELED_INSNS) / loop->ninsns;
1343 if (npeel > (unsigned) PARAM_VALUE (PARAM_MAX_PEEL_TIMES))
1344 npeel = PARAM_VALUE (PARAM_MAX_PEEL_TIMES);
1346 /* Skip big loops. */
1350 fprintf (dump_file, ";; Not considering loop, is too big\n");
1354 /* Do not simply peel loops with branches inside -- it increases number
1356 The exception is when we do have a profile and thus have a good chance
1357 of peeling exactly the number of iterations the loop will iterate in practice.
1358 TODO: this heuristic needs tuning; while for complete unrolling
1359 the branch inside the loop mostly eliminates any improvements, for
1360 peeling this is not the case. A function call inside the loop is
1361 also a branch from the branch prediction POV (and probably a better reason
1362 not to unroll/peel). */
1363 if (num_loop_branches (loop) > 1
1364 && profile_status_for_fn (cfun) != PROFILE_READ)
1367 fprintf (dump_file, ";; Not peeling, contains branches\n");
1371 /* If we have a realistic estimate of the number of iterations, use it. */
1372 if (get_estimated_loop_iterations (loop, &iterations))
1374 /* TODO: unsigned/signed confusion */
1375 if (wi::leu_p (npeel, iterations))
1379 fprintf (dump_file, ";; Not peeling loop, rolls too much (");
1380 fprintf (dump_file, HOST_WIDEST_INT_PRINT_DEC,
1381 (HOST_WIDEST_INT) (iterations.to_shwi () + 1));
1382 fprintf (dump_file, " iterations > %d [maximum peelings])\n",
1387 npeel = iterations.to_shwi () + 1;
1389 /* If we have a small enough bound on the iterations, we can still peel (completely
1391 else if (get_max_loop_iterations (loop, &iterations)
1392 && wi::ltu_p (iterations, npeel))
1393 npeel = iterations.to_shwi () + 1;
1396 /* For now we have no good heuristics to decide whether loop peeling
1397 will be effective, so disable it. */
1400 ";; Not peeling loop, no evidence it will be profitable\n");
1405 loop->lpt_decision.decision = LPT_PEEL_SIMPLE;
1406 loop->lpt_decision.times = npeel;
1409 /* Peel a LOOP LOOP->LPT_DECISION.TIMES times. The transformation does this:
1414 ==> (LOOP->LPT_DECISION.TIMES == 3)
1416 if (!cond) goto end;
1418 if (!cond) goto end;
1420 if (!cond) goto end;
1427 peel_loop_simple (struct loop *loop)
1430 unsigned npeel = loop->lpt_decision.times;
1431 struct niter_desc *desc = get_simple_loop_desc (loop);
1432 struct opt_info *opt_info = NULL;
1435 if (flag_split_ivs_in_unroller && npeel > 1)
1436 opt_info = analyze_insns_in_loop (loop);
1438 wont_exit = sbitmap_alloc (npeel + 1);
1439 bitmap_clear (wont_exit);
1441 opt_info_start_duplication (opt_info);
1443 ok = duplicate_loop_to_header_edge (loop, loop_preheader_edge (loop),
1444 npeel, wont_exit, NULL,
1445 NULL, DLTHE_FLAG_UPDATE_FREQ
1447 ? DLTHE_RECORD_COPY_NUMBER
1455 apply_opt_in_copies (opt_info, npeel, false, false);
1456 free_opt_info (opt_info);
1461 if (desc->const_iter)
1463 desc->niter -= npeel;
1464 desc->niter_expr = GEN_INT (desc->niter);
1465 desc->noloop_assumptions = NULL_RTX;
1469 /* We cannot just update niter_expr, as its value might be clobbered
1470 inside the loop. We could handle this by counting the number into a
1471 temporary just like we do in runtime unrolling, but it does not
1473 free_simple_loop_desc (loop);
1477 fprintf (dump_file, ";; Peeling loop %d times\n", npeel);
1480 /* Decide whether to unroll LOOP stupidly and how much. */
1482 decide_unroll_stupid (struct loop *loop, int flags)
1484 unsigned nunroll, nunroll_by_av, i;
1485 struct niter_desc *desc;
1486 widest_int iterations;
1488 if (!(flags & UAP_UNROLL_ALL))
1490 /* We were not asked to, just return silently. */
1495 fprintf (dump_file, "\n;; Considering unrolling loop stupidly\n");
1497 /* nunroll = total number of copies of the original loop body in the
1498 unrolled loop (i.e. if it is 2, we have to duplicate the loop body once). */
1499 nunroll = PARAM_VALUE (PARAM_MAX_UNROLLED_INSNS) / loop->ninsns;
1501 = PARAM_VALUE (PARAM_MAX_AVERAGE_UNROLLED_INSNS) / loop->av_ninsns;
1502 if (nunroll > nunroll_by_av)
1503 nunroll = nunroll_by_av;
1504 if (nunroll > (unsigned) PARAM_VALUE (PARAM_MAX_UNROLL_TIMES))
1505 nunroll = PARAM_VALUE (PARAM_MAX_UNROLL_TIMES);
1507 if (targetm.loop_unroll_adjust)
1508 nunroll = targetm.loop_unroll_adjust (nunroll, loop);
1510 /* Skip big loops. */
1514 fprintf (dump_file, ";; Not considering loop, is too big\n");
1518 /* Check for simple loops. */
1519 desc = get_simple_loop_desc (loop);
1521 /* Check simpleness. */
1522 if (desc->simple_p && !desc->assumptions)
1525 fprintf (dump_file, ";; The loop is simple\n");
1529 /* Do not unroll loops with branches inside -- it increases number
1531 TODO: this heuristic needs tuning; a call inside the loop body
1532 is also a relatively good reason not to unroll. */
1533 if (num_loop_branches (loop) > 1)
1536 fprintf (dump_file, ";; Not unrolling, contains branches\n");
1540 /* Check whether the loop rolls. */
1541 if ((get_estimated_loop_iterations (loop, &iterations)
1542 || get_max_loop_iterations (loop, &iterations))
1543 && wi::ltu_p (iterations, 2 * nunroll))
1546 fprintf (dump_file, ";; Not unrolling loop, doesn't roll\n");
1550 /* Success. Now force nunroll to be a power of 2, as it seems that this
1551 improves results (partially because of better alignment, partially
1552 because of some dark magic). */
1553 for (i = 1; 2 * i <= nunroll; i *= 2)
1556 loop->lpt_decision.decision = LPT_UNROLL_STUPID;
1557 loop->lpt_decision.times = i - 1;
1560 /* Unroll a LOOP LOOP->LPT_DECISION.TIMES times. The transformation does this:
1565 ==> (LOOP->LPT_DECISION.TIMES == 3)
1579 unroll_loop_stupid (struct loop *loop)
1582 unsigned nunroll = loop->lpt_decision.times;
1583 struct niter_desc *desc = get_simple_loop_desc (loop);
1584 struct opt_info *opt_info = NULL;
1587 if (flag_split_ivs_in_unroller
1588 || flag_variable_expansion_in_unroller)
1589 opt_info = analyze_insns_in_loop (loop);
1592 wont_exit = sbitmap_alloc (nunroll + 1);
1593 bitmap_clear (wont_exit);
1594 opt_info_start_duplication (opt_info);
1596 ok = duplicate_loop_to_header_edge (loop, loop_latch_edge (loop),
1599 DLTHE_FLAG_UPDATE_FREQ
1601 ? DLTHE_RECORD_COPY_NUMBER
1607 apply_opt_in_copies (opt_info, nunroll, true, true);
1608 free_opt_info (opt_info);
1615 /* We indeed may get here provided that there are nontrivial assumptions
1616 for a loop to be really simple. We could update the counts, but the
1617 problem is that we are unable to decide which exit will be taken
1618 (not really true in the case where the number of iterations is constant,
1619 but no one will do anything with this information, so we do not
1621 desc->simple_p = false;
1625 fprintf (dump_file, ";; Unrolled loop %d times, %i insns\n",
1626 nunroll, num_loop_insns (loop));
1629 /* Returns true if REG is referenced in one nondebug insn in LOOP.
1630 Set *DEBUG_USES to the number of debug insns that reference the
1634 referenced_in_one_insn_in_loop_p (struct loop *loop, rtx reg,
1637 basic_block *body, bb;
1642 body = get_loop_body (loop);
1643 for (i = 0; i < loop->num_nodes; i++)
1647 FOR_BB_INSNS (bb, insn)
1648 if (!rtx_referenced_p (reg, insn))
1650 else if (DEBUG_INSN_P (insn))
1652 else if (++count_ref > 1)
1656 return (count_ref == 1);
1659 /* Reset the DEBUG_USES debug insns in LOOP that reference REG. */
1662 reset_debug_uses_in_loop (struct loop *loop, rtx reg, int debug_uses)
1664 basic_block *body, bb;
1668 body = get_loop_body (loop);
1669 for (i = 0; debug_uses && i < loop->num_nodes; i++)
1673 FOR_BB_INSNS (bb, insn)
1674 if (!DEBUG_INSN_P (insn) || !rtx_referenced_p (reg, insn))
1678 validate_change (insn, &INSN_VAR_LOCATION_LOC (insn),
1679 gen_rtx_UNKNOWN_VAR_LOC (), 0);
1687 /* Determine whether INSN contains an accumulator
1688 which can be expanded into separate copies,
1689 one for each copy of the LOOP body.
1691 for (i = 0 ; i < n; i++)
1705 Return NULL if INSN contains no opportunity for expansion of an accumulator.
1706 Otherwise, allocate a VAR_TO_EXPAND structure, fill it with the relevant
1707 information and return a pointer to it.
1710 static struct var_to_expand *
1711 analyze_insn_to_expand_var (struct loop *loop, rtx insn)
1714 struct var_to_expand *ves;
1719 set = single_set (insn);
1723 dest = SET_DEST (set);
1724 src = SET_SRC (set);
1725 code = GET_CODE (src);
1727 if (code != PLUS && code != MINUS && code != MULT && code != FMA)
1730 if (FLOAT_MODE_P (GET_MODE (dest)))
1732 if (!flag_associative_math)
1734 /* In the case of FMA, we're also changing the rounding. */
1735 if (code == FMA && !flag_unsafe_math_optimizations)
1739 /* Hmm, this is a bit paradoxical. We know that INSN is a valid insn
1740 in MD. But if there is no optab to generate the insn, we cannot
1741 perform the variable expansion. This can happen if an MD provides
1742 an insn but not a named pattern to generate it, for example to avoid
1743 producing code that needs additional mode switches like for x87/mmx.
1745 So we check have_insn_for which looks for an optab for the operation
1746 in SRC. If it doesn't exist, we can't perform the expansion even
1747 though INSN is valid. */
1748 if (!have_insn_for (code, GET_MODE (src)))
1752 && !(GET_CODE (dest) == SUBREG
1753 && REG_P (SUBREG_REG (dest))))
1756 /* Find the accumulator use within the operation. */
1759 /* We only support accumulation via FMA in the ADD position. */
1760 if (!rtx_equal_p (dest, XEXP (src, 2)))
1764 else if (rtx_equal_p (dest, XEXP (src, 0)))
1766 else if (rtx_equal_p (dest, XEXP (src, 1)))
1768 /* The method of expansion that we are using, which includes
1769 initializing the expansions with zero and summing the
1770 expansions at the end of the computation, will yield wrong
1771 results for (x = something - x); thus avoid using it in that case. */
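/* A short illustration (hypothetical values): splitting x = 5 - x into two
   copies x1 = 5 - x1 and x2 = 5 - x2, both starting from zero, and summing
   x1 + x2 at the exit gives 10 after two iterations, whereas the original
   recurrence yields 0 -- hence the MINUS case with the accumulator on the
   right-hand side is avoided. */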
1779 /* It must not otherwise be used. */
1782 if (rtx_referenced_p (dest, XEXP (src, 0))
1783 || rtx_referenced_p (dest, XEXP (src, 1)))
1786 else if (rtx_referenced_p (dest, XEXP (src, 1 - accum_pos)))
1789 /* It must be used in exactly one insn. */
1790 if (!referenced_in_one_insn_in_loop_p (loop, dest, &debug_uses))
1795 fprintf (dump_file, "\n;; Expanding Accumulator ");
1796 print_rtl (dump_file, dest);
1797 fprintf (dump_file, "\n");
1801 /* Instead of resetting the debug insns, we could replace each
1802 debug use in the loop with the sum or product of all expanded
1803 accumulators. Since we'll only know of all expansions at the
1804 end, we'd have to keep track of which vars_to_expand a debug
1805 insn in the loop references, take note of each copy of the
1806 debug insn during unrolling, and when it's all done, compute
1807 the sum or product of each variable and adjust the original
1808 debug insn and each copy thereof. What a pain! */
1809 reset_debug_uses_in_loop (loop, dest, debug_uses);
1811 /* Record the accumulator to expand. */
1812 ves = XNEW (struct var_to_expand);
1814 ves->reg = copy_rtx (dest);
1815 ves->var_expansions.create (1);
1817 ves->op = GET_CODE (src);
1818 ves->expansion_count = 0;
1819 ves->reuse_expansion = 0;
1823 /* Determine whether there is an induction variable in INSN that
1824 we would like to split during unrolling.
1844 Return NULL if INSN contains no interesting IVs. Otherwise, allocate
1845 an IV_TO_SPLIT structure, fill it with the relevant information and return a
1848 static struct iv_to_split *
1849 analyze_iv_to_split_insn (rtx insn)
1853 struct iv_to_split *ivts;
1856 /* For now we just split the basic induction variables. Later this may be
1857 extended, for example by also selecting addresses of memory references. */
1858 set = single_set (insn);
1862 dest = SET_DEST (set);
1866 if (!biv_p (insn, dest))
1869 ok = iv_analyze_result (insn, dest, &iv);
1871 /* This used to be an assert under the assumption that if biv_p returns
1872 true then iv_analyze_result must also return true. However, that
1873 assumption is not strictly correct as evidenced by pr25569.
1875 Returning NULL when iv_analyze_result returns false is safe and
1876 avoids the problems in pr25569 until the iv_analyze_* routines
1877 can be fixed, which is apparently hard and time consuming
1878 according to their author. */
1882 if (iv.step == const0_rtx
1883 || iv.mode != iv.extend_mode)
1886 /* Record the insn to split. */
1887 ivts = XNEW (struct iv_to_split);
1889 ivts->orig_var = dest;
1890 ivts->base_var = NULL_RTX;
1891 ivts->step = iv.step;
1899 /* Determine which insns in LOOP can be optimized.
1900 Return an OPT_INFO struct with the relevant hash tables filled
1901 with all insns to be optimized. The FIRST_NEW_BLOCK field
1902 is undefined for the return value. */
1904 static struct opt_info *
1905 analyze_insns_in_loop (struct loop *loop)
1907 basic_block *body, bb;
1909 struct opt_info *opt_info = XCNEW (struct opt_info);
1911 struct iv_to_split *ivts = NULL;
1912 struct var_to_expand *ves = NULL;
1913 iv_to_split **slot1;
1914 var_to_expand **slot2;
1915 vec<edge> edges = get_loop_exit_edges (loop);
1917 bool can_apply = false;
1919 iv_analysis_loop_init (loop);
1921 body = get_loop_body (loop);
1923 if (flag_split_ivs_in_unroller)
1925 opt_info->insns_to_split.create (5 * loop->num_nodes);
1926 opt_info->iv_to_split_head = NULL;
1927 opt_info->iv_to_split_tail = &opt_info->iv_to_split_head;
1930 /* Record the loop exit bb and loop preheader before the unrolling. */
1931 opt_info->loop_preheader = loop_preheader_edge (loop)->src;
1933 if (edges.length () == 1)
1936 if (!(exit->flags & EDGE_COMPLEX))
1938 opt_info->loop_exit = split_edge (exit);
1943 if (flag_variable_expansion_in_unroller
1946 opt_info->insns_with_var_to_expand.create (5 * loop->num_nodes);
1947 opt_info->var_to_expand_head = NULL;
1948 opt_info->var_to_expand_tail = &opt_info->var_to_expand_head;
1951 for (i = 0; i < loop->num_nodes; i++)
1954 if (!dominated_by_p (CDI_DOMINATORS, loop->latch, bb))
1957 FOR_BB_INSNS (bb, insn)
1962 if (opt_info->insns_to_split.is_created ())
1963 ivts = analyze_iv_to_split_insn (insn);
1967 slot1 = opt_info->insns_to_split.find_slot (ivts, INSERT);
1968 gcc_assert (*slot1 == NULL);
1970 *opt_info->iv_to_split_tail = ivts;
1971 opt_info->iv_to_split_tail = &ivts->next;
1975 if (opt_info->insns_with_var_to_expand.is_created ())
1976 ves = analyze_insn_to_expand_var (loop, insn);
1980 slot2 = opt_info->insns_with_var_to_expand.find_slot (ves, INSERT);
1981 gcc_assert (*slot2 == NULL);
1983 *opt_info->var_to_expand_tail = ves;
1984 opt_info->var_to_expand_tail = &ves->next;
1994 /* Called just before loop duplication. Records start of duplicated area
1998 opt_info_start_duplication (struct opt_info *opt_info)
2001 opt_info->first_new_block = last_basic_block_for_fn (cfun);
2004 /* Determine the number of iterations between initialization of the base
2005 variable and the current copy (N_COPY). N_COPIES is the total number
2006 of newly created copies. UNROLLING is true if we are unrolling
2007 (not peeling) the loop. */
2010 determine_split_iv_delta (unsigned n_copy, unsigned n_copies, bool unrolling)
2014 /* If we are unrolling, initialization is done in the original loop
2020 /* If we are peeling, the copy in which the initialization occurs has
2021 number 1. The original loop (number 0) is the last. */
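/* For example (an illustrative reading of the two cases above): when
   unrolling, copy N_COPY is N_COPY iterations ahead of the original body,
   so the delta is simply N_COPY; when peeling, the first peeled copy
   (number 1) gets delta 0 and the original body, which runs last, gets
   delta N_COPIES. */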
2029 /* Locate in EXPR the expression corresponding to the location recorded
2030 in IVTS, and return a pointer to the RTX for this location. */
2033 get_ivts_expr (rtx expr, struct iv_to_split *ivts)
2038 for (i = 0; i < ivts->n_loc; i++)
2039 ret = &XEXP (*ret, ivts->loc[i]);
2044 /* Allocate basic variable for the induction variable chain. */
2047 allocate_basic_variable (struct iv_to_split *ivts)
2049 rtx expr = *get_ivts_expr (single_set (ivts->insn), ivts);
2051 ivts->base_var = gen_reg_rtx (GET_MODE (expr));
2054 /* Insert initialization of basic variable of IVTS before INSN, taking
2055 the initial value from INSN. */
2058 insert_base_initialization (struct iv_to_split *ivts, rtx insn)
2060 rtx expr = copy_rtx (*get_ivts_expr (single_set (insn), ivts));
2064 expr = force_operand (expr, ivts->base_var);
2065 if (expr != ivts->base_var)
2066 emit_move_insn (ivts->base_var, expr);
2070 emit_insn_before (seq, insn);
2073 /* Replace the use of induction variable described in IVTS in INSN
2074 by base variable + DELTA * step. */
2077 split_iv (struct iv_to_split *ivts, rtx insn, unsigned delta)
2079 rtx expr, *loc, seq, incr, var;
2080 enum machine_mode mode = GET_MODE (ivts->base_var);
2083 /* Construct base + DELTA * step. */
2085 expr = ivts->base_var;
2088 incr = simplify_gen_binary (MULT, mode,
2089 ivts->step, gen_int_mode (delta, mode));
2090 expr = simplify_gen_binary (PLUS, GET_MODE (ivts->base_var),
2091 ivts->base_var, incr);
2094 /* Figure out where to do the replacement. */
2095 loc = get_ivts_expr (single_set (insn), ivts);
2097 /* If we can make the replacement right away, we're done. */
2098 if (validate_change (insn, loc, expr, 0))
2101 /* Otherwise, force EXPR into a register and try again. */
2103 var = gen_reg_rtx (mode);
2104 expr = force_operand (expr, var);
2106 emit_move_insn (var, expr);
2109 emit_insn_before (seq, insn);
2111 if (validate_change (insn, loc, var, 0))
2114 /* The last chance. Try recreating the assignment in insn
2115 completely from scratch. */
2116 set = single_set (insn);
2121 src = copy_rtx (SET_SRC (set));
2122 dest = copy_rtx (SET_DEST (set));
2123 src = force_operand (src, dest);
2125 emit_move_insn (dest, src);
2129 emit_insn_before (seq, insn);
2134 /* Return one expansion of the accumulator recorded in struct VE. */
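/* Illustratively (a hypothetical call sequence): once the expansion limit
   has been reached, successive calls cycle round-robin through the original
   register and the copies already created -- VE->reg, var_expansions[0],
   var_expansions[1], ..., then VE->reg again -- so later unrolled copies
   reuse the existing accumulators. */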
2137 get_expansion (struct var_to_expand *ve)
2141 if (ve->reuse_expansion == 0)
2144 reg = ve->var_expansions[ve->reuse_expansion - 1];
2146 if (ve->var_expansions.length () == (unsigned) ve->reuse_expansion)
2147 ve->reuse_expansion = 0;
2149 ve->reuse_expansion++;
2155 /* Given INSN replace the uses of the accumulator recorded in VE
2156 with a new register. */
2159 expand_var_during_unrolling (struct var_to_expand *ve, rtx insn)
2162 bool really_new_expansion = false;
2164 set = single_set (insn);
2167 /* Generate a new register only if the expansion limit has not been
2168 reached. Else reuse an already existing expansion. */
2169 if (PARAM_VALUE (PARAM_MAX_VARIABLE_EXPANSIONS) > ve->expansion_count)
2171 really_new_expansion = true;
2172 new_reg = gen_reg_rtx (GET_MODE (ve->reg));
2175 new_reg = get_expansion (ve);
2177 validate_replace_rtx_group (SET_DEST (set), new_reg, insn);
2178 if (apply_change_group ())
2179 if (really_new_expansion)
2181 ve->var_expansions.safe_push (new_reg);
2182 ve->expansion_count++;
2186 /* Initialize the variable expansions in the loop preheader. PLACE is the
2187 loop-preheader basic block where the initialization of the
2188 expansions should take place. The expansions are initialized with
2189 (-0) when the operation is plus or minus to honor signed zero. This
2190 way we can prevent cases where the sign of the final result is
2191 affected by the sign of the expansion. Here is an example to
2194 for (i = 0 ; i < n; i++)
2208 When SUM is initialized with -zero and SOMETHING is also -zero, the
2209 final result of sum should be -zero; thus the expansions sum1 and sum2
2210 should be initialized with -zero as well (otherwise we will get +zero
2211 as the final result). */
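/* Concretely (a worked IEEE example, not taken from this file): under
   round-to-nearest, (-0.0) + (-0.0) == -0.0 while (+0.0) + (-0.0) == +0.0,
   so partial sums initialized with +0.0 would turn an accumulation of
   -0.0 values into +0.0 once they are combined. */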
2214 insert_var_expansion_initialization (struct var_to_expand *ve,
2217 rtx seq, var, zero_init;
2219 enum machine_mode mode = GET_MODE (ve->reg);
2220 bool honor_signed_zero_p = HONOR_SIGNED_ZEROS (mode);
2222 if (ve->var_expansions.length () == 0)
2229 /* Note that we only accumulate FMA via the ADD operand. */
2232 FOR_EACH_VEC_ELT (ve->var_expansions, i, var)
2234 if (honor_signed_zero_p)
2235 zero_init = simplify_gen_unary (NEG, mode, CONST0_RTX (mode), mode);
2237 zero_init = CONST0_RTX (mode);
2238 emit_move_insn (var, zero_init);
2243 FOR_EACH_VEC_ELT (ve->var_expansions, i, var)
2245 zero_init = CONST1_RTX (GET_MODE (var));
2246 emit_move_insn (var, zero_init);
2257 emit_insn_after (seq, BB_END (place));
2260 /* Combine the variable expansions at the loop exit. PLACE is the
2261 loop exit basic block where the summation of the expansions should
2265 combine_var_copies_in_loop_exit (struct var_to_expand *ve, basic_block place)
2268 rtx expr, seq, var, insn;
2271 if (ve->var_expansions.length () == 0)
2278 /* Note that we only accumulate FMA via the ADD operand. */
2281 FOR_EACH_VEC_ELT (ve->var_expansions, i, var)
2282 sum = simplify_gen_binary (PLUS, GET_MODE (ve->reg), var, sum);
2286 FOR_EACH_VEC_ELT (ve->var_expansions, i, var)
2287 sum = simplify_gen_binary (MULT, GET_MODE (ve->reg), var, sum);
2294 expr = force_operand (sum, ve->reg);
2295 if (expr != ve->reg)
2296 emit_move_insn (ve->reg, expr);
2300 insn = BB_HEAD (place);
2301 while (!NOTE_INSN_BASIC_BLOCK_P (insn))
2302 insn = NEXT_INSN (insn);
2304 emit_insn_after (seq, insn);
2307 /* Strip away REG_EQUAL notes for IVs we're splitting.
2309 Updating REG_EQUAL notes for IVs we split is tricky: We
2310 cannot tell until after unrolling, DF-rescanning, and liveness
2311 updating, whether an EQ_USE is reached by the split IV while
2312 the IV reg is still live. See PR55006.
2314 ??? We cannot use remove_reg_equal_equiv_notes_for_regno,
2315 because RTL loop-iv requires us to defer rescanning insns and
2316 any notes attached to them. So resort to old techniques... */
2319 maybe_strip_eq_note_for_split_iv (struct opt_info *opt_info, rtx insn)
2321 struct iv_to_split *ivts;
2322 rtx note = find_reg_equal_equiv_note (insn);
2325 for (ivts = opt_info->iv_to_split_head; ivts; ivts = ivts->next)
2326 if (reg_mentioned_p (ivts->orig_var, note))
2328 remove_note (insn, note);
2333 /* Apply loop optimizations in loop copies using the
2334 data gathered during the unrolling. Structure
2335 OPT_INFO records that data.
2337 UNROLLING is true if we unrolled (not peeled) the loop.
2338 REWRITE_ORIGINAL_LOOP is true if we should also rewrite the original body of
2339 the loop (as should happen in complete unrolling, but not in ordinary
2340 peeling of the loop). */
2343 apply_opt_in_copies (struct opt_info *opt_info,
2344 unsigned n_copies, bool unrolling,
2345 bool rewrite_original_loop)
2348 basic_block bb, orig_bb;
2349 rtx insn, orig_insn, next;
2350 struct iv_to_split ivts_templ, *ivts;
2351 struct var_to_expand ve_templ, *ves;
2353 /* Sanity check -- we need to put initialization in the original loop
2355 gcc_assert (!unrolling || rewrite_original_loop);
2357 /* Allocate the basic variables (i0). */
2358 if (opt_info->insns_to_split.is_created ())
2359 for (ivts = opt_info->iv_to_split_head; ivts; ivts = ivts->next)
2360 allocate_basic_variable (ivts);
2362 for (i = opt_info->first_new_block;
2363 i < (unsigned) last_basic_block_for_fn (cfun);
2366 bb = BASIC_BLOCK_FOR_FN (cfun, i);
2367 orig_bb = get_bb_original (bb);
2369 /* bb->aux holds position in copy sequence initialized by
2370 duplicate_loop_to_header_edge. */
2371 delta = determine_split_iv_delta ((size_t)bb->aux, n_copies,
2374 orig_insn = BB_HEAD (orig_bb);
2375 FOR_BB_INSNS_SAFE (bb, insn, next)
2378 || (DEBUG_INSN_P (insn)
2379 && TREE_CODE (INSN_VAR_LOCATION_DECL (insn)) == LABEL_DECL))
2382 while (!INSN_P (orig_insn)
2383 || (DEBUG_INSN_P (orig_insn)
2384 && (TREE_CODE (INSN_VAR_LOCATION_DECL (orig_insn))
2386 orig_insn = NEXT_INSN (orig_insn);
2388 ivts_templ.insn = orig_insn;
2389 ve_templ.insn = orig_insn;
2391 /* Apply splitting iv optimization. */
2392 if (opt_info->insns_to_split.is_created ())
2394 maybe_strip_eq_note_for_split_iv (opt_info, insn);
2396 ivts = opt_info->insns_to_split.find (&ivts_templ);
2400 gcc_assert (GET_CODE (PATTERN (insn))
2401 == GET_CODE (PATTERN (orig_insn)));
2404 insert_base_initialization (ivts, insn);
2405 split_iv (ivts, insn, delta);
2408 /* Apply variable expansion optimization. */
2409 if (unrolling && opt_info->insns_with_var_to_expand.is_created ())
2411 ves = (struct var_to_expand *)
2412 opt_info->insns_with_var_to_expand.find (&ve_templ);
2415 gcc_assert (GET_CODE (PATTERN (insn))
2416 == GET_CODE (PATTERN (orig_insn)));
2417 expand_var_during_unrolling (ves, insn);
2420 orig_insn = NEXT_INSN (orig_insn);
2424 if (!rewrite_original_loop)
2427 /* Initialize the variable expansions in the loop preheader
2428 and take care of combining them at the loop exit. */
2429 if (opt_info->insns_with_var_to_expand.is_created ())
2431 for (ves = opt_info->var_to_expand_head; ves; ves = ves->next)
2432 insert_var_expansion_initialization (ves, opt_info->loop_preheader);
2433 for (ves = opt_info->var_to_expand_head; ves; ves = ves->next)
2434 combine_var_copies_in_loop_exit (ves, opt_info->loop_exit);
2437 /* Rewrite also the original loop body. Find its blocks as the originals
2438 of the blocks in the last copied iteration, i.e. those that have
2439 get_bb_copy (get_bb_original (bb)) == bb. */
2440 for (i = opt_info->first_new_block;
2441 i < (unsigned) last_basic_block_for_fn (cfun);
2444 bb = BASIC_BLOCK_FOR_FN (cfun, i);
2445 orig_bb = get_bb_original (bb);
2446 if (get_bb_copy (orig_bb) != bb)
2449 delta = determine_split_iv_delta (0, n_copies, unrolling);
2450 for (orig_insn = BB_HEAD (orig_bb);
2451 orig_insn != NEXT_INSN (BB_END (bb));
2454 next = NEXT_INSN (orig_insn);
2456 if (!INSN_P (orig_insn))
2459 ivts_templ.insn = orig_insn;
2460 if (opt_info->insns_to_split.is_created ())
2462 maybe_strip_eq_note_for_split_iv (opt_info, orig_insn);
2464 ivts = (struct iv_to_split *)
2465 opt_info->insns_to_split.find (&ivts_templ);
2469 insert_base_initialization (ivts, orig_insn);
2470 split_iv (ivts, orig_insn, delta);
2479 /* Release OPT_INFO. */
2482 free_opt_info (struct opt_info *opt_info)
2484 if (opt_info->insns_to_split.is_created ())
2485 opt_info->insns_to_split.dispose ();
2486 if (opt_info->insns_with_var_to_expand.is_created ())
2488 struct var_to_expand *ves;
2490 for (ves = opt_info->var_to_expand_head; ves; ves = ves->next)
2491 ves->var_expansions.release ();
2492 opt_info->insns_with_var_to_expand.dispose ();