/* Partial redundancy elimination / Hoisting for RTL.
   Copyright (C) 1997, 1998, 1999, 2000, 2001, 2002, 2003, 2004, 2005,
   2006, 2007, 2008, 2009, 2010, 2011 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
/* TODO
   - reordering of memory allocation and freeing to be more space efficient
   - do rough calc of how many regs are needed in each block, and a rough
     calc of how many regs are available in each class and use that to
     throttle back the code in cases where RTX_COST is minimal.
*/
/* References searched while implementing this.

   Compilers Principles, Techniques and Tools
   Aho, Sethi, Ullman
   Addison-Wesley, 1988

   Global Optimization by Suppression of Partial Redundancies
   E. Morel, C. Renvoise
   Communications of the ACM, Vol. 22, Num. 2, Feb. 1979

   A Portable Machine-Independent Global Optimizer - Design and Measurements
   Frederick Chow
   Stanford Ph.D. thesis, Dec. 1983

   A Fast Algorithm for Code Movement Optimization
   D.M. Dhamdhere
   SIGPLAN Notices, Vol. 23, Num. 10, Oct. 1988

   A Solution to a Problem with Morel and Renvoise's
   Global Optimization by Suppression of Partial Redundancies
   K-H Drechsler, M.P. Stadel
   ACM TOPLAS, Vol. 10, Num. 4, Oct. 1988

   Practical Adaptation of the Global Optimization
   Algorithm of Morel and Renvoise
   D.M. Dhamdhere
   ACM TOPLAS, Vol. 13, Num. 2, Apr. 1991

   Efficiently Computing Static Single Assignment Form and the Control
   Dependence Graph
   R. Cytron, J. Ferrante, B.K. Rosen, M.N. Wegman, and F.K. Zadeck
   ACM TOPLAS, Vol. 13, Num. 4, Oct. 1991

   Lazy Code Motion
   J. Knoop, O. Ruthing, B. Steffen
   ACM SIGPLAN Notices Vol. 27, Num. 7, Jul. 1992, '92 Conference on PLDI

   What's In a Region?  Or Computing Control Dependence Regions in Near-Linear
   Time for Reducible Flow Control
   Thomas Ball
   ACM Letters on Programming Languages and Systems,
   Vol. 2, Num. 1-4, Mar-Dec 1993

   An Efficient Representation for Sparse Sets
   Preston Briggs, Linda Torczon
   ACM Letters on Programming Languages and Systems,
   Vol. 2, Num. 1-4, Mar-Dec 1993

   A Variation of Knoop, Ruthing, and Steffen's Lazy Code Motion
   K-H Drechsler, M.P. Stadel
   ACM SIGPLAN Notices, Vol. 28, Num. 5, May 1993

   Partial Dead Code Elimination
   J. Knoop, O. Ruthing, B. Steffen
   ACM SIGPLAN Notices, Vol. 29, Num. 6, Jun. 1994

   Effective Partial Redundancy Elimination
   P. Briggs, K.D. Cooper
   ACM SIGPLAN Notices, Vol. 29, Num. 6, Jun. 1994

   The Program Structure Tree: Computing Control Regions in Linear Time
   R. Johnson, D. Pearson, K. Pingali
   ACM SIGPLAN Notices, Vol. 29, Num. 6, Jun. 1994

   Optimal Code Motion: Theory and Practice
   J. Knoop, O. Ruthing, B. Steffen
   ACM TOPLAS, Vol. 16, Num. 4, Jul. 1994

   The power of assignment motion
   J. Knoop, O. Ruthing, B. Steffen
   ACM SIGPLAN Notices Vol. 30, Num. 6, Jun. 1995, '95 Conference on PLDI

   Global code motion / global value numbering
   C. Click
   ACM SIGPLAN Notices Vol. 30, Num. 6, Jun. 1995, '95 Conference on PLDI

   Value Driven Redundancy Elimination
   L.T. Simpson
   Rice University Ph.D. thesis, Apr. 1996

   Value Numbering
   L.T. Simpson
   Massively Scalar Compiler Project, Rice University, Sep. 1996

   High Performance Compilers for Parallel Computing
   Michael Wolfe
   Addison-Wesley, 1996

   Advanced Compiler Design and Implementation
   Steven Muchnick
   Morgan Kaufmann, 1997

   Building an Optimizing Compiler
   Robert Morgan
   Digital Press, 1998

   People wishing to speed up the code here should read:
     Elimination Algorithms for Data Flow Analysis
     B.G. Ryder, M.C. Paull
     ACM Computing Surveys, Vol. 18, Num. 3, Sep. 1986

     How to Analyze Large Programs Efficiently and Informatively
     D.M. Dhamdhere, B.K. Rosen, F.K. Zadeck
     ACM SIGPLAN Notices Vol. 27, Num. 7, Jul. 1992, '92 Conference on PLDI

   People wishing to do something different can find various possibilities
   in the above papers and elsewhere.
*/
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "rtl.h"
#include "diagnostic-core.h"
#include "hard-reg-set.h"
#include "insn-config.h"
#include "basic-block.h"
#include "function.h"
#include "tree-pass.h"
/* We support GCSE via Partial Redundancy Elimination.  PRE optimizations
   are a superset of those done by classic GCSE.

   Two passes of copy/constant propagation are done around PRE or hoisting
   because the first one enables more GCSE and the second one helps to clean
   up the copies that PRE and HOIST create.  This is needed more for PRE than
   for HOIST because code hoisting will try to use an existing register
   containing the common subexpression rather than create a new one.  This is
   harder to do for PRE because of the code motion (which HOIST doesn't do).

   Expressions we are interested in GCSE-ing are of the form
   (set (pseudo-reg) (expression)).
   Function want_to_gcse_p says what these are.

   In addition, expressions in REG_EQUAL notes are candidates for GCSE-ing.
   This allows PRE to hoist expressions that are expressed in multiple insns,
   such as complex address calculations (e.g. for PIC code, or loads with a
   high part and a low part).
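
   For example (an illustrative sketch, not any particular back-end's
   actual output), a PIC address often materializes as two insns,

       (set (reg 100) (high (symbol_ref "x")))
       (set (reg 101) (lo_sum (reg 100) (symbol_ref "x")))

   where the second insn carries a REG_EQUAL note equivalent to
   (symbol_ref "x"); recording the note value lets GCSE treat the whole
   two-insn address computation as one expression.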
   PRE handles moving invariant expressions out of loops (by treating them as
   partially redundant).
   **********************

   We used to support multiple passes but there are diminishing returns in
   doing so.  The first pass usually makes 90% of the changes that are doable.
   A second pass can make a few more changes made possible by the first pass.
   Experiments show any further passes don't make enough changes to justify
   the expense.

   A study of spec92 using an unlimited number of passes:
   [1 pass] = 1208 substitutions, [2] = 577, [3] = 202, [4] = 192, [5] = 83,
   [6] = 34, [7] = 17, [8] = 9, [9] = 4, [10] = 4, [11] = 2,
   [12] = 2, [13] = 1, [15] = 1, [16] = 2, [41] = 1

   It was found doing copy propagation between each pass enables further
   substitutions.

   This study was done before expressions in REG_EQUAL notes were added as
   candidate expressions for optimization, and before the GIMPLE optimizers
   were added.  Probably, multiple passes are even less efficient now than
   at the time when the study was conducted.

   PRE is quite expensive in complicated functions because the DFA can take
   a while to converge.  Hence we only perform one pass.
   **********************

   The steps for PRE are:

   1) Build the hash table of expressions we wish to GCSE (expr_hash_table).

   2) Perform the data flow analysis for PRE.

   3) Delete the redundant instructions.

   4) Insert the required copies [if any] that make the partially
      redundant instructions fully redundant.

   5) For other reaching expressions, insert an instruction to copy the value
      to a newly created pseudo that will reach the redundant instruction.

   The deletion is done first so that when we do insertions we
   know which pseudo reg to use.
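
   As a source-level sketch of steps 3-5 (illustrative only; the pass
   itself works on RTL), consider:

       if (cond)
	 x = a + b;
       y = a + b;   <- partially redundant

   Insertion places "t = a + b" on the path where the expression was not
   available, the available computation is copied into the new pseudo T
   on the other path, and the redundant computation is deleted, replaced
   by "y = t".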
   Various papers have argued that PRE DFA is expensive (O(n^2)) and others
   argue it is not.  The number of iterations for the algorithm to converge
   is typically 2-4 so I don't view it as that expensive (relatively speaking).

   PRE GCSE depends heavily on the second CPROP pass to clean up the copies
   we create.  To make an expression reach the place where it's redundant,
   the result of the expression is copied to a new register, and the redundant
   expression is deleted by replacing it with this new register.  Classic GCSE
   doesn't have this problem as much as it computes the reaching defs of
   each register in each block and thus can try to use an existing
   register.  */
/* GCSE global vars.  */

struct target_gcse default_target_gcse;
#if SWITCHABLE_TARGET
struct target_gcse *this_target_gcse = &default_target_gcse;
#endif

/* Set to nonzero if CSE should run after all GCSE optimizations are done.  */
int flag_rerun_cse_after_global_opts;

/* An obstack for our working variables.  */
static struct obstack gcse_obstack;

struct reg_use { rtx reg_rtx; };
/* Hash table of expressions.  */

struct expr
{
  /* The expression.  */
  rtx expr;
  /* Index in the available expression bitmaps.  */
  int bitmap_index;
  /* Next entry with the same hash.  */
  struct expr *next_same_hash;
  /* List of anticipatable occurrences in basic blocks in the function.
     An "anticipatable occurrence" is one that is the first occurrence in the
     basic block, the operands are not modified in the basic block prior
     to the occurrence and the output is not used between the start of
     the block and the occurrence.  */
  struct occr *antic_occr;
  /* List of available occurrences in basic blocks in the function.
     An "available occurrence" is one that is the last occurrence in the
     basic block and the operands are not modified by following statements in
     the basic block [including this insn].  */
  struct occr *avail_occr;
  /* Non-null if the computation is PRE redundant.
     The value is the newly created pseudo-reg to record a copy of the
     expression in all the places that reach the redundant copy.  */
  rtx reaching_reg;
  /* Maximum distance in instructions this expression can travel.
     We avoid moving simple expressions for more than a few instructions
     to keep register pressure under control.
     A value of "0" removes restrictions on how far the expression can
     travel.  */
  int max_distance;
};
/* Occurrence of an expression.
   There is one per basic block.  If a pattern appears more than once the
   last appearance is used [or first for anticipatable expressions].  */

struct occr
{
  /* Next occurrence of this expression.  */
  struct occr *next;
  /* The insn that computes the expression.  */
  rtx insn;
  /* Nonzero if this [anticipatable] occurrence has been deleted.  */
  char deleted_p;
  /* Nonzero if this [available] occurrence has been copied to
     reaching_reg.  */
  /* ??? This is mutually exclusive with deleted_p, so they could share
     the same byte.  */
  char copied_p;
};

typedef struct occr *occr_t;
DEF_VEC_P (occr_t);
DEF_VEC_ALLOC_P (occr_t, heap);
/* Expression hash tables.
   Each hash table is an array of buckets.
   ??? It is known that if it were an array of entries, structure elements
   `next_same_hash' and `bitmap_index' wouldn't be necessary.  However, it is
   not clear whether in the final analysis a sufficient amount of memory would
   be saved as the size of the available expression bitmaps would be larger
   [one could build a mapping table without holes afterwards though].
   Someday I'll perform the computation and figure it out.  */

struct hash_table_d
{
  /* The table itself.
     This is an array of `expr_hash_table_size' elements.  */
  struct expr **table;

  /* Size of the hash table, in elements.  */
  unsigned int size;

  /* Number of hash table elements.  */
  unsigned int n_elems;
};

/* Expression hash table.  */
static struct hash_table_d expr_hash_table;
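
/* A minimal sketch (the helper name is made up; nothing in this file
   defines or uses it) of how a bucket chain is searched: hash X to a
   bucket with hash_expr, then walk the next_same_hash links comparing
   entries with expr_equiv_p.  insert_expr_in_table below performs
   exactly this walk before deciding whether to create a new entry.  */
#if 0
static struct expr *
lookup_expr_sketch (rtx x, enum machine_mode mode, struct hash_table_d *table)
{
  int do_not_record_p = 0;
  unsigned int hash = hash_expr (x, mode, &do_not_record_p, table->size);
  struct expr *cur;

  if (do_not_record_p)
    return NULL;
  for (cur = table->table[hash]; cur != NULL; cur = cur->next_same_hash)
    if (expr_equiv_p (cur->expr, x))
      return cur;
  return NULL;
}
#endif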
/* This is a list of expressions which are MEMs and will be used by load
   or store motion.
   Load motion tracks MEMs which aren't killed by anything except itself,
   i.e. loads and stores to a single location.
   We can then allow movement of these MEM refs with a little special
   allowance.  (all stores copy the same value to the reaching reg used
   for the loads).  This means all values used to store into memory must have
   no side effects so we can re-issue the setter value.  */
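
/* For instance (an illustrative source-level sketch), if L is such a
   single location, the store/load pair

       *L = a;   ...   b = *L;

   can be rewritten through one reaching register as

       r = a;  *L = r;   ...   b = r;

   which is why the stored value must be free of side effects: it may be
   re-issued from the register.  */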
struct ls_expr
{
  struct expr * expr;		/* Gcse expression reference for LM.  */
  rtx pattern;			/* Pattern of this mem.  */
  rtx pattern_regs;		/* List of registers mentioned by the mem.  */
  rtx loads;			/* INSN list of loads seen.  */
  rtx stores;			/* INSN list of stores seen.  */
  struct ls_expr * next;	/* Next in the list.  */
  int invalid;			/* Invalid for some reason.  */
  int index;			/* If it maps to a bitmap index.  */
  unsigned int hash_index;	/* Index when in a hash table.  */
  rtx reaching_reg;		/* Register to use when re-writing.  */
};

/* Head of the list of load/store memory refs.  */
static struct ls_expr * pre_ldst_mems = NULL;

/* Hashtable for the load/store memory refs.  */
static htab_t pre_ldst_table = NULL;

/* Bitmap containing one bit for each register in the program.
   Used when performing GCSE to track which registers have been set since
   the start of the basic block.  */
static regset reg_set_bitmap;

/* Array, indexed by basic block number for a list of insns which modify
   memory within that block.  */
static VEC (rtx,heap) **modify_mem_list;
static bitmap modify_mem_list_set;

typedef struct modify_pair_s
{
  rtx dest;			/* A MEM.  */
  rtx dest_addr;		/* The canonical address of `dest'.  */
} modify_pair;

DEF_VEC_O(modify_pair);
DEF_VEC_ALLOC_O(modify_pair,heap);

/* This array parallels modify_mem_list, except that it stores MEMs
   being set and their canonicalized memory addresses.  */
static VEC (modify_pair,heap) **canon_modify_mem_list;

/* Bitmap indexed by block numbers to record which blocks contain
   function calls.  */
static bitmap blocks_with_calls;
/* Various variables for statistics gathering.  */

/* Memory used in a pass.
   This isn't intended to be absolutely precise.  Its intent is only
   to keep an eye on memory usage.  */
static int bytes_used;

/* GCSE substitutions made.  */
static int gcse_subst_count;
/* Number of copy instructions created.  */
static int gcse_create_count;

/* Doing code hoisting.  */
static bool doing_code_hoisting_p = false;

/* For available exprs.  */
static sbitmap *ae_kill;
static void compute_can_copy (void);
static void *gmalloc (size_t) ATTRIBUTE_MALLOC;
static void *gcalloc (size_t, size_t) ATTRIBUTE_MALLOC;
static void *gcse_alloc (unsigned long);
static void alloc_gcse_mem (void);
static void free_gcse_mem (void);
static void hash_scan_insn (rtx, struct hash_table_d *);
static void hash_scan_set (rtx, rtx, struct hash_table_d *);
static void hash_scan_clobber (rtx, rtx, struct hash_table_d *);
static void hash_scan_call (rtx, rtx, struct hash_table_d *);
static int want_to_gcse_p (rtx, int *);
static int oprs_unchanged_p (const_rtx, const_rtx, int);
static int oprs_anticipatable_p (const_rtx, const_rtx);
static int oprs_available_p (const_rtx, const_rtx);
static void insert_expr_in_table (rtx, enum machine_mode, rtx, int, int, int,
				  struct hash_table_d *);
static unsigned int hash_expr (const_rtx, enum machine_mode, int *, int);
static int expr_equiv_p (const_rtx, const_rtx);
static void record_last_reg_set_info (rtx, int);
static void record_last_mem_set_info (rtx);
static void record_last_set_info (rtx, const_rtx, void *);
static void compute_hash_table (struct hash_table_d *);
static void alloc_hash_table (struct hash_table_d *);
static void free_hash_table (struct hash_table_d *);
static void compute_hash_table_work (struct hash_table_d *);
static void dump_hash_table (FILE *, const char *, struct hash_table_d *);
static void compute_transp (const_rtx, int, sbitmap *);
static void compute_local_properties (sbitmap *, sbitmap *, sbitmap *,
				      struct hash_table_d *);
static void mems_conflict_for_gcse_p (rtx, const_rtx, void *);
static int load_killed_in_block_p (const_basic_block, int, const_rtx, int);
static void canon_list_insert (rtx, const_rtx, void *);
static void alloc_pre_mem (int, int);
static void free_pre_mem (void);
static struct edge_list *compute_pre_data (void);
static int pre_expr_reaches_here_p (basic_block, struct expr *,
				    basic_block);
static void insert_insn_end_basic_block (struct expr *, basic_block);
static void pre_insert_copy_insn (struct expr *, rtx);
static void pre_insert_copies (void);
static int pre_delete (void);
static int pre_gcse (struct edge_list *);
static int one_pre_gcse_pass (void);
static void add_label_notes (rtx, rtx);
static void alloc_code_hoist_mem (int, int);
static void free_code_hoist_mem (void);
static void compute_code_hoist_vbeinout (void);
static void compute_code_hoist_data (void);
static int hoist_expr_reaches_here_p (basic_block, int, basic_block, char *,
				      int, int *);
static int hoist_code (void);
static int one_code_hoisting_pass (void);
static rtx process_insert_insn (struct expr *);
static int pre_edge_insert (struct edge_list *, struct expr **);
static int pre_expr_reaches_here_p_work (basic_block, struct expr *,
					 basic_block, char *);
static struct ls_expr * ldst_entry (rtx);
static void free_ldst_entry (struct ls_expr *);
static void free_ld_motion_mems (void);
static void print_ldst_list (FILE *);
static struct ls_expr * find_rtx_in_ldst (rtx);
static int simple_mem (const_rtx);
static void invalidate_any_buried_refs (rtx);
static void compute_ld_motion_mems (void);
static void trim_ld_motion_mems (void);
static void update_ld_motion_stores (struct expr *);
static void clear_modify_mem_tables (void);
static void free_modify_mem_tables (void);
static rtx gcse_emit_move_after (rtx, rtx, rtx);
static bool is_too_expensive (const char *);
#define GNEW(T) ((T *) gmalloc (sizeof (T)))
#define GCNEW(T) ((T *) gcalloc (1, sizeof (T)))

#define GNEWVEC(T, N) ((T *) gmalloc (sizeof (T) * (N)))
#define GCNEWVEC(T, N) ((T *) gcalloc ((N), sizeof (T)))

#define GNEWVAR(T, S) ((T *) gmalloc ((S)))
#define GCNEWVAR(T, S) ((T *) gcalloc (1, (S)))

#define GOBNEW(T) ((T *) gcse_alloc (sizeof (T)))
#define GOBNEWVAR(T, S) ((T *) gcse_alloc ((S)))
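
/* Typical usage, as seen later in this file: GNEWVEC for xmalloc'd
   arrays and GOBNEW for obstack-allocated structures, e.g.

       reg_avail_info = GNEWVEC (struct reg_avail_info, max_reg_num ());
       cur_expr = GOBNEW (struct expr);

   All of the underlying wrappers also add the request to bytes_used for
   the memory statistics.  */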
/* Misc. utilities.  */

#define can_copy \
  (this_target_gcse->x_can_copy)
#define can_copy_init_p \
  (this_target_gcse->x_can_copy_init_p)

/* Compute which modes support reg/reg copy operations.  */

static void
compute_can_copy (void)
{
  int i;
#ifndef AVOID_CCMODE_COPIES
  rtx reg, insn;
#endif
  memset (can_copy, 0, NUM_MACHINE_MODES);

  start_sequence ();
  for (i = 0; i < NUM_MACHINE_MODES; i++)
    if (GET_MODE_CLASS (i) == MODE_CC)
      {
#ifdef AVOID_CCMODE_COPIES
	can_copy[i] = 0;
#else
	reg = gen_rtx_REG ((enum machine_mode) i, LAST_VIRTUAL_REGISTER + 1);
	insn = emit_insn (gen_rtx_SET (VOIDmode, reg, reg));
	if (recog (PATTERN (insn), insn, NULL) >= 0)
	  can_copy[i] = 1;
#endif
      }
    else
      can_copy[i] = 1;

  end_sequence ();
}

/* Returns whether the mode supports reg/reg copy operations.  */

bool
can_copy_p (enum machine_mode mode)
{
  if (! can_copy_init_p)
    {
      compute_can_copy ();
      can_copy_init_p = true;
    }

  return can_copy[mode] != 0;
}
/* Cover function to xmalloc to record bytes allocated.  */

static void *
gmalloc (size_t size)
{
  bytes_used += size;
  return xmalloc (size);
}

/* Cover function to xcalloc to record bytes allocated.  */

static void *
gcalloc (size_t nelem, size_t elsize)
{
  bytes_used += nelem * elsize;
  return xcalloc (nelem, elsize);
}

/* Cover function to obstack_alloc.  */

static void *
gcse_alloc (unsigned long size)
{
  bytes_used += size;
  return obstack_alloc (&gcse_obstack, size);
}
/* Allocate memory for the reg/memory set tracking tables.
   This is called at the start of each pass.  */

static void
alloc_gcse_mem (void)
{
  /* Allocate vars to track sets of regs.  */
  reg_set_bitmap = ALLOC_REG_SET (NULL);

  /* Allocate array to keep a list of insns which modify memory in each
     basic block.  */
  modify_mem_list = GCNEWVEC (VEC (rtx,heap) *, last_basic_block);
  canon_modify_mem_list = GCNEWVEC (VEC (modify_pair,heap) *,
				    last_basic_block);
  modify_mem_list_set = BITMAP_ALLOC (NULL);
  blocks_with_calls = BITMAP_ALLOC (NULL);
}

/* Free memory allocated by alloc_gcse_mem.  */

static void
free_gcse_mem (void)
{
  FREE_REG_SET (reg_set_bitmap);

  free_modify_mem_tables ();
  BITMAP_FREE (modify_mem_list_set);
  BITMAP_FREE (blocks_with_calls);
}
/* Compute the local properties of each recorded expression.

   Local properties are those that are defined by the block, irrespective of
   other blocks.

   An expression is transparent in a block if its operands are not modified
   in the block.

   An expression is computed (locally available) in a block if it is computed
   at least once and the expression would contain the same value if the
   computation was moved to the end of the block.

   An expression is locally anticipatable in a block if it is computed at
   least once and the expression would contain the same value if the
   computation was moved to the beginning of the block.

   We call this routine for PRE and code hoisting.  Both compute
   basically the same information and thus can easily share this code.

   TRANSP, COMP, and ANTLOC are destination sbitmaps for recording local
   properties.  If NULL, then it is not necessary to compute or record that
   particular property.

   TABLE controls which hash table to look at.  */
static void
compute_local_properties (sbitmap *transp, sbitmap *comp, sbitmap *antloc,
			  struct hash_table_d *table)
{
  unsigned int i;

  /* Initialize any bitmaps that were passed in.  */
  if (transp)
    {
      sbitmap_vector_ones (transp, last_basic_block);
    }

  if (comp)
    sbitmap_vector_zero (comp, last_basic_block);
  if (antloc)
    sbitmap_vector_zero (antloc, last_basic_block);

  for (i = 0; i < table->size; i++)
    {
      struct expr *expr;

      for (expr = table->table[i]; expr != NULL; expr = expr->next_same_hash)
	{
	  int indx = expr->bitmap_index;
	  struct occr *occr;

	  /* The expression is transparent in this block if it is not killed.
	     We start by assuming all are transparent [none are killed], and
	     then reset the bits for those that are.  */
	  if (transp)
	    compute_transp (expr->expr, indx, transp);

	  /* The occurrences recorded in antic_occr are exactly those that
	     we want to set to nonzero in ANTLOC.  */
	  if (antloc)
	    for (occr = expr->antic_occr; occr != NULL; occr = occr->next)
	      {
		SET_BIT (antloc[BLOCK_FOR_INSN (occr->insn)->index], indx);

		/* While we're scanning the table, this is a good place to
		   initialize this.  */
		occr->deleted_p = 0;
	      }

	  /* The occurrences recorded in avail_occr are exactly those that
	     we want to set to nonzero in COMP.  */
	  if (comp)
	    for (occr = expr->avail_occr; occr != NULL; occr = occr->next)
	      {
		SET_BIT (comp[BLOCK_FOR_INSN (occr->insn)->index], indx);

		/* While we're scanning the table, this is a good place to
		   initialize this.  */
		occr->copied_p = 0;
	      }

	  /* While we're scanning the table, this is a good place to
	     initialize this.  */
	  expr->reaching_reg = 0;
	}
    }
}
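
/* A worked example of these properties (illustrative pseudo-RTL): in a
   block containing

       r5 = r1 + r2
       r1 = ...
       r6 = r1 + r2

   the first occurrence of r1+r2 is anticipatable (counted in ANTLOC:
   operands unmodified from the block start), the second is available
   (counted in COMP: operands unmodified through the block end), and the
   expression is not transparent (bit cleared in TRANSP) because r1 is
   set within the block.  */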
/* Hash table support.  */

struct reg_avail_info
{
  basic_block last_bb;
  int first_set;
  int last_set;
};

static struct reg_avail_info *reg_avail_info;
static basic_block current_bb;
/* See whether X, the source of a set, is something we want to consider for
   GCSE.  */

static int
want_to_gcse_p (rtx x, int *max_distance_ptr)
{
#ifdef STACK_REGS
  /* On register stack architectures, don't GCSE constants from the
     constant pool, as the benefits are often swamped by the overhead
     of shuffling the register stack between basic blocks.  */
  if (IS_STACK_MODE (GET_MODE (x)))
    x = avoid_constant_pool_reference (x);
#endif

  /* GCSE'ing constants:

     We do not specifically distinguish between constant and non-constant
     expressions in PRE and Hoist.  We use set_src_cost below to limit
     the maximum distance simple expressions can travel.

     Nevertheless, constants are much easier to GCSE, and, hence,
     it is easy to overdo the optimizations.  Usually, excessive PRE and
     Hoisting of constants leads to increased register pressure.

     RA can deal with this by rematerializing some of the constants.
     Therefore, it is important that the back-end generates sets of constants
     in a way that allows reload to rematerialize them under high register
     pressure, i.e., a pseudo register with REG_EQUAL to constant
     is set only once.  Failing to do so will result in IRA/reload
     spilling such constants under high register pressure instead of
     rematerializing them.  */

  switch (GET_CODE (x))
    {
    case REG:
    case SUBREG:
    case CALL:
      return 0;

    case CONST_INT:
    case CONST_DOUBLE:
    case CONST_VECTOR:
      if (!doing_code_hoisting_p)
	/* Do not PRE constants.  */
	return 0;

      /* FALLTHRU */

    default:
      if (doing_code_hoisting_p)
	/* PRE doesn't implement max_distance restriction.  */
	{
	  int cost;
	  int max_distance;

	  gcc_assert (!optimize_function_for_speed_p (cfun)
		      && optimize_function_for_size_p (cfun));
	  cost = set_src_cost (x, 0);

	  if (cost < COSTS_N_INSNS (GCSE_UNRESTRICTED_COST))
	    {
	      max_distance = (GCSE_COST_DISTANCE_RATIO * cost) / 10;
	      if (max_distance == 0)
		return 0;

	      gcc_assert (max_distance > 0);
	    }
	  else
	    max_distance = 0;

	  if (max_distance_ptr)
	    *max_distance_ptr = max_distance;
	}

      return can_assign_to_reg_without_clobbers_p (x);
    }
}
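
/* A worked example of the distance throttle above: max_distance is
   (GCSE_COST_DISTANCE_RATIO * cost) / 10, so a ratio of 10 would let an
   expression travel exactly as many instructions as its set_src_cost,
   a ratio of 5 half as many, and a computed distance of 0 rejects the
   expression outright via the early return.  */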
/* Used internally by can_assign_to_reg_without_clobbers_p.  */

static GTY(()) rtx test_insn;

/* Return true if we can assign X to a pseudo register such that the
   resulting insn does not result in clobbering a hard register as a
   side-effect.

   Additionally, if the target requires it, check that the resulting insn
   can be copied.  If it cannot, this means that X is special and probably
   has hidden side-effects we don't want to mess with.

   This function is typically used by code motion passes, to verify
   that it is safe to insert an insn without worrying about clobbering
   maybe live hard regs.  */

bool
can_assign_to_reg_without_clobbers_p (rtx x)
{
  int num_clobbers = 0;
  int icode;

  /* If this is a valid operand, we are OK.  If it's VOIDmode, we aren't.  */
  if (general_operand (x, GET_MODE (x)))
    return 1;
  else if (GET_MODE (x) == VOIDmode)
    return 0;

  /* Otherwise, check if we can make a valid insn from it.  First initialize
     our test insn if we haven't already.  */
  if (test_insn == 0)
    {
      test_insn
	= make_insn_raw (gen_rtx_SET (VOIDmode,
				      gen_rtx_REG (word_mode,
						   FIRST_PSEUDO_REGISTER * 2),
				      const0_rtx));
      NEXT_INSN (test_insn) = PREV_INSN (test_insn) = 0;
    }

  /* Now make an insn like the one we would make when GCSE'ing and see if
     valid.  */
  PUT_MODE (SET_DEST (PATTERN (test_insn)), GET_MODE (x));
  SET_SRC (PATTERN (test_insn)) = x;

  icode = recog (PATTERN (test_insn), test_insn, &num_clobbers);
  if (icode < 0)
    return false;

  if (num_clobbers > 0 && added_clobbers_hard_reg_p (icode))
    return false;

  if (targetm.cannot_copy_insn_p && targetm.cannot_copy_insn_p (test_insn))
    return false;

  return true;
}
/* Return nonzero if the operands of expression X are unchanged from the
   start of INSN's basic block up to but not including INSN (if AVAIL_P == 0),
   or from INSN to the end of INSN's basic block (if AVAIL_P != 0).  */

static int
oprs_unchanged_p (const_rtx x, const_rtx insn, int avail_p)
{
  int i, j;
  enum rtx_code code;
  const char *fmt;

  if (x == 0)
    return 1;

  code = GET_CODE (x);
  switch (code)
    {
    case REG:
      {
	struct reg_avail_info *info = &reg_avail_info[REGNO (x)];

	if (info->last_bb != current_bb)
	  return 1;
	if (avail_p)
	  return info->last_set < DF_INSN_LUID (insn);
	else
	  return info->first_set >= DF_INSN_LUID (insn);
      }

    case MEM:
      if (load_killed_in_block_p (current_bb, DF_INSN_LUID (insn),
				  x, avail_p))
	return 0;
      else
	return oprs_unchanged_p (XEXP (x, 0), insn, avail_p);

    case PRE_DEC: case PRE_INC: case POST_DEC: case POST_INC:
    case PRE_MODIFY: case POST_MODIFY:
      return 0;

    case PC: case CC0: case CONST: case CONST_INT: case CONST_DOUBLE:
    case CONST_VECTOR: case SYMBOL_REF: case LABEL_REF:
    case ADDR_VEC: case ADDR_DIFF_VEC:
      return 1;

    default:
      break;
    }

  for (i = GET_RTX_LENGTH (code) - 1, fmt = GET_RTX_FORMAT (code); i >= 0; i--)
    {
      if (fmt[i] == 'e')
	{
	  /* If we are about to do the last recursive call needed at this
	     level, change it into iteration.  This function is called enough
	     to be worth it.  */
	  if (i == 0)
	    return oprs_unchanged_p (XEXP (x, i), insn, avail_p);

	  else if (! oprs_unchanged_p (XEXP (x, i), insn, avail_p))
	    return 0;
	}
      else if (fmt[i] == 'E')
	for (j = 0; j < XVECLEN (x, i); j++)
	  if (! oprs_unchanged_p (XVECEXP (x, i, j), insn, avail_p))
	    return 0;
    }

  return 1;
}
/* Info passed from load_killed_in_block_p to mems_conflict_for_gcse_p.  */

struct mem_conflict_info
{
  /* A memory reference for a load instruction, mems_conflict_for_gcse_p will
     see if a memory store conflicts with this memory load.  */
  const_rtx mem;

  /* True if mems_conflict_for_gcse_p finds a conflict between two memory
     references.  */
  bool conflict;
};

/* DEST is the output of an instruction.  If it is a memory reference and
   possibly conflicts with the load found in DATA, then communicate this
   information back through DATA.  */

static void
mems_conflict_for_gcse_p (rtx dest, const_rtx setter ATTRIBUTE_UNUSED,
			  void *data)
{
  struct mem_conflict_info *mci = (struct mem_conflict_info *) data;

  while (GET_CODE (dest) == SUBREG
	 || GET_CODE (dest) == ZERO_EXTRACT
	 || GET_CODE (dest) == STRICT_LOW_PART)
    dest = XEXP (dest, 0);

  /* If DEST is not a MEM, then it will not conflict with the load.  Note
     that function calls are assumed to clobber memory, but are handled
     elsewhere.  */
  if (! MEM_P (dest))
    return;

  /* If we are setting a MEM in our list of specially recognized MEMs,
     don't mark as killed this time.  */
  if (pre_ldst_mems != NULL && expr_equiv_p (dest, mci->mem))
    {
      if (!find_rtx_in_ldst (dest))
	mci->conflict = true;
      return;
    }

  if (true_dependence (dest, GET_MODE (dest), mci->mem))
    mci->conflict = true;
}
/* Return nonzero if the expression in X (a memory reference) is killed
   in block BB before or after the insn with the LUID in UID_LIMIT.
   AVAIL_P is nonzero for kills after UID_LIMIT, and zero for kills
   before UID_LIMIT.

   To check the entire block, set UID_LIMIT to max_uid + 1 and
   AVAIL_P to 0.  */

static int
load_killed_in_block_p (const_basic_block bb, int uid_limit, const_rtx x,
			int avail_p)
{
  VEC (rtx,heap) *list = modify_mem_list[bb->index];
  rtx setter;
  unsigned ix;

  /* If this is a readonly then we aren't going to be changing it.  */
  if (MEM_READONLY_P (x))
    return 0;

  FOR_EACH_VEC_ELT_REVERSE (rtx, list, ix, setter)
    {
      struct mem_conflict_info mci;

      /* Ignore entries in the list that do not apply.  */
      if ((avail_p
	   && DF_INSN_LUID (setter) < uid_limit)
	  || (! avail_p
	      && DF_INSN_LUID (setter) > uid_limit))
	continue;

      /* If SETTER is a call everything is clobbered.  Note that calls
	 to pure functions are never put on the list, so we need not
	 worry about them.  */
      if (CALL_P (setter))
	return 1;

      /* SETTER must be an INSN of some kind that sets memory.  Call
	 note_stores to examine each hunk of memory that is modified.  */
      mci.mem = x;
      mci.conflict = false;
      note_stores (PATTERN (setter), mems_conflict_for_gcse_p, &mci);
      if (mci.conflict)
	return 1;
    }
  return 0;
}
/* Return nonzero if the operands of expression X are unchanged from
   the start of INSN's basic block up to but not including INSN.  */

static int
oprs_anticipatable_p (const_rtx x, const_rtx insn)
{
  return oprs_unchanged_p (x, insn, 0);
}

/* Return nonzero if the operands of expression X are unchanged from
   INSN to the end of INSN's basic block.  */

static int
oprs_available_p (const_rtx x, const_rtx insn)
{
  return oprs_unchanged_p (x, insn, 1);
}
/* Hash expression X.

   MODE is only used if X is a CONST_INT.  DO_NOT_RECORD_P is a boolean
   indicating if a volatile operand is found or if the expression contains
   something we don't want to insert in the table.  HASH_TABLE_SIZE is
   the current size of the hash table to be probed.  */

static unsigned int
hash_expr (const_rtx x, enum machine_mode mode, int *do_not_record_p,
	   int hash_table_size)
{
  unsigned int hash;

  *do_not_record_p = 0;

  hash = hash_rtx (x, mode, do_not_record_p, NULL, /*have_reg_qty=*/false);
  return hash % hash_table_size;
}

/* Return nonzero if exp1 is equivalent to exp2.  */

static int
expr_equiv_p (const_rtx x, const_rtx y)
{
  return exp_equiv_p (x, y, 0, true);
}
/* Insert expression X in INSN in the hash TABLE.
   If it is already present, record it as the last occurrence in INSN's
   basic block.

   MODE is the mode of the value X is being stored into.
   It is only used if X is a CONST_INT.

   ANTIC_P is nonzero if X is an anticipatable expression.
   AVAIL_P is nonzero if X is an available expression.

   MAX_DISTANCE is the maximum distance in instructions this expression can
   travel.  */

static void
insert_expr_in_table (rtx x, enum machine_mode mode, rtx insn, int antic_p,
		      int avail_p, int max_distance, struct hash_table_d *table)
{
  int found, do_not_record_p;
  unsigned int hash;
  struct expr *cur_expr, *last_expr = NULL;
  struct occr *antic_occr, *avail_occr;

  hash = hash_expr (x, mode, &do_not_record_p, table->size);

  /* Do not insert expression in table if it contains volatile operands,
     or if hash_expr determines the expression is something we don't want
     to or can't handle.  */
  if (do_not_record_p)
    return;

  cur_expr = table->table[hash];
  found = 0;

  while (cur_expr && 0 == (found = expr_equiv_p (cur_expr->expr, x)))
    {
      /* If the expression isn't found, save a pointer to the end of
	 the list.  */
      last_expr = cur_expr;
      cur_expr = cur_expr->next_same_hash;
    }

  if (! found)
    {
      cur_expr = GOBNEW (struct expr);
      bytes_used += sizeof (struct expr);
      if (table->table[hash] == NULL)
	/* This is the first pattern that hashed to this index.  */
	table->table[hash] = cur_expr;
      else
	/* Add EXPR to end of this hash chain.  */
	last_expr->next_same_hash = cur_expr;

      /* Set the fields of the expr element.  */
      cur_expr->expr = x;
      cur_expr->bitmap_index = table->n_elems++;
      cur_expr->next_same_hash = NULL;
      cur_expr->antic_occr = NULL;
      cur_expr->avail_occr = NULL;
      gcc_assert (max_distance >= 0);
      cur_expr->max_distance = max_distance;
    }
  else
    /* We have a match.  */
    gcc_assert (cur_expr->max_distance == max_distance);

  /* Now record the occurrence(s).  */
  if (antic_p)
    {
      antic_occr = cur_expr->antic_occr;

      if (antic_occr
	  && BLOCK_FOR_INSN (antic_occr->insn) != BLOCK_FOR_INSN (insn))
	antic_occr = NULL;

      if (antic_occr)
	/* Found another instance of the expression in the same basic block.
	   Prefer the currently recorded one.  We want the first one in the
	   block and the block is scanned from start to end.  */
	; /* nothing to do */
      else
	{
	  /* First occurrence of this expression in this basic block.  */
	  antic_occr = GOBNEW (struct occr);
	  bytes_used += sizeof (struct occr);
	  antic_occr->insn = insn;
	  antic_occr->next = cur_expr->antic_occr;
	  antic_occr->deleted_p = 0;
	  cur_expr->antic_occr = antic_occr;
	}
    }

  if (avail_p)
    {
      avail_occr = cur_expr->avail_occr;

      if (avail_occr
	  && BLOCK_FOR_INSN (avail_occr->insn) == BLOCK_FOR_INSN (insn))
	{
	  /* Found another instance of the expression in the same basic block.
	     Prefer this occurrence to the currently recorded one.  We want
	     the last one in the block and the block is scanned from start
	     to end.  */
	  avail_occr->insn = insn;
	}
      else
	{
	  /* First occurrence of this expression in this basic block.  */
	  avail_occr = GOBNEW (struct occr);
	  bytes_used += sizeof (struct occr);
	  avail_occr->insn = insn;
	  avail_occr->next = cur_expr->avail_occr;
	  avail_occr->deleted_p = 0;
	  cur_expr->avail_occr = avail_occr;
	}
    }
}
/* Scan SET present in INSN and add an entry to the hash TABLE.  */

static void
hash_scan_set (rtx set, rtx insn, struct hash_table_d *table)
{
  rtx src = SET_SRC (set);
  rtx dest = SET_DEST (set);
  rtx note;

  if (GET_CODE (src) == CALL)
    hash_scan_call (src, insn, table);

  else if (REG_P (dest))
    {
      unsigned int regno = REGNO (dest);
      int max_distance = 0;

      /* See if a REG_EQUAL note shows this equivalent to a simpler expression.

	 This allows us to do a single GCSE pass and still eliminate
	 redundant constants, addresses or other expressions that are
	 constructed with multiple instructions.

	 However, keep the original SRC if INSN is a simple reg-reg move.
	 In this case, there will almost always be a REG_EQUAL note on the
	 insn that sets SRC.  By recording the REG_EQUAL value here as SRC
	 for INSN, we miss copy propagation opportunities and we perform the
	 same PRE GCSE operation repeatedly on the same REG_EQUAL value if we
	 do more than one PRE GCSE pass.

	 Note that this does not impede profitable constant propagations.  We
	 "look through" reg-reg sets in lookup_avail_set.  */
      note = find_reg_equal_equiv_note (insn);
      if (note != 0
	  && REG_NOTE_KIND (note) == REG_EQUAL
	  && !REG_P (src)
	  && want_to_gcse_p (XEXP (note, 0), NULL))
	src = XEXP (note, 0), set = gen_rtx_SET (VOIDmode, dest, src);

      /* Only record sets of pseudo-regs in the hash table.  */
      if (regno >= FIRST_PSEUDO_REGISTER
	  /* Don't GCSE something if we can't do a reg/reg copy.  */
	  && can_copy_p (GET_MODE (dest))
	  /* GCSE commonly inserts instructions after the insn.  We can't
	     do that easily for EH edges so disable GCSE on these for now.  */
	  /* ??? We can now easily create new EH landing pads at the
	     gimple level, for splitting edges; there's no reason we
	     can't do the same thing at the rtl level.  */
	  && !can_throw_internal (insn)
	  /* Is SET_SRC something we want to gcse?  */
	  && want_to_gcse_p (src, &max_distance)
	  /* Don't CSE a nop.  */
	  && ! set_noop_p (set)
	  /* Don't GCSE if it has attached REG_EQUIV note.
	     At this point only function parameters should have
	     REG_EQUIV notes and if the argument slot is used somewhere
	     explicitly, it means address of parameter has been taken,
	     so we should not extend the lifetime of the pseudo.  */
	  && (note == NULL_RTX || ! MEM_P (XEXP (note, 0))))
	{
	  /* An expression is not anticipatable if its operands are
	     modified before this insn or if this is not the only SET in
	     this insn.  The latter condition does not have to mean that
	     SRC itself is not anticipatable, but we just will not be
	     able to handle code motion of insns with multiple sets.  */
	  int antic_p = oprs_anticipatable_p (src, insn)
			&& !multiple_sets (insn);
	  /* An expression is not available if its operands are
	     subsequently modified, including this insn.  It's also not
	     available if this is a branch, because we can't insert
	     a set after the branch.  */
	  int avail_p = (oprs_available_p (src, insn)
			 && ! JUMP_P (insn));

	  insert_expr_in_table (src, GET_MODE (dest), insn, antic_p, avail_p,
				max_distance, table);
	}
    }

  /* In case of store we want to consider the memory value as available in
     the REG stored in that memory.  This makes it possible to remove
     redundant loads due to stores to the same location.  */
  else if (flag_gcse_las && REG_P (src) && MEM_P (dest))
    {
      unsigned int regno = REGNO (src);
      int max_distance = 0;

      /* Only record sets of pseudo-regs in the hash table.  */
      if (regno >= FIRST_PSEUDO_REGISTER
	  /* Don't GCSE something if we can't do a reg/reg copy.  */
	  && can_copy_p (GET_MODE (src))
	  /* GCSE commonly inserts instructions after the insn.  We can't
	     do that easily for EH edges so disable GCSE on these for now.  */
	  && !can_throw_internal (insn)
	  /* Is SET_DEST something we want to gcse?  */
	  && want_to_gcse_p (dest, &max_distance)
	  /* Don't CSE a nop.  */
	  && ! set_noop_p (set)
	  /* Don't GCSE if it has attached REG_EQUIV note.
	     At this point only function parameters should have
	     REG_EQUIV notes and if the argument slot is used somewhere
	     explicitly, it means address of parameter has been taken,
	     so we should not extend the lifetime of the pseudo.  */
	  && ((note = find_reg_note (insn, REG_EQUIV, NULL_RTX)) == 0
	      || ! MEM_P (XEXP (note, 0))))
	{
	  /* Stores are never anticipatable.  */
	  int antic_p = 0;
	  /* An expression is not available if its operands are
	     subsequently modified, including this insn.  It's also not
	     available if this is a branch, because we can't insert
	     a set after the branch.  */
	  int avail_p = oprs_available_p (dest, insn)
			&& ! JUMP_P (insn);

	  /* Record the memory expression (DEST) in the hash table.  */
	  insert_expr_in_table (dest, GET_MODE (dest), insn,
				antic_p, avail_p, max_distance, table);
	}
    }
}
static void
hash_scan_clobber (rtx x ATTRIBUTE_UNUSED, rtx insn ATTRIBUTE_UNUSED,
		   struct hash_table_d *table ATTRIBUTE_UNUSED)
{
  /* Currently nothing to do.  */
}

static void
hash_scan_call (rtx x ATTRIBUTE_UNUSED, rtx insn ATTRIBUTE_UNUSED,
		struct hash_table_d *table ATTRIBUTE_UNUSED)
{
  /* Currently nothing to do.  */
}

/* Process INSN and add hash table entries as appropriate.  */

static void
hash_scan_insn (rtx insn, struct hash_table_d *table)
{
  rtx pat = PATTERN (insn);
  int i;

  /* Pick out the sets of INSN and for other forms of instructions record
     what's been modified.  */

  if (GET_CODE (pat) == SET)
    hash_scan_set (pat, insn, table);

  else if (GET_CODE (pat) == CLOBBER)
    hash_scan_clobber (pat, insn, table);

  else if (GET_CODE (pat) == CALL)
    hash_scan_call (pat, insn, table);

  else if (GET_CODE (pat) == PARALLEL)
    for (i = 0; i < XVECLEN (pat, 0); i++)
      {
	rtx x = XVECEXP (pat, 0, i);

	if (GET_CODE (x) == SET)
	  hash_scan_set (x, insn, table);
	else if (GET_CODE (x) == CLOBBER)
	  hash_scan_clobber (x, insn, table);
	else if (GET_CODE (x) == CALL)
	  hash_scan_call (x, insn, table);
      }
}
/* Dump the hash table TABLE to file FILE under the name NAME.  */

static void
dump_hash_table (FILE *file, const char *name, struct hash_table_d *table)
{
  int i;
  /* Flattened out table, so it's printed in proper order.  */
  struct expr **flat_table;
  unsigned int *hash_val;
  struct expr *expr;

  flat_table = XCNEWVEC (struct expr *, table->n_elems);
  hash_val = XNEWVEC (unsigned int, table->n_elems);

  for (i = 0; i < (int) table->size; i++)
    for (expr = table->table[i]; expr != NULL; expr = expr->next_same_hash)
      {
	flat_table[expr->bitmap_index] = expr;
	hash_val[expr->bitmap_index] = i;
      }

  fprintf (file, "%s hash table (%d buckets, %d entries)\n",
	   name, table->size, table->n_elems);

  for (i = 0; i < (int) table->n_elems; i++)
    if (flat_table[i] != 0)
      {
	expr = flat_table[i];
	fprintf (file, "Index %d (hash value %d; max distance %d)\n  ",
		 expr->bitmap_index, hash_val[i], expr->max_distance);
	print_rtl (file, expr->expr);
	fprintf (file, "\n");
      }

  fprintf (file, "\n");

  free (flat_table);
  free (hash_val);
}
/* Record register first/last/block set information for REGNO in INSN.

   first_set records the first place in the block where the register
   is set and is used to compute "anticipatability".

   last_set records the last place in the block where the register
   is set and is used to compute "availability".

   last_bb records the block for which first_set and last_set are
   valid, as a quick test to invalidate them.  */
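
/* For example (illustrative LUIDs): if r7 is set at LUID 3 and again at
   LUID 9 in the current block, then first_set = 3 and last_set = 9.  For
   an insn at LUID 5, oprs_unchanged_p then reports r7 neither
   anticipatable (first_set >= 5 fails) nor available (last_set < 5
   fails).  */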
static void
record_last_reg_set_info (rtx insn, int regno)
{
  struct reg_avail_info *info = &reg_avail_info[regno];
  int luid = DF_INSN_LUID (insn);

  info->last_set = luid;
  if (info->last_bb != current_bb)
    {
      info->last_bb = current_bb;
      info->first_set = luid;
    }
}
/* Record all of the canonicalized MEMs of record_last_mem_set_info's insn.
   Note we store a pair of elements in the list, so they have to be
   taken off pairwise.  */

static void
canon_list_insert (rtx dest ATTRIBUTE_UNUSED, const_rtx x ATTRIBUTE_UNUSED,
		   void * v_insn)
{
  rtx dest_addr, insn;
  int bb;
  modify_pair pair;

  while (GET_CODE (dest) == SUBREG
	 || GET_CODE (dest) == ZERO_EXTRACT
	 || GET_CODE (dest) == STRICT_LOW_PART)
    dest = XEXP (dest, 0);

  /* If DEST is not a MEM, then it will not conflict with a load.  Note
     that function calls are assumed to clobber memory, but are handled
     elsewhere.  */
  if (! MEM_P (dest))
    return;

  dest_addr = get_addr (XEXP (dest, 0));
  dest_addr = canon_rtx (dest_addr);
  insn = (rtx) v_insn;
  bb = BLOCK_FOR_INSN (insn)->index;

  pair.dest = dest;
  pair.dest_addr = dest_addr;
  VEC_safe_push (modify_pair, heap, canon_modify_mem_list[bb], pair);
}
/* Record memory modification information for INSN.  We do not actually care
   about the memory location(s) that are set, or even how they are set
   (consider a CALL_INSN).  We merely need to record which insns modify
   memory.  */

static void
record_last_mem_set_info (rtx insn)
{
  int bb = BLOCK_FOR_INSN (insn)->index;

  /* load_killed_in_block_p will handle the case of calls clobbering
     everything.  */
  VEC_safe_push (rtx, heap, modify_mem_list[bb], insn);
  bitmap_set_bit (modify_mem_list_set, bb);

  if (CALL_P (insn))
    bitmap_set_bit (blocks_with_calls, bb);
  else
    note_stores (PATTERN (insn), canon_list_insert, (void*) insn);
}
/* Called from compute_hash_table via note_stores to handle one
   SET or CLOBBER in an insn.  DATA is really the instruction in which
   the SET is taking place.  */

static void
record_last_set_info (rtx dest, const_rtx setter ATTRIBUTE_UNUSED, void *data)
{
  rtx last_set_insn = (rtx) data;

  if (GET_CODE (dest) == SUBREG)
    dest = SUBREG_REG (dest);

  if (REG_P (dest))
    record_last_reg_set_info (last_set_insn, REGNO (dest));
  else if (MEM_P (dest)
	   /* Ignore pushes, they clobber nothing.  */
	   && ! push_operand (dest, GET_MODE (dest)))
    record_last_mem_set_info (last_set_insn);
}
/* Top level function to create an expression hash table.

   Expression entries are placed in the hash table if
   - they are of the form (set (pseudo-reg) src),
   - src is something we want to perform GCSE on,
   - none of the operands are subsequently modified in the block.

   Currently src must be a pseudo-reg or a const_int.

   TABLE is the table computed.  */

static void
compute_hash_table_work (struct hash_table_d *table)
{
  int i;

  /* Re-cache any INSN_LIST nodes we have allocated.  */
  clear_modify_mem_tables ();
  /* Some working arrays used to track first and last set in each block.  */
  reg_avail_info = GNEWVEC (struct reg_avail_info, max_reg_num ());

  for (i = 0; i < max_reg_num (); ++i)
    reg_avail_info[i].last_bb = NULL;

  FOR_EACH_BB (current_bb)
    {
      rtx insn;
      unsigned int regno;

      /* First pass over the instructions records information used to
	 determine when registers and memory are first and last set.  */
      FOR_BB_INSNS (current_bb, insn)
	{
	  if (!NONDEBUG_INSN_P (insn))
	    continue;

	  if (CALL_P (insn))
	    {
	      hard_reg_set_iterator hrsi;
	      EXECUTE_IF_SET_IN_HARD_REG_SET (regs_invalidated_by_call,
					      0, regno, hrsi)
		record_last_reg_set_info (insn, regno);

	      if (! RTL_CONST_OR_PURE_CALL_P (insn))
		record_last_mem_set_info (insn);
	    }

	  note_stores (PATTERN (insn), record_last_set_info, insn);
	}

      /* The next pass builds the hash table.  */
      FOR_BB_INSNS (current_bb, insn)
	if (NONDEBUG_INSN_P (insn))
	  hash_scan_insn (insn, table);
    }

  free (reg_avail_info);
  reg_avail_info = NULL;
}
/* Allocate space for the set/expr hash TABLE.
   The maximum insn count in the function is used to determine the
   number of buckets to use.  */

static void
alloc_hash_table (struct hash_table_d *table)
{
  int n;

  n = get_max_insn_count ();

  table->size = n / 4;
  if (table->size < 11)
    table->size = 11;

  /* Attempt to maintain efficient use of hash table.
     Making it an odd number is simplest for now.
     ??? Later take some measurements.  */
  table->size |= 1;

  n = table->size * sizeof (struct expr *);
  table->table = GNEWVAR (struct expr *, n);
}
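
/* For example, a function whose get_max_insn_count is 4000 gets
   4000 / 4 = 1000 buckets, made odd as 1001; small functions bottom out
   at the 11-bucket minimum.  */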
/* Free things allocated by alloc_hash_table.  */

static void
free_hash_table (struct hash_table_d *table)
{
  free (table->table);
}

/* Compute the expression hash table TABLE.  */

static void
compute_hash_table (struct hash_table_d *table)
{
  /* Initialize count of number of entries in hash table.  */
  table->n_elems = 0;
  memset (table->table, 0, table->size * sizeof (struct expr *));

  compute_hash_table_work (table);
}
/* Expression tracking support.  */

/* Clear canon_modify_mem_list and modify_mem_list tables.  */
static void
clear_modify_mem_tables (void)
{
  unsigned i;
  bitmap_iterator bi;

  EXECUTE_IF_SET_IN_BITMAP (modify_mem_list_set, 0, i, bi)
    {
      VEC_free (rtx, heap, modify_mem_list[i]);
      VEC_free (modify_pair, heap, canon_modify_mem_list[i]);
    }
  bitmap_clear (modify_mem_list_set);
  bitmap_clear (blocks_with_calls);
}

/* Release memory used by modify_mem_list_set.  */

static void
free_modify_mem_tables (void)
{
  clear_modify_mem_tables ();
  free (modify_mem_list);
  free (canon_modify_mem_list);
  modify_mem_list = 0;
  canon_modify_mem_list = 0;
}
/* For each block, compute whether X is transparent.  X is either an
   expression or an assignment [though we don't care which, for this context
   an assignment is treated as an expression].  For each block where an
   element of X is modified, reset the INDX bit in BMAP.  */

static void
compute_transp (const_rtx x, int indx, sbitmap *bmap)
{
  int i, j;
  enum rtx_code code;
  const char *fmt;

  /* repeat is used to turn tail-recursion into iteration since GCC
     can't do it when there's no return value.  */
 repeat:

  if (x == 0)
    return;

  code = GET_CODE (x);
  switch (code)
    {
    case REG:
      {
	df_ref def;
	for (def = DF_REG_DEF_CHAIN (REGNO (x));
	     def;
	     def = DF_REF_NEXT_REG (def))
	  RESET_BIT (bmap[DF_REF_BB (def)->index], indx);
      }
      return;

    case MEM:
      if (! MEM_READONLY_P (x))
	{
	  bitmap_iterator bi;
	  unsigned bb_index;

	  /* First handle all the blocks with calls.  We don't need to
	     do any list walking for them.  */
	  EXECUTE_IF_SET_IN_BITMAP (blocks_with_calls, 0, bb_index, bi)
	    {
	      RESET_BIT (bmap[bb_index], indx);
	    }

	  /* Now iterate over the blocks which have memory modifications
	     but which do not have any calls.  */
	  EXECUTE_IF_AND_COMPL_IN_BITMAP (modify_mem_list_set,
					  blocks_with_calls,
					  0, bb_index, bi)
	    {
	      VEC (modify_pair,heap) *list
		= canon_modify_mem_list[bb_index];
	      modify_pair *pair;
	      unsigned ix;

	      FOR_EACH_VEC_ELT_REVERSE (modify_pair, list, ix, pair)
		{
		  rtx dest = pair->dest;
		  rtx dest_addr = pair->dest_addr;

		  if (canon_true_dependence (dest, GET_MODE (dest),
					     dest_addr, x, NULL_RTX))
		    RESET_BIT (bmap[bb_index], indx);
		}
	    }
	}

      x = XEXP (x, 0);
      goto repeat;

    case PC: case CC0: case CONST: case CONST_INT: case CONST_DOUBLE:
    case CONST_VECTOR: case SYMBOL_REF: case LABEL_REF:
    case ADDR_VEC: case ADDR_DIFF_VEC:
      return;

    default:
      break;
    }

  for (i = GET_RTX_LENGTH (code) - 1, fmt = GET_RTX_FORMAT (code); i >= 0; i--)
    {
      if (fmt[i] == 'e')
	{
	  /* If we are about to do the last recursive call
	     needed at this level, change it into iteration.
	     This function is called enough to be worth it.  */
	  if (i == 0)
	    {
	      x = XEXP (x, i);
	      goto repeat;
	    }

	  compute_transp (XEXP (x, i), indx, bmap);
	}
      else if (fmt[i] == 'E')
	for (j = 0; j < XVECLEN (x, i); j++)
	  compute_transp (XVECEXP (x, i, j), indx, bmap);
    }
}
/* Compute PRE+LCM working variables.  */

/* Local properties of expressions.  */

/* Nonzero for expressions that are transparent in the block.  */
static sbitmap *transp;

/* Nonzero for expressions that are computed (available) in the block.  */
static sbitmap *comp;

/* Nonzero for expressions that are locally anticipatable in the block.  */
static sbitmap *antloc;

/* Nonzero for expressions where this block is an optimal computation
   point.  */
static sbitmap *pre_optimal;

/* Nonzero for expressions which are redundant in a particular block.  */
static sbitmap *pre_redundant;

/* Nonzero for expressions which should be inserted on a specific edge.  */
static sbitmap *pre_insert_map;

/* Nonzero for expressions which should be deleted in a specific block.  */
static sbitmap *pre_delete_map;
/* Allocate vars used for PRE analysis.  */

static void
alloc_pre_mem (int n_blocks, int n_exprs)
{
  transp = sbitmap_vector_alloc (n_blocks, n_exprs);
  comp = sbitmap_vector_alloc (n_blocks, n_exprs);
  antloc = sbitmap_vector_alloc (n_blocks, n_exprs);

  pre_optimal = NULL;
  pre_redundant = NULL;
  pre_insert_map = NULL;
  pre_delete_map = NULL;
  ae_kill = sbitmap_vector_alloc (n_blocks, n_exprs);

  /* pre_insert and pre_delete are allocated later.  */
}

/* Free vars used for PRE analysis.  */

static void
free_pre_mem (void)
{
  sbitmap_vector_free (transp);
  sbitmap_vector_free (comp);

  /* ANTLOC and AE_KILL are freed just after pre_lcm finishes.  */

  if (pre_optimal)
    sbitmap_vector_free (pre_optimal);
  if (pre_redundant)
    sbitmap_vector_free (pre_redundant);
  if (pre_insert_map)
    sbitmap_vector_free (pre_insert_map);
  if (pre_delete_map)
    sbitmap_vector_free (pre_delete_map);

  transp = comp = NULL;
  pre_optimal = pre_redundant = pre_insert_map = pre_delete_map = NULL;
}
/* Remove certain expressions from anticipatable and transparent
   sets of basic blocks that have incoming abnormal edge.
   For PRE remove potentially trapping expressions to avoid placing
   them on abnormal edges.  For hoisting remove memory references that
   can be clobbered by calls.  */

static void
prune_expressions (bool pre_p)
{
  sbitmap prune_exprs;
  struct expr *expr;
  unsigned int ui;
  basic_block bb;

  prune_exprs = sbitmap_alloc (expr_hash_table.n_elems);
  sbitmap_zero (prune_exprs);
  for (ui = 0; ui < expr_hash_table.size; ui++)
    {
      for (expr = expr_hash_table.table[ui]; expr; expr = expr->next_same_hash)
	{
	  /* Note potentially trapping expressions.  */
	  if (may_trap_p (expr->expr))
	    {
	      SET_BIT (prune_exprs, expr->bitmap_index);
	      continue;
	    }

	  if (!pre_p && MEM_P (expr->expr))
	    /* Note memory references that can be clobbered by a call.
	       We do not split abnormal edges in hoisting, so if a memory
	       reference were hoisted along an abnormal edge,
	       it would be placed /before/ the call.  Therefore, only
	       constant memory references can be hoisted along abnormal
	       edges.  */
	    {
	      if (GET_CODE (XEXP (expr->expr, 0)) == SYMBOL_REF
		  && CONSTANT_POOL_ADDRESS_P (XEXP (expr->expr, 0)))
		continue;

	      if (MEM_READONLY_P (expr->expr)
		  && !MEM_VOLATILE_P (expr->expr)
		  && MEM_NOTRAP_P (expr->expr))
		/* Constant memory reference, e.g., a PIC address.  */
		continue;

	      /* ??? Optimally, we would use interprocedural alias
		 analysis to determine if this mem is actually killed
		 by this call.  */

	      SET_BIT (prune_exprs, expr->bitmap_index);
	    }
	}
    }

  FOR_EACH_BB (bb)
    {
      edge e;
      edge_iterator ei;

      /* If the current block is the destination of an abnormal edge, we
	 kill all trapping (for PRE) and memory (for hoist) expressions
	 because we won't be able to properly place the instruction on
	 the edge.  So make them neither anticipatable nor transparent.
	 This is fairly conservative.

	 ??? For hoisting it may be necessary to check for set-and-jump
	 instructions here, not just for abnormal edges.  The general problem
	 is that when an expression cannot be placed right at the end of
	 a basic block we should account for any side-effects of a subsequent
	 jump instructions that could clobber the expression.  It would
	 be best to implement this check along the lines of
	 hoist_expr_reaches_here_p where the target block is already known
	 and, hence, there's no need to conservatively prune expressions on
	 "intermediate" set-and-jump instructions.  */
      FOR_EACH_EDGE (e, ei, bb->preds)
	if ((e->flags & EDGE_ABNORMAL)
	    && (pre_p || CALL_P (BB_END (e->src))))
	  {
	    sbitmap_difference (antloc[bb->index],
				antloc[bb->index], prune_exprs);
	    sbitmap_difference (transp[bb->index],
				transp[bb->index], prune_exprs);
	    break;
	  }
    }

  sbitmap_free (prune_exprs);
}
/* It may be necessary to insert a large number of insns on edges to
   make the existing occurrences of expressions fully redundant.  This
   routine examines the set of insertions and deletions and if the ratio
   of insertions to deletions is too high for a particular expression, then
   the expression is removed from the insertion/deletion sets.

   N_ELEMS is the number of elements in the hash table.  */
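
/* For example, an expression that would require 25 edge insertions to
   make a single occurrence redundant has an insertion/deletion ratio of
   25; it is pruned whenever that exceeds MAX_GCSE_INSERTION_RATIO.  */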
static void
prune_insertions_deletions (int n_elems)
{
  sbitmap_iterator sbi;
  sbitmap prune_exprs;

  /* We always use I to iterate over blocks/edges and J to iterate over
     expressions.  */
  unsigned int i, j;

  /* Counts for the number of times an expression needs to be inserted and
     number of times an expression can be removed as a result.  */
  int *insertions = GCNEWVEC (int, n_elems);
  int *deletions = GCNEWVEC (int, n_elems);

  /* Set of expressions which require too many insertions relative to
     the number of deletions achieved.  We will prune these out of the
     insertion/deletion sets.  */
  prune_exprs = sbitmap_alloc (n_elems);
  sbitmap_zero (prune_exprs);

  /* Iterate over the edges counting the number of times each expression
     needs to be inserted.  */
  for (i = 0; i < (unsigned) n_edges; i++)
    {
      EXECUTE_IF_SET_IN_SBITMAP (pre_insert_map[i], 0, j, sbi)
	insertions[j]++;
    }

  /* Similarly for deletions, but those occur in blocks rather than on
     edges.  */
  for (i = 0; i < (unsigned) last_basic_block; i++)
    {
      EXECUTE_IF_SET_IN_SBITMAP (pre_delete_map[i], 0, j, sbi)
	deletions[j]++;
    }

  /* Now that we have accurate counts, iterate over the elements in the
     hash table and see if any need too many insertions relative to the
     number of evaluations that can be removed.  If so, mark them in
     PRUNE_EXPRS.  */
  for (j = 0; j < (unsigned) n_elems; j++)
    if (deletions[j]
	&& ((unsigned) insertions[j] / deletions[j]) > MAX_GCSE_INSERTION_RATIO)
      SET_BIT (prune_exprs, j);

  /* Now prune PRE_INSERT_MAP and PRE_DELETE_MAP based on PRUNE_EXPRS.  */
  EXECUTE_IF_SET_IN_SBITMAP (prune_exprs, 0, j, sbi)
    {
      for (i = 0; i < (unsigned) n_edges; i++)
	RESET_BIT (pre_insert_map[i], j);

      for (i = 0; i < (unsigned) last_basic_block; i++)
	RESET_BIT (pre_delete_map[i], j);
    }

  sbitmap_free (prune_exprs);
  free (insertions);
  free (deletions);
}
/* Top level routine to do the dataflow analysis needed by PRE.  */

static struct edge_list *
compute_pre_data (void)
{
  struct edge_list *edge_list;
  basic_block bb;

  compute_local_properties (transp, comp, antloc, &expr_hash_table);
  prune_expressions (true);
  sbitmap_vector_zero (ae_kill, last_basic_block);

  /* Compute ae_kill for each basic block using:

     ~(TRANSP | COMP)
  */

  FOR_EACH_BB (bb)
    {
      sbitmap_a_or_b (ae_kill[bb->index], transp[bb->index], comp[bb->index]);
      sbitmap_not (ae_kill[bb->index], ae_kill[bb->index]);
    }

  edge_list = pre_edge_lcm (expr_hash_table.n_elems, transp, comp, antloc,
			    ae_kill, &pre_insert_map, &pre_delete_map);
  sbitmap_vector_free (antloc);
  antloc = NULL;
  sbitmap_vector_free (ae_kill);
  ae_kill = NULL;

  prune_insertions_deletions (expr_hash_table.n_elems);

  return edge_list;
}
1985 /* Return nonzero if an occurrence of expression EXPR in OCCR_BB would reach
1988 VISITED is a pointer to a working buffer for tracking which BB's have
1989 been visited. It is NULL for the top-level call.
1991 We treat reaching expressions that go through blocks containing the same
1992 reaching expression as "not reaching". E.g. if EXPR is generated in blocks
1993 2 and 3, INSN is in block 4, and 2->3->4, we treat the expression in block
1994 2 as not reaching. The intent is to improve the probability of finding
1995 only one reaching expression and to reduce register lifetimes by picking
1996 the closest such expression. */
1999 pre_expr_reaches_here_p_work (basic_block occr_bb, struct expr *expr,
2000 basic_block bb, char *visited)
2005 FOR_EACH_EDGE (pred, ei, bb->preds)
2007 basic_block pred_bb = pred->src;
2009 if (pred->src == ENTRY_BLOCK_PTR
2010 /* Has predecessor has already been visited? */
2011 || visited[pred_bb->index])
2012 ;/* Nothing to do. */
2014 /* Does this predecessor generate this expression? */
2015 else if (TEST_BIT (comp[pred_bb->index], expr->bitmap_index))
2017 /* Is this the occurrence we're looking for?
2018 Note that there's only one generating occurrence per block
2019 so we just need to check the block number. */
2020 if (occr_bb == pred_bb)
2023 visited[pred_bb->index] = 1;
2025 /* Ignore this predecessor if it kills the expression. */
2026 else if (! TEST_BIT (transp[pred_bb->index], expr->bitmap_index))
2027 visited[pred_bb->index] = 1;
2029 /* Neither gen nor kill. */
2032 visited[pred_bb->index] = 1;
2033 if (pre_expr_reaches_here_p_work (occr_bb, expr, pred_bb, visited))
2038 /* All paths have been checked. */
2042 /* The wrapper for pre_expr_reaches_here_p_work that ensures that any
2043 memory allocated for that function is freed. */
2046 pre_expr_reaches_here_p (basic_block occr_bb, struct expr *expr, basic_block bb)
2049 char *visited = XCNEWVEC (char, last_basic_block);
2051 rval = pre_expr_reaches_here_p_work (occr_bb, expr, bb, visited);
2057 /* Generate RTL to copy an EXPR to its `reaching_reg' and return it. */
2060 process_insert_insn (struct expr *expr)
2062 rtx reg = expr->reaching_reg;
2063 /* Copy the expression to make sure we don't have any sharing issues. */
2064 rtx exp = copy_rtx (expr->expr);
2069 /* If the expression is something that's an operand, like a constant,
2070 just copy it to a register. */
2071 if (general_operand (exp, GET_MODE (reg)))
2072 emit_move_insn (reg, exp);
2074 /* Otherwise, make a new insn to compute this expression and make sure the
2075 insn will be recognized (this also adds any needed CLOBBERs). */
2078 rtx insn = emit_insn (gen_rtx_SET (VOIDmode, reg, exp));
2080 if (insn_invalid_p (insn, false))
2090 /* Add EXPR to the end of basic block BB.
2092 This is used by both PRE and code hoisting. */
2095 insert_insn_end_basic_block (struct expr *expr, basic_block bb)
2097 rtx insn = BB_END (bb);
2099 rtx reg = expr->reaching_reg;
2100 int regno = REGNO (reg);
2103 pat = process_insert_insn (expr);
2104 gcc_assert (pat && INSN_P (pat));
2107 while (NEXT_INSN (pat_end) != NULL_RTX)
2108 pat_end = NEXT_INSN (pat_end);
2110 /* If the last insn is a jump, insert EXPR in front [taking care to
2111 handle cc0, etc. properly]. Similarly we need to take care of
2112 trapping instructions in the presence of non-call exceptions. */
2115 || (NONJUMP_INSN_P (insn)
2116 && (!single_succ_p (bb)
2117 || single_succ_edge (bb)->flags & EDGE_ABNORMAL)))
2123 /* If this is a jump table, then we can't insert stuff here. Since
2124 we know the previous real insn must be the tablejump, we insert
2125 the new instruction just before the tablejump. */
2126 if (GET_CODE (PATTERN (insn)) == ADDR_VEC
2127 || GET_CODE (PATTERN (insn)) == ADDR_DIFF_VEC)
2128 insn = prev_active_insn (insn);
2131 /* FIXME: 'twould be nice to call prev_cc0_setter here but it aborts
2132 if cc0 isn't set. */
2133 note = find_reg_note (insn, REG_CC_SETTER, NULL_RTX);
2135 insn = XEXP (note, 0);
2138 rtx maybe_cc0_setter = prev_nonnote_insn (insn);
2139 if (maybe_cc0_setter
2140 && INSN_P (maybe_cc0_setter)
2141 && sets_cc0_p (PATTERN (maybe_cc0_setter)))
2142 insn = maybe_cc0_setter;
2145 /* FIXME: What if something in cc0/jump uses value set in new insn? */
2146 new_insn = emit_insn_before_noloc (pat, insn, bb);
2149 /* Likewise if the last insn is a call, as will happen in the presence
2150 of exception handling. */
2151 else if (CALL_P (insn)
2152 && (!single_succ_p (bb)
2153 || single_succ_edge (bb)->flags & EDGE_ABNORMAL))
2155 /* Keeping in mind targets with small register classes and parameters
2156 in registers, we search backward and place the instructions before
2157 the first parameter is loaded. Do this for all targets for consistency,
2158 on the presumption that we'll get better code elsewhere as well. */
2160 /* Since different machines initialize their parameter registers
2161 in different orders, assume nothing. Collect the set of all
2162 parameter registers. */
2163 insn = find_first_parameter_load (insn, BB_HEAD (bb));
2165 /* If we found all the parameter loads, then we want to insert
2166 before the first parameter load.
2168 If we did not find all the parameter loads, then we might have
2169 stopped on the head of the block, which could be a CODE_LABEL.
2170 If we inserted before the CODE_LABEL, then we would be putting
2171 the insn in the wrong basic block. In that case, put the insn
2172 after the CODE_LABEL. Also, respect NOTE_INSN_BASIC_BLOCK. */
2173 while (LABEL_P (insn)
2174 || NOTE_INSN_BASIC_BLOCK_P (insn))
2175 insn = NEXT_INSN (insn);
2177 new_insn = emit_insn_before_noloc (pat, insn, bb);
2180 new_insn = emit_insn_after_noloc (pat, insn, bb);
2185 add_label_notes (PATTERN (pat), new_insn);
2188 pat = NEXT_INSN (pat);
2191 gcse_create_count++;
2195 fprintf (dump_file, "PRE/HOIST: end of bb %d, insn %d, ",
2196 bb->index, INSN_UID (new_insn));
2197 fprintf (dump_file, "copying expression %d to reg %d\n",
2198 expr->bitmap_index, regno);
2202 /* Insert partially redundant expressions on edges in the CFG to make
2203 the expressions fully redundant. */
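/* As a schematic source-level sketch (invented code, not pass output):
   when `a + b' is computed on only one of two paths into a block that
   recomputes it,

	if (p)
	  x = a + b;
	y = a + b;      <- partially redundant

   inserting a computation of `a + b' into its reaching register on the
   edge that bypasses the first computation makes the second occurrence
   fully redundant, so pre_delete can replace it with a copy. */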
2206 pre_edge_insert (struct edge_list *edge_list, struct expr **index_map)
2208 int e, i, j, num_edges, set_size, did_insert = 0;
2211 /* Where PRE_INSERT_MAP is nonzero, we add the expression on that edge
2212 if it reaches any of the deleted expressions. */
2214 set_size = pre_insert_map[0]->size;
2215 num_edges = NUM_EDGES (edge_list);
2216 inserted = sbitmap_vector_alloc (num_edges, expr_hash_table.n_elems);
2217 sbitmap_vector_zero (inserted, num_edges);
2219 for (e = 0; e < num_edges; e++)
2222 basic_block bb = INDEX_EDGE_PRED_BB (edge_list, e);
2224 for (i = indx = 0; i < set_size; i++, indx += SBITMAP_ELT_BITS)
2226 SBITMAP_ELT_TYPE insert = pre_insert_map[e]->elms[i];
2229 insert && j < (int) expr_hash_table.n_elems;
2231 if ((insert & 1) != 0 && index_map[j]->reaching_reg != NULL_RTX)
2233 struct expr *expr = index_map[j];
2236 /* Now look at each deleted occurrence of this expression. */
2237 for (occr = expr->antic_occr; occr != NULL; occr = occr->next)
2239 if (! occr->deleted_p)
2242 /* Insert this expression on this edge if it would
2243 reach the deleted occurrence in BB. */
2244 if (!TEST_BIT (inserted[e], j))
2247 edge eg = INDEX_EDGE (edge_list, e);
2249 /* We can't insert anything on an abnormal and
2250 critical edge, so we insert the insn at the end of
2251 the previous block. There are several alternatives
2252 detailed in Morgan's book, p. 277 (sec. 10.5), for
2253 handling this situation. This one is easiest for
2256 if (eg->flags & EDGE_ABNORMAL)
2257 insert_insn_end_basic_block (index_map[j], bb);
2260 insn = process_insert_insn (index_map[j]);
2261 insert_insn_on_edge (insn, eg);
2266 fprintf (dump_file, "PRE: edge (%d,%d), ",
2268 INDEX_EDGE_SUCC_BB (edge_list, e)->index);
2269 fprintf (dump_file, "copy expression %d\n",
2270 expr->bitmap_index);
2273 update_ld_motion_stores (expr);
2274 SET_BIT (inserted[e], j);
2276 gcse_create_count++;
2283 sbitmap_vector_free (inserted);
2287 /* Copy the result of EXPR->EXPR generated by INSN to EXPR->REACHING_REG.
2288 Given "old_reg <- expr" (INSN), instead of adding after it
2289 reaching_reg <- old_reg
2290 it's better to do the following:
2291 reaching_reg <- expr
2292 old_reg <- reaching_reg
2293 because this way copy propagation can discover additional PRE
2294 opportunities. But if this fails, we try the old way.
2295 When "expr" is a store, i.e.
2296 given "MEM <- old_reg", instead of adding after it
2297 reaching_reg <- old_reg
2298 it's better to add it before as follows:
2299 reaching_reg <- old_reg
2300 MEM <- reaching_reg. */
2303 pre_insert_copy_insn (struct expr *expr, rtx insn)
2305 rtx reg = expr->reaching_reg;
2306 int regno = REGNO (reg);
2307 int indx = expr->bitmap_index;
2308 rtx pat = PATTERN (insn);
2309 rtx set, first_set, new_insn;
2313 /* This block matches the logic in hash_scan_insn. */
2314 switch (GET_CODE (pat))
2321 /* Search through the parallel looking for the set whose
2322 source was the expression that we're interested in. */
2323 first_set = NULL_RTX;
2325 for (i = 0; i < XVECLEN (pat, 0); i++)
2327 rtx x = XVECEXP (pat, 0, i);
2328 if (GET_CODE (x) == SET)
2330 /* If the source was a REG_EQUAL or REG_EQUIV note, we
2331 may not find an equivalent expression, but in this
2332 case the PARALLEL will have a single set. */
2333 if (first_set == NULL_RTX)
2335 if (expr_equiv_p (SET_SRC (x), expr->expr))
2343 gcc_assert (first_set);
2344 if (set == NULL_RTX)
2352 if (REG_P (SET_DEST (set)))
2354 old_reg = SET_DEST (set);
2355 /* Check if we can modify the set destination in the original insn. */
2356 if (validate_change (insn, &SET_DEST (set), reg, 0))
2358 new_insn = gen_move_insn (old_reg, reg);
2359 new_insn = emit_insn_after (new_insn, insn);
2363 new_insn = gen_move_insn (reg, old_reg);
2364 new_insn = emit_insn_after (new_insn, insn);
2367 else /* This is possible only in case of a store to memory. */
2369 old_reg = SET_SRC (set);
2370 new_insn = gen_move_insn (reg, old_reg);
2372 /* Check if we can modify the set source in the original insn. */
2373 if (validate_change (insn, &SET_SRC (set), reg, 0))
2374 new_insn = emit_insn_before (new_insn, insn);
2376 new_insn = emit_insn_after (new_insn, insn);
2379 gcse_create_count++;
2383 "PRE: bb %d, insn %d, copy expression %d in insn %d to reg %d\n",
2384 BLOCK_FOR_INSN (insn)->index, INSN_UID (new_insn), indx,
2385 INSN_UID (insn), regno);
2388 /* Copy available expressions that reach the redundant expression
2389 to `reaching_reg'. */
2392 pre_insert_copies (void)
2394 unsigned int i, added_copy;
2399 /* For each available expression in the table, copy the result to
2400 `reaching_reg' if the expression reaches a deleted one.
2402 ??? The current algorithm is rather brute force.
2403 Need to do some profiling. */
2405 for (i = 0; i < expr_hash_table.size; i++)
2406 for (expr = expr_hash_table.table[i]; expr; expr = expr->next_same_hash)
2408 /* If the basic block isn't reachable, PPOUT will be TRUE. However,
2409 we don't want to insert a copy here because the expression may not
2410 really be redundant. So only insert an insn if the expression was
2411 deleted. This test also avoids further processing if the
2412 expression wasn't deleted anywhere. */
2413 if (expr->reaching_reg == NULL)
2416 /* Set when we add a copy for that expression. */
2419 for (occr = expr->antic_occr; occr != NULL; occr = occr->next)
2421 if (! occr->deleted_p)
2424 for (avail = expr->avail_occr; avail != NULL; avail = avail->next)
2426 rtx insn = avail->insn;
2428 /* No need to handle this one if handled already. */
2429 if (avail->copied_p)
2432 /* Don't handle this one if it's a redundant one. */
2433 if (INSN_DELETED_P (insn))
2436 /* Or if the expression doesn't reach the deleted one. */
2437 if (! pre_expr_reaches_here_p (BLOCK_FOR_INSN (avail->insn),
2439 BLOCK_FOR_INSN (occr->insn)))
2444 /* Copy the result of avail to reaching_reg. */
2445 pre_insert_copy_insn (expr, insn);
2446 avail->copied_p = 1;
2451 update_ld_motion_stores (expr);
2455 /* Emit move from SRC to DEST noting the equivalence with expression
2456 computed in INSN. */
2459 gcse_emit_move_after (rtx dest, rtx src, rtx insn)
2462 rtx set = single_set (insn), set2;
2466 /* This should never fail since we're creating a reg->reg copy
2467 we've verified to be valid. */
2469 new_rtx = emit_insn_after (gen_move_insn (dest, src), insn);
2471 /* Note the equivalence for local CSE pass. */
2472 set2 = single_set (new_rtx);
2473 if (!set2 || !rtx_equal_p (SET_DEST (set2), dest))
2475 if ((note = find_reg_equal_equiv_note (insn)))
2476 eqv = XEXP (note, 0);
2478 eqv = SET_SRC (set);
2480 set_unique_reg_note (new_rtx, REG_EQUAL, copy_insn_1 (eqv));
2485 /* Delete redundant computations.
2486 Deletion is done by changing the insn to copy the `reaching_reg' of
2487 the expression into the result of the SET. It is left to later passes
2488 (cprop, cse2, flow, combine, regmove) to propagate the copy or eliminate it.
2490 Return nonzero if a change is made. */
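/* Schematically (the register numbers are invented for illustration):

	(set (reg 100) (plus:SI (reg 1) (reg 2)))

   becomes

	(set (reg 100) (reg 150))

   where (reg 150) stands for EXPR->reaching_reg, the pseudo that the
   remaining occurrences and the edge insertions compute into. */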
2501 for (i = 0; i < expr_hash_table.size; i++)
2502 for (expr = expr_hash_table.table[i]; expr; expr = expr->next_same_hash)
2504 int indx = expr->bitmap_index;
2506 /* We only need to search antic_occr since we require ANTLOC != 0. */
2507 for (occr = expr->antic_occr; occr != NULL; occr = occr->next)
2509 rtx insn = occr->insn;
2511 basic_block bb = BLOCK_FOR_INSN (insn);
2513 /* We only delete insns that have a single_set. */
2514 if (TEST_BIT (pre_delete_map[bb->index], indx)
2515 && (set = single_set (insn)) != 0
2516 && dbg_cnt (pre_insn))
2518 /* Create a pseudo-reg to store the result of reaching
2519 expressions into. Get the mode for the new pseudo from
2520 the mode of the original destination pseudo. */
2521 if (expr->reaching_reg == NULL)
2522 expr->reaching_reg = gen_reg_rtx_and_attrs (SET_DEST (set));
2524 gcse_emit_move_after (SET_DEST (set), expr->reaching_reg, insn);
2526 occr->deleted_p = 1;
2533 "PRE: redundant insn %d (expression %d) in ",
2534 INSN_UID (insn), indx);
2535 fprintf (dump_file, "bb %d, reaching reg is %d\n",
2536 bb->index, REGNO (expr->reaching_reg));
2545 /* Perform GCSE optimizations using PRE.
2546 This is called by one_pre_gcse_pass after all the dataflow analysis
2547 has been done.
2549 This is based on the original Morel-Renvoise paper, Fred Chow's thesis, and
2550 lazy code motion from Knoop, Ruthing and Steffen as described in Advanced
2551 Compiler Design and Implementation.
2553 ??? A new pseudo reg is created to hold the reaching expression. The nice
2554 thing about the classical approach is that it would try to use an existing
2555 reg. If the register can't be adequately optimized [i.e. we introduce
2556 reload problems], one could add a pass here to propagate the new register
2559 ??? We don't handle single sets in PARALLELs because we're [currently] not
2560 able to copy the rest of the parallel when we insert copies to create full
2561 redundancies from partial redundancies. However, there's no reason why we
2562 can't handle PARALLELs in the cases where there are no partial
2563 redundancies. */
2566 pre_gcse (struct edge_list *edge_list)
2569 int did_insert, changed;
2570 struct expr **index_map;
2573 /* Compute a mapping from expression number (`bitmap_index') to
2574 hash table entry. */
2576 index_map = XCNEWVEC (struct expr *, expr_hash_table.n_elems);
2577 for (i = 0; i < expr_hash_table.size; i++)
2578 for (expr = expr_hash_table.table[i]; expr; expr = expr->next_same_hash)
2579 index_map[expr->bitmap_index] = expr;
2581 /* Delete the redundant insns first so that
2582 - we know what register to use for the new insns and for the other
2583 ones with reaching expressions
2584 - we know which insns are redundant when we go to create copies */
2586 changed = pre_delete ();
2587 did_insert = pre_edge_insert (edge_list, index_map);
2589 /* In other places with reaching expressions, copy the expression to the
2590 specially allocated pseudo-reg that reaches the redundant expr. */
2591 pre_insert_copies ();
2594 commit_edge_insertions ();
2602 /* Top level routine to perform one PRE GCSE pass.
2604 Return nonzero if a change was made. */
2607 one_pre_gcse_pass (void)
2611 gcse_subst_count = 0;
2612 gcse_create_count = 0;
2614 /* Return if there's nothing to do, or it is too expensive. */
2615 if (n_basic_blocks <= NUM_FIXED_BLOCKS + 1
2616 || is_too_expensive (_("PRE disabled")))
2619 /* We need alias. */
2620 init_alias_analysis ();
2623 gcc_obstack_init (&gcse_obstack);
2626 alloc_hash_table (&expr_hash_table);
2627 add_noreturn_fake_exit_edges ();
2629 compute_ld_motion_mems ();
2631 compute_hash_table (&expr_hash_table);
2633 trim_ld_motion_mems ();
2635 dump_hash_table (dump_file, "Expression", &expr_hash_table);
2637 if (expr_hash_table.n_elems > 0)
2639 struct edge_list *edge_list;
2640 alloc_pre_mem (last_basic_block, expr_hash_table.n_elems);
2641 edge_list = compute_pre_data ();
2642 changed |= pre_gcse (edge_list);
2643 free_edge_list (edge_list);
2648 free_ld_motion_mems ();
2649 remove_fake_exit_edges ();
2650 free_hash_table (&expr_hash_table);
2653 obstack_free (&gcse_obstack, NULL);
2655 /* We are finished with alias. */
2656 end_alias_analysis ();
2660 fprintf (dump_file, "PRE GCSE of %s, %d basic blocks, %d bytes needed, ",
2661 current_function_name (), n_basic_blocks, bytes_used);
2662 fprintf (dump_file, "%d substs, %d insns created\n",
2663 gcse_subst_count, gcse_create_count);
2669 /* If X contains any LABEL_REF's, add REG_LABEL_OPERAND notes for them
2670 to INSN. If such notes are added to an insn which references a
2671 CODE_LABEL, the LABEL_NUSES count is incremented. We have to add
2672 these notes, because the following loop optimization pass requires
2673 them. */
2675 /* ??? If there was a jump optimization pass after gcse and before loop,
2676 then we would not need to do this here, because jump would add the
2677 necessary REG_LABEL_OPERAND and REG_LABEL_TARGET notes. */
2680 add_label_notes (rtx x, rtx insn)
2682 enum rtx_code code = GET_CODE (x);
2686 if (code == LABEL_REF && !LABEL_REF_NONLOCAL_P (x))
2688 /* This code used to ignore labels that referred to dispatch tables to
2689 avoid flow generating (slightly) worse code.
2691 We no longer ignore such label references (see LABEL_REF handling in
2692 mark_jump_label for additional information). */
2694 /* There's no reason for current users to emit jump-insns with
2695 such a LABEL_REF, so we don't have to handle REG_LABEL_TARGET
2696 notes. */
2697 gcc_assert (!JUMP_P (insn));
2698 add_reg_note (insn, REG_LABEL_OPERAND, XEXP (x, 0));
2700 if (LABEL_P (XEXP (x, 0)))
2701 LABEL_NUSES (XEXP (x, 0))++;
2706 for (i = GET_RTX_LENGTH (code) - 1, fmt = GET_RTX_FORMAT (code); i >= 0; i--)
2709 add_label_notes (XEXP (x, i), insn);
2710 else if (fmt[i] == 'E')
2711 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
2712 add_label_notes (XVECEXP (x, i, j), insn);
2716 /* Code Hoisting variables and subroutines. */
2718 /* Very busy expressions. */
2719 static sbitmap *hoist_vbein;
2720 static sbitmap *hoist_vbeout;
2722 /* ??? We could compute post dominators and run this algorithm in
2723 reverse to perform tail merging; doing so would probably be
2724 more effective than the tail merging code in jump.c.
2726 It's unclear if tail merging could be run in parallel with
2727 code hoisting. It would be nice. */
2729 /* Allocate vars used for code hoisting analysis. */
2732 alloc_code_hoist_mem (int n_blocks, int n_exprs)
2734 antloc = sbitmap_vector_alloc (n_blocks, n_exprs);
2735 transp = sbitmap_vector_alloc (n_blocks, n_exprs);
2736 comp = sbitmap_vector_alloc (n_blocks, n_exprs);
2738 hoist_vbein = sbitmap_vector_alloc (n_blocks, n_exprs);
2739 hoist_vbeout = sbitmap_vector_alloc (n_blocks, n_exprs);
2742 /* Free vars used for code hoisting analysis. */
2745 free_code_hoist_mem (void)
2747 sbitmap_vector_free (antloc);
2748 sbitmap_vector_free (transp);
2749 sbitmap_vector_free (comp);
2751 sbitmap_vector_free (hoist_vbein);
2752 sbitmap_vector_free (hoist_vbeout);
2754 free_dominance_info (CDI_DOMINATORS);
2757 /* Compute the very busy expressions at entry/exit from each block.
2759 An expression is very busy if all paths from a given point
2760 compute the expression (see below for the equations iterated here). */
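/* As dataflow equations, this is a sketch of what the loop below
   iterates to a fixed point:

	VBEOUT(bb) = (intersection of VBEIN(s) over all successors s)
		     union COMP(bb)
	VBEIN(bb)  = ANTLOC(bb) union (VBEOUT(bb) intersect TRANSP(bb))

   where the block laid out immediately before the exit block keeps an
   empty VBEOUT, per the guard in the loop. */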
2763 compute_code_hoist_vbeinout (void)
2765 int changed, passes;
2768 sbitmap_vector_zero (hoist_vbeout, last_basic_block);
2769 sbitmap_vector_zero (hoist_vbein, last_basic_block);
2778 /* We scan the blocks in reverse order to speed up
2779 the convergence. */
2780 FOR_EACH_BB_REVERSE (bb)
2782 if (bb->next_bb != EXIT_BLOCK_PTR)
2784 sbitmap_intersection_of_succs (hoist_vbeout[bb->index],
2787 /* Include expressions in VBEout that are calculated
2788 in BB and available at its end. */
2789 sbitmap_a_or_b (hoist_vbeout[bb->index],
2790 hoist_vbeout[bb->index], comp[bb->index]);
2793 changed |= sbitmap_a_or_b_and_c_cg (hoist_vbein[bb->index],
2795 hoist_vbeout[bb->index],
2804 fprintf (dump_file, "hoisting vbeinout computation: %d passes\n", passes);
2808 fprintf (dump_file, "vbein (%d): ", bb->index);
2809 dump_sbitmap_file (dump_file, hoist_vbein[bb->index]);
2810 fprintf (dump_file, "vbeout(%d): ", bb->index);
2811 dump_sbitmap_file (dump_file, hoist_vbeout[bb->index]);
2816 /* Top level routine to do the dataflow analysis needed by code hoisting. */
2819 compute_code_hoist_data (void)
2821 compute_local_properties (transp, comp, antloc, &expr_hash_table);
2822 prune_expressions (false);
2823 compute_code_hoist_vbeinout ();
2824 calculate_dominance_info (CDI_DOMINATORS);
2826 fprintf (dump_file, "\n");
2829 /* Determine if the expression identified by EXPR_INDEX would
2830 reach BB unimpaired if it were placed at the end of EXPR_BB.
2831 Stop the search if the expression would need to be moved more
2832 than DISTANCE instructions.
2834 It's unclear exactly what Muchnick meant by "unimpaired". It seems
2835 to me that the expression must either be computed or transparent in
2836 *every* block in the path(s) from EXPR_BB to BB. Any other definition
2837 would allow the expression to be hoisted out of loops, even if
2838 the expression wasn't a loop invariant.
2840 Contrast this to reachability for PRE where an expression is
2841 considered reachable if *any* path reaches instead of *all*
2842 paths. */
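/* For example (an invented diamond-shaped cfg):

	  b1
	 /  \
	b2    b3
	 \  /
	  b4

   with the expression computed in b4 and not transparent in b3, PRE
   would still consider the occurrence in b4 reachable from b1 via b2,
   but hoisting into b1 must give up: on the path through b3 the
   hoisted value would be stale by the time b4 is reached. */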
2845 hoist_expr_reaches_here_p (basic_block expr_bb, int expr_index, basic_block bb,
2846 char *visited, int distance, int *bb_size)
2850 int visited_allocated_locally = 0;
2852 /* Terminate the search if the distance for which EXPR is allowed to
2853 move is exhausted. */
2856 distance -= bb_size[bb->index];
2862 gcc_assert (distance == 0);
2864 if (visited == NULL)
2866 visited_allocated_locally = 1;
2867 visited = XCNEWVEC (char, last_basic_block);
2870 FOR_EACH_EDGE (pred, ei, bb->preds)
2872 basic_block pred_bb = pred->src;
2874 if (pred->src == ENTRY_BLOCK_PTR)
2876 else if (pred_bb == expr_bb)
2878 else if (visited[pred_bb->index])
2881 else if (! TEST_BIT (transp[pred_bb->index], expr_index))
2887 visited[pred_bb->index] = 1;
2888 if (! hoist_expr_reaches_here_p (expr_bb, expr_index, pred_bb,
2889 visited, distance, bb_size))
2893 if (visited_allocated_locally)
2896 return (pred == NULL);
2899 /* Find occurrence in BB. */
2901 static struct occr *
2902 find_occr_in_bb (struct occr *occr, basic_block bb)
2904 /* Find the right occurrence of this expression. */
2905 while (occr && BLOCK_FOR_INSN (occr->insn) != bb)
2911 /* Actually perform code hoisting. */
2916 basic_block bb, dominated;
2917 VEC (basic_block, heap) *dom_tree_walk;
2918 unsigned int dom_tree_walk_index;
2919 VEC (basic_block, heap) *domby;
2921 struct expr **index_map;
2927 /* Compute a mapping from expression number (`bitmap_index') to
2928 hash table entry. */
2930 index_map = XCNEWVEC (struct expr *, expr_hash_table.n_elems);
2931 for (i = 0; i < expr_hash_table.size; i++)
2932 for (expr = expr_hash_table.table[i]; expr; expr = expr->next_same_hash)
2933 index_map[expr->bitmap_index] = expr;
2935 /* Calculate sizes of basic blocks and note how far
2936 each instruction is from the start of its block. We then use this
2937 data to restrict the distance an expression can travel. */
2939 to_bb_head = XCNEWVEC (int, get_max_uid ());
2940 bb_size = XCNEWVEC (int, last_basic_block);
2948 FOR_BB_INSNS (bb, insn)
2950 /* Don't count debug instructions, so that they do not affect
2951 optimization decisions. */
2952 if (NONDEBUG_INSN_P (insn))
2953 to_bb_head[INSN_UID (insn)] = to_head++;
2956 bb_size[bb->index] = to_head;
2959 gcc_assert (EDGE_COUNT (ENTRY_BLOCK_PTR->succs) == 1
2960 && (EDGE_SUCC (ENTRY_BLOCK_PTR, 0)->dest
2961 == ENTRY_BLOCK_PTR->next_bb));
2963 dom_tree_walk = get_all_dominated_blocks (CDI_DOMINATORS,
2964 ENTRY_BLOCK_PTR->next_bb);
2966 /* Walk over each basic block looking for potentially hoistable
2967 expressions; nothing gets hoisted from the entry block. */
2968 FOR_EACH_VEC_ELT (basic_block, dom_tree_walk, dom_tree_walk_index, bb)
2970 domby = get_dominated_to_depth (CDI_DOMINATORS, bb, MAX_HOIST_DEPTH);
2972 if (VEC_length (basic_block, domby) == 0)
2975 /* Examine each expression that is very busy at the exit of this
2976 block. These are the potentially hoistable expressions. */
2977 for (i = 0; i < SBITMAP_SIZE (hoist_vbeout[bb->index]); i++)
2979 if (TEST_BIT (hoist_vbeout[bb->index], i))
2981 /* Current expression. */
2982 struct expr *expr = index_map[i];
2983 /* Number of occurrences of EXPR that can be hoisted to BB. */
2985 /* Basic blocks that have occurrences reachable from BB. */
2986 bitmap_head _from_bbs, *from_bbs = &_from_bbs;
2987 /* Occurrences reachable from BB. */
2988 VEC (occr_t, heap) *occrs_to_hoist = NULL;
2989 /* We want to insert the expression into BB only once, so
2990 note when we've inserted it. */
2991 int insn_inserted_p;
2994 bitmap_initialize (from_bbs, 0);
2996 /* If an expression is computed in BB and is available at end of
2997 BB, hoist all occurrences dominated by BB to BB. */
2998 if (TEST_BIT (comp[bb->index], i))
3000 occr = find_occr_in_bb (expr->antic_occr, bb);
3004 /* An occurrence might already have been deleted
3005 while processing a dominator of BB. */
3006 if (!occr->deleted_p)
3008 gcc_assert (NONDEBUG_INSN_P (occr->insn));
3016 /* We've found a potentially hoistable expression, now
3017 we look at every block BB dominates to see if it
3018 computes the expression. */
3019 FOR_EACH_VEC_ELT (basic_block, domby, j, dominated)
3023 /* Ignore self dominance. */
3024 if (bb == dominated)
3026 /* We've found a dominated block, now see if it computes
3027 the busy expression and whether or not moving that
3028 expression to the "beginning" of that block is safe. */
3029 if (!TEST_BIT (antloc[dominated->index], i))
3032 occr = find_occr_in_bb (expr->antic_occr, dominated);
3035 /* An occurrence might already have been deleted
3036 while processing a dominator of BB. */
3037 if (occr->deleted_p)
3039 gcc_assert (NONDEBUG_INSN_P (occr->insn));
3041 max_distance = expr->max_distance;
3042 if (max_distance > 0)
3043 /* Adjust MAX_DISTANCE to account for the fact that
3044 OCCR won't have to travel all of DOMINATED, but
3045 only part of it. */
3046 max_distance += (bb_size[dominated->index]
3047 - to_bb_head[INSN_UID (occr->insn)]);
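/* For instance (hypothetical numbers): with 12 non-debug insns in
   DOMINATED and OCCR as its 5th insn (to_bb_head == 4), the walk in
   hoist_expr_reaches_here_p will charge all 12 insns of DOMINATED
   against the distance budget, yet OCCR only travels the 4 insns from
   its position to the head of the block, so the 12 - 4 == 8 unused
   insns are credited back by the adjustment above. */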
3049 /* Note if the expression would reach the dominated block
3050 unimpaired if it were placed at the end of BB.
3052 Keep track of how many times this expression is hoistable
3053 from a dominated block into BB. */
3054 if (hoist_expr_reaches_here_p (bb, i, dominated, NULL,
3055 max_distance, bb_size))
3058 VEC_safe_push (occr_t, heap,
3059 occrs_to_hoist, occr);
3060 bitmap_set_bit (from_bbs, dominated->index);
3064 /* If we found more than one hoistable occurrence of this
3065 expression, then note it in the vector of expressions to
3066 hoist. It makes no sense to hoist things which are computed
3067 in only one BB, and doing so tends to pessimize register
3068 allocation. One could increase this value to try harder
3069 to avoid any possible code expansion due to register
3070 allocation issues; however experiments have shown that
3071 the vast majority of hoistable expressions are only movable
3072 from two successors, so raising this threshold is likely
3073 to nullify any benefit we get from code hoisting. */
3074 if (hoistable > 1 && dbg_cnt (hoist_insn))
3076 /* If (hoistable != VEC_length), then there is
3077 an occurrence of EXPR in BB itself. Don't waste
3078 time looking for LCA in this case. */
3079 if ((unsigned) hoistable
3080 == VEC_length (occr_t, occrs_to_hoist))
3084 lca = nearest_common_dominator_for_set (CDI_DOMINATORS,
3087 /* Punt, it's better to hoist these occurrences to
3088 LCA. */
3089 VEC_free (occr_t, heap, occrs_to_hoist);
3093 /* Punt, no point hoisting a single occurrence. */
3094 VEC_free (occr_t, heap, occrs_to_hoist);
3096 insn_inserted_p = 0;
3098 /* Walk through the occurrences of the I'th expression we want
3099 to hoist to BB and make the transformations. */
3100 FOR_EACH_VEC_ELT (occr_t, occrs_to_hoist, j, occr)
3105 gcc_assert (!occr->deleted_p);
3108 set = single_set (insn);
3111 /* Create a pseudo-reg to store the result of reaching
3112 expressions into. Get the mode for the new pseudo
3113 from the mode of the original destination pseudo.
3115 It is important to use new pseudos whenever we
3116 emit a set. This will allow reload to use
3117 rematerialization for such registers. */
3118 if (!insn_inserted_p)
3120 = gen_reg_rtx_and_attrs (SET_DEST (set));
3122 gcse_emit_move_after (SET_DEST (set), expr->reaching_reg,
3125 occr->deleted_p = 1;
3129 if (!insn_inserted_p)
3131 insert_insn_end_basic_block (expr, bb);
3132 insn_inserted_p = 1;
3136 VEC_free (occr_t, heap, occrs_to_hoist);
3137 bitmap_clear (from_bbs);
3140 VEC_free (basic_block, heap, domby);
3143 VEC_free (basic_block, heap, dom_tree_walk);
3151 /* Top level routine to perform one code hoisting (aka unification) pass
3153 Return nonzero if a change was made. */
3156 one_code_hoisting_pass (void)
3160 gcse_subst_count = 0;
3161 gcse_create_count = 0;
3163 /* Return if there's nothing to do, or it is too expensive. */
3164 if (n_basic_blocks <= NUM_FIXED_BLOCKS + 1
3165 || is_too_expensive (_("GCSE disabled")))
3168 doing_code_hoisting_p = true;
3170 /* We need alias. */
3171 init_alias_analysis ();
3174 gcc_obstack_init (&gcse_obstack);
3177 alloc_hash_table (&expr_hash_table);
3178 compute_hash_table (&expr_hash_table);
3180 dump_hash_table (dump_file, "Code Hoisting Expressions", &expr_hash_table);
3182 if (expr_hash_table.n_elems > 0)
3184 alloc_code_hoist_mem (last_basic_block, expr_hash_table.n_elems);
3185 compute_code_hoist_data ();
3186 changed = hoist_code ();
3187 free_code_hoist_mem ();
3190 free_hash_table (&expr_hash_table);
3192 obstack_free (&gcse_obstack, NULL);
3194 /* We are finished with alias. */
3195 end_alias_analysis ();
3199 fprintf (dump_file, "HOIST of %s, %d basic blocks, %d bytes needed, ",
3200 current_function_name (), n_basic_blocks, bytes_used);
3201 fprintf (dump_file, "%d substs, %d insns created\n",
3202 gcse_subst_count, gcse_create_count);
3205 doing_code_hoisting_p = false;
3210 /* Here we provide the things required to do store motion towards the exit.
3211 In order for this to be effective, gcse also needed to be taught how to
3212 move a load when it is killed only by a store to itself.
3217 void foo(float scale)
3219 for (i=0; i<10; i++)
3220 a[i] *= scale;
3223 'i' is both loaded and stored to in the loop. Normally, gcse cannot move
3224 the load out since it's live around the loop, and stored at the bottom
3225 of the loop.
3227 The 'Load Motion' referred to and implemented in this file is
3228 an enhancement to gcse which, when using edge-based LCM, recognizes
3229 this situation and allows gcse to move the load out of the loop.
3231 Once gcse has hoisted the load, store motion can then push this
3232 load towards the exit, and we end up with no loads or stores of 'i'
3233 in the loop. */
3236 pre_ldst_expr_hash (const void *p)
3238 int do_not_record_p = 0;
3239 const struct ls_expr *const x = (const struct ls_expr *) p;
3241 hash_rtx (x->pattern, GET_MODE (x->pattern), &do_not_record_p, NULL, false);
3245 pre_ldst_expr_eq (const void *p1, const void *p2)
3247 const struct ls_expr *const ptr1 = (const struct ls_expr *) p1,
3248 *const ptr2 = (const struct ls_expr *) p2;
3249 return expr_equiv_p (ptr1->pattern, ptr2->pattern);
3252 /* This will search the ldst list for a matching expression. If it
3253 doesn't find one, we create one and initialize it. */
3255 static struct ls_expr *
3258 int do_not_record_p = 0;
3259 struct ls_expr * ptr;
3264 hash = hash_rtx (x, GET_MODE (x), &do_not_record_p,
3265 NULL, /*have_reg_qty=*/false);
3268 slot = htab_find_slot_with_hash (pre_ldst_table, &e, hash, INSERT);
3270 return (struct ls_expr *)*slot;
3272 ptr = XNEW (struct ls_expr);
3274 ptr->next = pre_ldst_mems;
3277 ptr->pattern_regs = NULL_RTX;
3278 ptr->loads = NULL_RTX;
3279 ptr->stores = NULL_RTX;
3280 ptr->reaching_reg = NULL_RTX;
3283 ptr->hash_index = hash;
3284 pre_ldst_mems = ptr;
3290 /* Free up an individual ldst entry. */
3293 free_ldst_entry (struct ls_expr * ptr)
3295 free_INSN_LIST_list (& ptr->loads);
3296 free_INSN_LIST_list (& ptr->stores);
3301 /* Free up all memory associated with the ldst list. */
3304 free_ld_motion_mems (void)
3307 htab_delete (pre_ldst_table);
3308 pre_ldst_table = NULL;
3310 while (pre_ldst_mems)
3312 struct ls_expr * tmp = pre_ldst_mems;
3314 pre_ldst_mems = pre_ldst_mems->next;
3316 free_ldst_entry (tmp);
3319 pre_ldst_mems = NULL;
3322 /* Dump debugging info about the ldst list. */
3325 print_ldst_list (FILE * file)
3327 struct ls_expr * ptr;
3329 fprintf (file, "LDST list:\n");
3331 for (ptr = pre_ldst_mems; ptr != NULL; ptr = ptr->next)
3333 fprintf (file, " Pattern (%3d): ", ptr->index);
3335 print_rtl (file, ptr->pattern);
3337 fprintf (file, "\n Loads : ");
3340 print_rtl (file, ptr->loads);
3342 fprintf (file, "(nil)");
3344 fprintf (file, "\n Stores : ");
3347 print_rtl (file, ptr->stores);
3349 fprintf (file, "(nil)");
3351 fprintf (file, "\n\n");
3354 fprintf (file, "\n");
3357 /* Return the ls_expr entry for X if X is in the ldst list and has not
been invalidated; otherwise return NULL. */
3359 static struct ls_expr *
3360 find_rtx_in_ldst (rtx x)
3364 if (!pre_ldst_table)
3367 slot = htab_find_slot (pre_ldst_table, &e, NO_INSERT);
3368 if (!slot || ((struct ls_expr *)*slot)->invalid)
3370 return (struct ls_expr *) *slot;
3373 /* Load Motion for loads which only kill themselves. */
3375 /* Return true if X, a MEM, is a simple access with no side effects.
3376 These are the types of loads we consider for the ld_motion list,
3377 otherwise we let the usual aliasing take care of it. */
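/* For example (invented RTL, not pass output): a load such as

	(set (reg:SI 100) (mem:SI (symbol_ref "x")))

   has a simple MEM, whereas volatile or BLKmode references,
   possibly-trapping references when non-call exceptions are enabled,
   references with side effects, and addresses mentioning the stack
   pointer are all rejected by the tests below. */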
3380 simple_mem (const_rtx x)
3382 if (MEM_VOLATILE_P (x))
3385 if (GET_MODE (x) == BLKmode)
3388 /* If we are handling exceptions, we must be careful with memory references
3389 that may trap. If we are not, the behavior is undefined, so we may just
3390 continue. */
3391 if (cfun->can_throw_non_call_exceptions && may_trap_p (x))
3394 if (side_effects_p (x))
3397 /* Do not consider function arguments passed on the stack. */
3398 if (reg_mentioned_p (stack_pointer_rtx, x))
3401 if (flag_float_store && FLOAT_MODE_P (GET_MODE (x)))
3407 /* Make sure there isn't a buried reference in this pattern anywhere.
3408 If there is, invalidate the entry for it since we're not capable
3409 of fixing it up just yet. We have to be sure we know about ALL
3410 loads since the aliasing code will allow all entries in the
3411 ld_motion list to not alias anything but themselves. If we miss a load,
3412 we will get the wrong value since gcse might common it and we won't know
3413 to fix it up. */
3416 invalidate_any_buried_refs (rtx x)
3420 struct ls_expr * ptr;
3422 /* Invalidate it in the list. */
3423 if (MEM_P (x) && simple_mem (x))
3425 ptr = ldst_entry (x);
3429 /* Recursively process the insn. */
3430 fmt = GET_RTX_FORMAT (GET_CODE (x));
3432 for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
3435 invalidate_any_buried_refs (XEXP (x, i));
3436 else if (fmt[i] == 'E')
3437 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
3438 invalidate_any_buried_refs (XVECEXP (x, i, j));
3442 /* Find all the 'simple' MEMs which are used in LOADs and STORES. 'Simple'
3443 means MEM loads and stores to symbols, with no side effects
3444 and no registers in the expression. For a MEM destination, we also
3445 check that the insn is still valid if we replace the destination with a
3446 REG, as is done in update_ld_motion_stores. If there are any uses/defs
3447 which don't match these criteria, they are invalidated and trimmed out
3448 later. */
3451 compute_ld_motion_mems (void)
3453 struct ls_expr * ptr;
3457 pre_ldst_mems = NULL;
3459 = htab_create (13, pre_ldst_expr_hash, pre_ldst_expr_eq, NULL);
3463 FOR_BB_INSNS (bb, insn)
3465 if (NONDEBUG_INSN_P (insn))
3467 if (GET_CODE (PATTERN (insn)) == SET)
3469 rtx src = SET_SRC (PATTERN (insn));
3470 rtx dest = SET_DEST (PATTERN (insn));
3472 /* Check for a simple LOAD... */
3473 if (MEM_P (src) && simple_mem (src))
3475 ptr = ldst_entry (src);
3477 ptr->loads = alloc_INSN_LIST (insn, ptr->loads);
3483 /* Make sure there isn't a buried load somewhere. */
3484 invalidate_any_buried_refs (src);
3487 /* Check for stores. Don't worry about aliased ones, they
3488 will block any movement we might do later. We only care
3489 about this exact pattern, since this is the only
3490 circumstance in which we will ignore the aliasing info. */
3491 if (MEM_P (dest) && simple_mem (dest))
3493 ptr = ldst_entry (dest);
3496 && GET_CODE (src) != ASM_OPERANDS
3497 /* Check for REG manually since want_to_gcse_p
3498 returns 0 for all REGs. */
3499 && can_assign_to_reg_without_clobbers_p (src))
3500 ptr->stores = alloc_INSN_LIST (insn, ptr->stores);
3506 invalidate_any_buried_refs (PATTERN (insn));
3512 /* Remove any references that have been invalidated or are not in the
3513 expression list for PRE gcse. */
3516 trim_ld_motion_mems (void)
3518 struct ls_expr * * last = & pre_ldst_mems;
3519 struct ls_expr * ptr = pre_ldst_mems;
3525 /* Delete if entry has been made invalid. */
3528 /* Delete if we cannot find this mem in the expression list. */
3529 unsigned int hash = ptr->hash_index % expr_hash_table.size;
3531 for (expr = expr_hash_table.table[hash];
3533 expr = expr->next_same_hash)
3534 if (expr_equiv_p (expr->expr, ptr->pattern))
3538 expr = (struct expr *) 0;
3542 /* Set the expression field if we are keeping it. */
3550 htab_remove_elt_with_hash (pre_ldst_table, ptr, ptr->hash_index);
3551 free_ldst_entry (ptr);
3556 /* Show the world what we've found. */
3557 if (dump_file && pre_ldst_mems != NULL)
3558 print_ldst_list (dump_file);
3561 /* This routine will take an expression which we are replacing with
3562 a reaching register, and update any stores that are needed if
3563 that expression is in the ld_motion list. Stores are updated by
3564 copying their SRC to the reaching register, and then storing
3565 the reaching register into the store location. This keeps the
3566 correct value in the reaching register for the loads. */
3569 update_ld_motion_stores (struct expr * expr)
3571 struct ls_expr * mem_ptr;
3573 if ((mem_ptr = find_rtx_in_ldst (expr->expr)))
3575 /* We can try to find just the REACHED stores, but it shouldn't
3576 matter if we set the reaching reg everywhere... some might be
3577 dead and should be eliminated later. */
3579 /* We replace (set mem expr) with (set reg expr) (set mem reg)
3580 where reg is the reaching reg used in the load. We checked in
3581 compute_ld_motion_mems that we can replace (set mem expr) with
3582 (set reg expr) in that insn. */
3583 rtx list = mem_ptr->stores;
3585 for ( ; list != NULL_RTX; list = XEXP (list, 1))
3587 rtx insn = XEXP (list, 0);
3588 rtx pat = PATTERN (insn);
3589 rtx src = SET_SRC (pat);
3590 rtx reg = expr->reaching_reg;
3593 /* If we've already copied it, continue. */
3594 if (expr->reaching_reg == src)
3599 fprintf (dump_file, "PRE: store updated with reaching reg ");
3600 print_rtl (dump_file, reg);
3601 fprintf (dump_file, ":\n ");
3602 print_inline_rtx (dump_file, insn, 8);
3603 fprintf (dump_file, "\n");
3606 copy = gen_move_insn (reg, copy_rtx (SET_SRC (pat)));
3607 emit_insn_before (copy, insn);
3608 SET_SRC (pat) = reg;
3609 df_insn_rescan (insn);
3611 /* un-recognize this pattern since it's probably different now. */
3612 INSN_CODE (insn) = -1;
3613 gcse_create_count++;
3618 /* Return true if the graph is too expensive to optimize. PASS is the
3619 optimization about to be performed. */
3622 is_too_expensive (const char *pass)
3624 /* Trying to perform global optimizations on flow graphs which have
3625 a high connectivity will take a long time and is unlikely to be
3626 particularly useful.
3628 In normal circumstances a cfg should have about twice as many
3629 edges as blocks. But we do not want to punish small functions
3630 which have a couple of switch statements. Rather than simply
3631 thresholding the number of blocks, use something with a more
3632 graceful degradation. */
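/* A worked example of the threshold below: a function with 1000 basic
   blocks is allowed up to 20000 + 1000 * 4 == 24000 edges, i.e. an
   average of 24 edges per block, before the pass is abandoned; that is
   roughly an order of magnitude above the two edges per block of a
   typical cfg. */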
3633 if (n_edges > 20000 + n_basic_blocks * 4)
3635 warning (OPT_Wdisabled_optimization,
3636 "%s: %d basic blocks and %d edges/basic block",
3637 pass, n_basic_blocks, n_edges / n_basic_blocks);
3642 /* If allocating memory for the dataflow bitmaps would take up too much
3643 storage, it's better just to disable the optimization. */
3645 * SBITMAP_SET_SIZE (max_reg_num ())
3646 * sizeof (SBITMAP_ELT_TYPE)) > MAX_GCSE_MEMORY)
3648 warning (OPT_Wdisabled_optimization,
3649 "%s: %d basic blocks and %d registers",
3650 pass, n_basic_blocks, max_reg_num ());
3658 /* All the passes implemented in this file. Each pass has its
3659 own gate and execute function, and at the end of the file a
3660 pass definition for passes.c.
3662 We do not construct an accurate cfg in functions which call
3663 setjmp, so none of these passes runs if the function calls
3664 setjmp.
3665 FIXME: Should just handle setjmp via REG_SETJMP notes. */
3670 return optimize > 0 && flag_gcse
3671 && !cfun->calls_setjmp
3672 && optimize_function_for_speed_p (cfun)
3677 execute_rtl_pre (void)
3680 delete_unreachable_blocks ();
3682 changed = one_pre_gcse_pass ();
3683 flag_rerun_cse_after_global_opts |= changed;
3690 gate_rtl_hoist (void)
3692 return optimize > 0 && flag_gcse
3693 && !cfun->calls_setjmp
3694 /* It does not make sense to run code hoisting unless we are optimizing
3695 for code size -- it rarely makes programs faster, and can make them
3696 bigger if we did PRE (when optimizing for space, we don't run PRE). */
3697 && optimize_function_for_size_p (cfun)
3702 execute_rtl_hoist (void)
3705 delete_unreachable_blocks ();
3707 changed = one_code_hoisting_pass ();
3708 flag_rerun_cse_after_global_opts |= changed;
3714 struct rtl_opt_pass pass_rtl_pre =
3718 "rtl pre", /* name */
3719 gate_rtl_pre, /* gate */
3720 execute_rtl_pre, /* execute */
3723 0, /* static_pass_number */
3725 PROP_cfglayout, /* properties_required */
3726 0, /* properties_provided */
3727 0, /* properties_destroyed */
3728 0, /* todo_flags_start */
3729 TODO_df_finish | TODO_verify_rtl_sharing |
3730 TODO_verify_flow | TODO_ggc_collect /* todo_flags_finish */
3734 struct rtl_opt_pass pass_rtl_hoist =
3739 gate_rtl_hoist, /* gate */
3740 execute_rtl_hoist, /* execute */
3743 0, /* static_pass_number */
3744 TV_HOIST, /* tv_id */
3745 PROP_cfglayout, /* properties_required */
3746 0, /* properties_provided */
3747 0, /* properties_destroyed */
3748 0, /* todo_flags_start */
3749 TODO_df_finish | TODO_verify_rtl_sharing |
3750 TODO_verify_flow | TODO_ggc_collect /* todo_flags_finish */
3754 #include "gt-gcse.h"