1 /* Integrated Register Allocator (IRA) entry point.
2 Copyright (C) 2006, 2007, 2008, 2009, 2010
3 Free Software Foundation, Inc.
4 Contributed by Vladimir Makarov <vmakarov@redhat.com>.
6 This file is part of GCC.
8 GCC is free software; you can redistribute it and/or modify it under
9 the terms of the GNU General Public License as published by the Free
10 Software Foundation; either version 3, or (at your option) any later version.
13 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
14 WARRANTY; without even the implied warranty of MERCHANTABILITY or
15 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.
18 You should have received a copy of the GNU General Public License
19 along with GCC; see the file COPYING3. If not see
20 <http://www.gnu.org/licenses/>. */
22 /* The integrated register allocator (IRA) is a
23 regional register allocator performing graph coloring on a top-down
24 traversal of nested regions. Graph coloring in a region is based
25 on the Chaitin-Briggs algorithm. It is called integrated because
26 register coalescing, register live range splitting, and choosing a
27 better hard register are done on-the-fly during coloring. Register
28 coalescing and choosing a cheaper hard register are done by hard
29 register preferencing during hard register assignment. Live
30 range splitting is a byproduct of the regional register allocation.
32 Major IRA notions are:
34 o *Region* is a part of the CFG where graph coloring based on
35 the Chaitin-Briggs algorithm is done. IRA can work on any set of
36 nested CFG regions forming a tree. Currently the regions are
37 the entire function for the root region and natural loops for
38 the other regions. Therefore the data structure representing a
39 region is called loop_tree_node.
41 o *Cover class* is a register class belonging to a set of
42 non-intersecting register classes containing all of the
43 hard-registers available for register allocation. The set of
44 all cover classes for a target is defined in the corresponding
45 machine-description file according to some criteria. This notion
46 is needed because the Chaitin-Briggs algorithm works on
47 non-intersecting register classes.
49 o *Allocno* represents the live range of a pseudo-register in a
50 region. Besides the obvious attributes like the corresponding
51 pseudo-register number, cover class, conflicting allocnos and
52 conflicting hard-registers, there are a few allocno attributes
53 which are important for understanding the allocation algorithm:
55 - *Live ranges*. This is a list of ranges of *program
56 points* where the allocno lives. Program points represent
57 places where a pseudo can be born or become dead (there are
58 approximately two times more program points than the insns)
59 and they are represented by integers starting with 0. The
60 live ranges are used to find conflicts between allocnos of
61 different cover classes. They also play a very important role
62 in the transformation of the IRA internal representation of
63 several regions into a one-region representation. The latter is
64 used during the reload pass because each allocno
65 represents all of the corresponding pseudo-registers.
67 - *Hard-register costs*. This is a vector of size equal to the
68 number of available hard-registers of the allocno's cover
69 class. The cost of a callee-clobbered hard-register for an
70 allocno is increased by the cost of save/restore code around
71 the calls through the given allocno's life. If the allocno
72 is a move instruction operand and another operand is a
73 hard-register of the allocno's cover class, the cost of the
74 hard-register is decreased by the move cost.
76 When an allocno is assigned, the hard-register with minimal
77 full cost is used. Initially, a hard-register's full cost is
78 the corresponding value from the hard-register's cost vector.
79 If the allocno is connected by a *copy* (see below) to
80 another allocno which has just received a hard-register, the
81 cost of the hard-register is decreased. Before choosing a
82 hard-register for an allocno, the allocno's current costs of
83 the hard-registers are modified by the conflict hard-register
84 costs of all of the conflicting allocnos which are not assigned yet (a small illustrative sketch of this choice follows the list of notions below).
87 - *Conflict hard-register costs*. This is a vector of the same
88 size as the hard-register costs vector. To permit an
89 unassigned allocno to get a better hard-register, IRA uses
90 this vector to calculate the final full cost of the
91 available hard-registers. Conflict hard-register costs of an
92 unassigned allocno are also changed with a change of the
93 hard-register cost of the allocno when a copy involving the
94 allocno is processed as described above. This is done to
95 show other unassigned allocnos that a given allocno prefers
96 some hard-registers in order to remove the move instruction
97 corresponding to the copy.
99 o *Cap*. If a pseudo-register does not live in a region but
100 lives in a nested region, IRA creates a special allocno called
101 a cap in the outer region. A region cap is also created for a subregion cap.
104 o *Copy*. Allocnos can be connected by copies. Copies are used
105 to modify hard-register costs for allocnos during coloring.
106 Such modifications reflect a preference to use the same
107 hard-register for the allocnos connected by copies. Usually
108 copies are created for move insns (in this case it results in
109 register coalescing). But IRA also creates copies for operands
110 of an insn which should be assigned to the same hard-register
111 due to constraints in the machine description (it usually
112 results in removing a move generated in reload to satisfy
113 the constraints) and copies referring to the allocno which is
114 the output operand of an instruction and the allocno which is
115 an input operand dying in the instruction (creation of such
116 copies results in less register shuffling). IRA *does not*
117 create copies between the same register allocnos from different
118 regions because we use another technique for propagating
119 hard-register preference on the borders of regions.
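   As a rough illustration only (this is not IRA's actual code, and the
   names used below are invented for the example), choosing a hard
   register by minimal full cost, as described for the hard-register
   cost vectors above, amounts to something like

       best = -1; best_cost = INT_MAX;
       for (i = 0; i < class_size; i++)
         if (! conflicts_with_assigned_allocno_p (a, i))
           {
             cost = own_cost[i] + conflict_cost[i];
             if (cost < best_cost)
               {
                 best = i;
                 best_cost = cost;
               }
           }

   where own_cost is the accumulated hard-register cost vector (already
   lowered by copy preferences) and conflict_cost is the conflict
   hard-register cost vector.  If best stays -1, or memory turns out to
   be cheaper than best_cost, the allocno is spilled.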
121 Allocnos (including caps) for the upper region in the region tree
122 *accumulate* information important for coloring from allocnos with
123 the same pseudo-register from nested regions. This includes
124 hard-register and memory costs, conflicts with hard-registers,
125 allocno conflicts, allocno copies and more. *Thus, attributes for
126 allocnos in a region have the same values as if the region had no
127 subregions*. This means that attributes for allocnos in the
128 outermost region corresponding to the function have the same values
129 as though the allocation used only one region which is the entire
130 function. It also means that we can view IRA's work as if it
131 first did the allocation for the whole function, then improved the
132 allocation for loops, then for their subloops, and so on.
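   Illustratively (this is not a formula taken from the code), for an
   attribute such as the memory cost the accumulation above means
   roughly

       cost (A, R) = cost of the references in R outside any subregion
                     + sum of cost (A', R') over the immediate
                       subregions R' of R,

   where A' is the allocno (or cap) of the same pseudo-register in R'.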
134 IRA major passes are:
136 o Building IRA internal representation which consists of the following subpasses:
139 * First, IRA builds regions and creates allocnos (file
140 ira-build.c) and initializes most of their attributes.
142 * Then IRA finds a cover class for each allocno and calculates
143 its initial (non-accumulated) cost of memory and each
144 hard-register of its cover class (file ira-cost.c).
146 * IRA creates live ranges of each allocno, calculates register
147 pressure for each cover class in each region, sets up
148 conflict hard registers for each allocno and info about calls
149 the allocno lives through (file ira-lives.c).
151 * IRA removes low register pressure loops from the regions
152 mostly to speed IRA up (file ira-build.c).
154 * IRA propagates accumulated allocno info from lower region
155 allocnos to corresponding upper region allocnos (file ira-build.c).
158 * IRA creates all caps (file ira-build.c).
160 * Having live-ranges of allocnos and their cover classes, IRA
161 creates conflicting allocnos of the same cover class for each
162 allocno. Conflicting allocnos are stored as a bit vector or
163 array of pointers to the conflicting allocnos, whichever is
164 more profitable (file ira-conflicts.c). At this point IRA
165 creates allocno copies.
167 o Coloring. Now IRA has all necessary info to start graph coloring
168 process. It is done in each region on a top-down traversal of the
169 region tree (file ira-color.c). The subpasses are:
171 * Optional aggressive coalescing of allocnos in the region.
173 * Putting allocnos onto the coloring stack. IRA uses Briggs
174 optimistic coloring which is a major improvement over
175 Chaitin's coloring. Therefore IRA does not spill allocnos at
176 this point. There is some freedom in the order of putting
177 allocnos on the stack which can affect the final result of
178 the allocation. IRA uses some heuristics to improve the order.
180 * Popping the allocnos from the stack and assigning them hard
181 registers. If IRA cannot assign a hard register to an
182 allocno and the allocno is coalesced, IRA undoes the
183 coalescing and puts the uncoalesced allocnos onto the stack in
184 the hope that some such allocnos will get a hard register
185 separately. If IRA fails to assign a hard register, or memory
186 is more profitable for the allocno, IRA spills the allocno. IRA
187 assigns the allocno the hard-register with minimal full
188 allocation cost which reflects the cost of using the
189 hard-register for the allocno and the cost of using the
190 hard-register for allocnos conflicting with the given allocno.
192 * After assigning allocnos in the region, IRA modifies the hard
193 register and memory costs for the corresponding allocnos in
194 the subregions to reflect the cost of possible loads, stores,
195 or moves on the border of the region and its subregions.
196 When the default regional allocation algorithm is used
197 (-fira-algorithm=mixed), IRA just propagates the assignment
198 for allocnos if the register pressure in the region for the
199 corresponding cover class is less than the number of available
200 hard registers for the given cover class.
202 o Spill/restore code moving. When IRA performs an allocation
203 by traversing regions in top-down order, it does not know what
204 happens below in the region tree. Therefore, sometimes IRA
205 misses opportunities to perform a better allocation. A simple
206 optimization tries to improve allocation in a region having
207 subregions and contained in another region. If the
208 corresponding allocnos in the subregion are spilled, it spills
209 the region allocno if it is profitable. The optimization
210 implements a simple iterative algorithm performing profitable
211 transformations while they are still possible. It is fast in
212 practice, so there is no real need for a better time complexity algorithm.
215 o Code change. After coloring, two allocnos representing the same
216 pseudo-register outside and inside a region respectively may be
217 assigned to different locations (hard-registers or memory). In
218 this case IRA creates and uses a new pseudo-register inside the
219 region and adds code to move allocno values on the region's
220 borders. This is done during top-down traversal of the regions
221 (file ira-emit.c). In some complicated cases IRA can create a
222 new allocno to move allocno values (e.g. when a swap of values
223 stored in two hard-registers is needed). At this stage, the
224 new allocno is marked as spilled. IRA still creates the
225 pseudo-register and the moves on the region borders even when
226 both allocnos were assigned to the same hard-register. If the
227 reload pass spills a pseudo-register for some reason, the
228 effect will be smaller because another allocno will still be in
229 the hard-register. In most cases, this is better than spilling
230 both allocnos. If reload does not change the allocation
231 for the two pseudo-registers, the trivial move will be removed
232 by post-reload optimizations. IRA does not generate moves for
233 allocnos assigned to the same hard register when the default
234 regional allocation algorithm is used and the register pressure
235 in the region for the corresponding allocno cover class is less
236 than the number of available hard registers for the given cover class.
237 IRA also does some optimizations to remove redundant stores and
238 to reduce code duplication on the region borders.
240 o Flattening internal representation. After changing code, IRA
241 transforms its internal representation for several regions into
242 one region representation (file ira-build.c). This process is
243 called IR flattening. Such process is more complicated than IR
244 rebuilding would be, but is much faster.
246 o After IR flattening, IRA tries to assign hard registers to all
247 spilled allocnos. This is implemented by a simple and fast
248 priority coloring algorithm (see function
249 ira_reassign_conflict_allocnos::ira-color.c). Here new allocnos
250 created during the code change pass can be assigned to hard registers.
253 o At the end IRA calls the reload pass. The reload pass
254 communicates with IRA through several functions in file
255 ira-color.c to improve its decisions in
257 * sharing stack slots for the spilled pseudos based on IRA info
258 about pseudo-register conflicts.
260 * reassigning hard-registers to all spilled pseudos at the end
261 of each reload iteration.
263 * choosing a better hard-register to spill based on IRA info
264 about pseudo-register live ranges and the register pressure
265 in places where the pseudo-register lives.
267 IRA uses a lot of data representing the target processors. These
268 data are initialized in file ira.c.
270 If the function has no loops (or the loops are ignored when
271 -fira-algorithm=CB is used), we have classic Chaitin-Briggs
272 coloring (only instead of a separate coalescing pass, we use hard
273 register preferencing). In such a case, IRA works much faster
274 because many things are not done (like IR flattening, the
275 spill/restore optimization, and the code change).
277 Literature worth reading for a better understanding of the code:
279 o Preston Briggs, Keith D. Cooper, Linda Torczon. Improvements to
280 Graph Coloring Register Allocation.
282 o David Callahan, Brian Koblenz. Register allocation via
283 hierarchical graph coloring.
285 o Keith Cooper, Anshuman Dasgupta, Jason Eckhardt. Revisiting Graph
286 Coloring Register Allocation: A Study of the Chaitin-Briggs and
287 Callahan-Koblenz Algorithms.
289 o Guei-Yuan Lueh, Thomas Gross, and Ali-Reza Adl-Tabatabai. Global
290 Register Allocation Based on Graph Fusion.
292 o Vladimir Makarov. The Integrated Register Allocator for GCC.
294 o Vladimir Makarov. The top-down register allocator for irregular
295 register file architectures.
302 #include "coretypes.h"
311 #include "hard-reg-set.h"
312 #include "basic-block.h"
317 #include "tree-pass.h"
322 #include "integrate.h"
328 /* A modified value of flag `-fira-verbose' used internally. */
329 int internal_flag_ira_verbose;
331 /* Dump file of the allocator if it is not NULL. */
334 /* Pools for allocnos, copies, allocno live ranges. */
335 alloc_pool allocno_pool, copy_pool, allocno_live_range_pool;
337 /* The number of elements in the following array. */
338 int ira_spilled_reg_stack_slots_num;
340 /* The following array contains info about spilled pseudo-registers
341 stack slots used in current function so far. */
342 struct ira_spilled_reg_stack_slot *ira_spilled_reg_stack_slots;
344 /* The following variables are, correspondingly: the overall cost of the
345 allocation, the cost of the allocnos assigned to hard-registers, the cost
346 of the allocnos assigned to memory, and the cost of loads, stores and
347 register move insns generated for pseudo-register live range splitting (see ira-emit.c). */
348 int ira_overall_cost;
349 int ira_reg_cost, ira_mem_cost;
350 int ira_load_cost, ira_store_cost, ira_shuffle_cost;
351 int ira_move_loops_num, ira_additional_jumps_num;
353 /* All registers that can be eliminated. */
355 HARD_REG_SET eliminable_regset;
357 /* Map: hard regs X modes -> set of hard registers for storing value
358 of given mode starting with given hard register. */
359 HARD_REG_SET ira_reg_mode_hard_regset[FIRST_PSEUDO_REGISTER][NUM_MACHINE_MODES];
361 /* The following two variables are array analogs of the macros
362 MEMORY_MOVE_COST and REGISTER_MOVE_COST. */
363 short int ira_memory_move_cost[MAX_MACHINE_MODE][N_REG_CLASSES][2];
364 move_table *ira_register_move_cost[MAX_MACHINE_MODE];
366 /* Similar to may_move_in_cost but it is calculated in IRA instead of
367 regclass. Another difference is that we take only available hard
368 registers into account to figure out that one register class is a
369 subset of another one. */
370 move_table *ira_may_move_in_cost[MAX_MACHINE_MODE];
372 /* Similar to may_move_out_cost but it is calculated in IRA instead of
373 regclass. Another difference is that we take only available hard
374 registers into account to figure out that one register class is a
375 subset of another one. */
376 move_table *ira_may_move_out_cost[MAX_MACHINE_MODE];
378 /* Register class subset relation: TRUE if the first class is a subset
379 of the second one considering only hard registers available for the allocation. */
381 int ira_class_subset_p[N_REG_CLASSES][N_REG_CLASSES];
383 /* Temporary hard reg set used for a different calculation. */
384 static HARD_REG_SET temp_hard_regset;
388 /* The function sets up the map IRA_REG_MODE_HARD_REGSET. */
390 setup_reg_mode_hard_regset (void)
392 int i, m, hard_regno;
394 for (m = 0; m < NUM_MACHINE_MODES; m++)
395 for (hard_regno = 0; hard_regno < FIRST_PSEUDO_REGISTER; hard_regno++)
397 CLEAR_HARD_REG_SET (ira_reg_mode_hard_regset[hard_regno][m]);
398 for (i = hard_regno_nregs[hard_regno][m] - 1; i >= 0; i--)
399 if (hard_regno + i < FIRST_PSEUDO_REGISTER)
400 SET_HARD_REG_BIT (ira_reg_mode_hard_regset[hard_regno][m],
407 /* Hard registers that cannot be used by the register allocator for
408 all functions of the current compilation unit. */
409 static HARD_REG_SET no_unit_alloc_regs;
411 /* Array of the hard registers of the given class which are
412 available for allocation, in allocation order. */
414 short ira_class_hard_regs[N_REG_CLASSES][FIRST_PSEUDO_REGISTER];
416 /* Array of the hard registers of the given class which are
417 available for allocation, in hard register number order. */
419 short ira_non_ordered_class_hard_regs[N_REG_CLASSES][FIRST_PSEUDO_REGISTER];
421 /* The number of elements of the above arrays for the given register class. */
423 int ira_class_hard_regs_num[N_REG_CLASSES];
425 /* Index (in ira_class_hard_regs) for given register class and hard
426 register (in general case a hard register can belong to several
427 register classes). The index is negative for hard registers
428 unavailable for the allocation. */
429 short ira_class_hard_reg_index[N_REG_CLASSES][FIRST_PSEUDO_REGISTER];
431 /* The function sets up the arrays declared above. */
433 setup_class_hard_regs (void)
435 int cl, i, hard_regno, n;
436 HARD_REG_SET processed_hard_reg_set;
438 ira_assert (SHRT_MAX >= FIRST_PSEUDO_REGISTER);
439 for (cl = (int) N_REG_CLASSES - 1; cl >= 0; cl--)
441 COPY_HARD_REG_SET (temp_hard_regset, reg_class_contents[cl]);
442 AND_COMPL_HARD_REG_SET (temp_hard_regset, no_unit_alloc_regs);
443 CLEAR_HARD_REG_SET (processed_hard_reg_set);
444 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
446 ira_non_ordered_class_hard_regs[cl][i] = -1;
447 ira_class_hard_reg_index[cl][i] = -1;
449 for (n = 0, i = 0; i < FIRST_PSEUDO_REGISTER; i++)
451 #ifdef REG_ALLOC_ORDER
452 hard_regno = reg_alloc_order[i];
456 if (TEST_HARD_REG_BIT (processed_hard_reg_set, hard_regno))
458 SET_HARD_REG_BIT (processed_hard_reg_set, hard_regno);
459 if (! TEST_HARD_REG_BIT (temp_hard_regset, hard_regno))
460 ira_class_hard_reg_index[cl][hard_regno] = -1;
463 ira_class_hard_reg_index[cl][hard_regno] = n;
464 ira_class_hard_regs[cl][n++] = hard_regno;
467 ira_class_hard_regs_num[cl] = n;
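/* Collect the allocatable hard registers of the class once more, this
   time simply in hard register number order.  */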
468 for (n = 0, i = 0; i < FIRST_PSEUDO_REGISTER; i++)
469 if (TEST_HARD_REG_BIT (temp_hard_regset, i))
470 ira_non_ordered_class_hard_regs[cl][n++] = i;
471 ira_assert (ira_class_hard_regs_num[cl] == n);
475 /* Number of hard registers of the given class which are available
476 for register allocation. */
477 int ira_available_class_regs[N_REG_CLASSES];
479 /* Set up IRA_AVAILABLE_CLASS_REGS. */
481 setup_available_class_regs (void)
485 memset (ira_available_class_regs, 0, sizeof (ira_available_class_regs));
486 for (i = 0; i < N_REG_CLASSES; i++)
488 COPY_HARD_REG_SET (temp_hard_regset, reg_class_contents[i]);
489 AND_COMPL_HARD_REG_SET (temp_hard_regset, no_unit_alloc_regs);
490 for (j = 0; j < FIRST_PSEUDO_REGISTER; j++)
491 if (TEST_HARD_REG_BIT (temp_hard_regset, j))
492 ira_available_class_regs[i]++;
496 /* Set up global variables defining info about hard registers for the
497 allocation. These depend on USE_HARD_FRAME_P whose TRUE value means
498 that we can use the hard frame pointer for the allocation. */
500 setup_alloc_regs (bool use_hard_frame_p)
502 #ifdef ADJUST_REG_ALLOC_ORDER
503 ADJUST_REG_ALLOC_ORDER;
505 COPY_HARD_REG_SET (no_unit_alloc_regs, fixed_reg_set);
506 if (! use_hard_frame_p)
507 SET_HARD_REG_BIT (no_unit_alloc_regs, HARD_FRAME_POINTER_REGNUM);
508 setup_class_hard_regs ();
509 setup_available_class_regs ();
514 /* Set up the IRA_MEMORY_MOVE_COST and IRA_CLASS_SUBSET_P arrays. */
516 setup_class_subset_and_memory_move_costs (void)
519 HARD_REG_SET temp_hard_regset2;
521 for (mode = 0; mode < MAX_MACHINE_MODE; mode++)
522 ira_memory_move_cost[mode][NO_REGS][0]
523 = ira_memory_move_cost[mode][NO_REGS][1] = SHRT_MAX;
524 for (cl = (int) N_REG_CLASSES - 1; cl >= 0; cl--)
526 if (cl != (int) NO_REGS)
527 for (mode = 0; mode < MAX_MACHINE_MODE; mode++)
529 ira_memory_move_cost[mode][cl][0] =
530 MEMORY_MOVE_COST ((enum machine_mode) mode,
531 (enum reg_class) cl, 0);
532 ira_memory_move_cost[mode][cl][1] =
533 MEMORY_MOVE_COST ((enum machine_mode) mode,
534 (enum reg_class) cl, 1);
535 /* Costs for NO_REGS are used in cost calculation on the
536 1st pass when the preferred register classes are not
537 known yet. In this case we take the best scenario. */
538 if (ira_memory_move_cost[mode][NO_REGS][0]
539 > ira_memory_move_cost[mode][cl][0])
540 ira_memory_move_cost[mode][NO_REGS][0]
541 = ira_memory_move_cost[mode][cl][0];
542 if (ira_memory_move_cost[mode][NO_REGS][1]
543 > ira_memory_move_cost[mode][cl][1])
544 ira_memory_move_cost[mode][NO_REGS][1]
545 = ira_memory_move_cost[mode][cl][1];
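/* Record whether CL is a subset of CL2 when only the hard registers
   actually available for allocation are taken into account.  */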
547 for (cl2 = (int) N_REG_CLASSES - 1; cl2 >= 0; cl2--)
549 COPY_HARD_REG_SET (temp_hard_regset, reg_class_contents[cl]);
550 AND_COMPL_HARD_REG_SET (temp_hard_regset, no_unit_alloc_regs);
551 COPY_HARD_REG_SET (temp_hard_regset2, reg_class_contents[cl2]);
552 AND_COMPL_HARD_REG_SET (temp_hard_regset2, no_unit_alloc_regs);
553 ira_class_subset_p[cl][cl2]
554 = hard_reg_set_subset_p (temp_hard_regset, temp_hard_regset2);
561 /* Define the following macro if allocation through malloc is preferable. */
563 #define IRA_NO_OBSTACK
565 #ifndef IRA_NO_OBSTACK
566 /* Obstack used for storing all dynamic data (except bitmaps) of the IRA. */
568 static struct obstack ira_obstack;
571 /* Obstack used for storing all bitmaps of the IRA. */
572 static struct bitmap_obstack ira_bitmap_obstack;
574 /* Allocate memory of size LEN for IRA data. */
576 ira_allocate (size_t len)
580 #ifndef IRA_NO_OBSTACK
581 res = obstack_alloc (&ira_obstack, len);
588 /* Reallocate memory PTR of size LEN for IRA data. */
590 ira_reallocate (void *ptr, size_t len)
594 #ifndef IRA_NO_OBSTACK
595 res = obstack_alloc (&ira_obstack, len);
597 res = xrealloc (ptr, len);
602 /* Free memory ADDR allocated for IRA data. */
604 ira_free (void *addr ATTRIBUTE_UNUSED)
606 #ifndef IRA_NO_OBSTACK
614 /* Allocate and return a bitmap for IRA. */
616 ira_allocate_bitmap (void)
618 return BITMAP_ALLOC (&ira_bitmap_obstack);
621 /* Free bitmap B allocated for IRA. */
623 ira_free_bitmap (bitmap b ATTRIBUTE_UNUSED)
630 /* Output information about allocation of all allocnos (except for
631 caps) into file F. */
633 ira_print_disposition (FILE *f)
639 fprintf (f, "Disposition:");
640 max_regno = max_reg_num ();
641 for (n = 0, i = FIRST_PSEUDO_REGISTER; i < max_regno; i++)
642 for (a = ira_regno_allocno_map[i];
644 a = ALLOCNO_NEXT_REGNO_ALLOCNO (a))
649 fprintf (f, " %4d:r%-4d", ALLOCNO_NUM (a), ALLOCNO_REGNO (a));
650 if ((bb = ALLOCNO_LOOP_TREE_NODE (a)->bb) != NULL)
651 fprintf (f, "b%-3d", bb->index);
653 fprintf (f, "l%-3d", ALLOCNO_LOOP_TREE_NODE (a)->loop->num);
654 if (ALLOCNO_HARD_REGNO (a) >= 0)
655 fprintf (f, " %3d", ALLOCNO_HARD_REGNO (a));
662 /* Output information about the allocation of all allocnos into stderr. */
665 ira_debug_disposition (void)
667 ira_print_disposition (stderr);
672 /* For each reg class, table listing all the classes contained in it
673 (excluding the class itself; non-allocatable registers are
674 excluded from consideration). */
675 static enum reg_class alloc_reg_class_subclasses[N_REG_CLASSES][N_REG_CLASSES];
677 /* Initialize the table of subclasses of each reg class. */
679 setup_reg_subclasses (void)
682 HARD_REG_SET temp_hard_regset2;
684 for (i = 0; i < N_REG_CLASSES; i++)
685 for (j = 0; j < N_REG_CLASSES; j++)
686 alloc_reg_class_subclasses[i][j] = LIM_REG_CLASSES;
688 for (i = 0; i < N_REG_CLASSES; i++)
690 if (i == (int) NO_REGS)
693 COPY_HARD_REG_SET (temp_hard_regset, reg_class_contents[i]);
694 AND_COMPL_HARD_REG_SET (temp_hard_regset, no_unit_alloc_regs);
695 if (hard_reg_set_empty_p (temp_hard_regset))
697 for (j = 0; j < N_REG_CLASSES; j++)
702 COPY_HARD_REG_SET (temp_hard_regset2, reg_class_contents[j]);
703 AND_COMPL_HARD_REG_SET (temp_hard_regset2, no_unit_alloc_regs);
704 if (! hard_reg_set_subset_p (temp_hard_regset,
707 p = &alloc_reg_class_subclasses[j][0];
708 while (*p != LIM_REG_CLASSES) p++;
709 *p = (enum reg_class) i;
716 /* Number of cover classes. Cover classes are non-intersecting register
717 classes containing all hard-registers available for the allocation. */
719 int ira_reg_class_cover_size;
721 /* The array containing cover classes (see also comments for macro
722 IRA_COVER_CLASSES). Only the first IRA_REG_CLASS_COVER_SIZE elements are used for this. */
724 enum reg_class ira_reg_class_cover[N_REG_CLASSES];
726 /* The number of elements in the subsequent array. */
727 int ira_important_classes_num;
729 /* The array containing non-empty classes (including non-empty cover
730 classes) which are subclasses of cover classes. Such classes are
731 important for calculating the hard register usage costs. */
732 enum reg_class ira_important_classes[N_REG_CLASSES];
734 /* The array containing indexes of important classes in the previous
735 array. The array elements are defined only for important classes. */
737 int ira_important_class_nums[N_REG_CLASSES];
739 /* Set the four global variables defined above. */
741 setup_cover_and_important_classes (void)
745 const enum reg_class *cover_classes;
746 HARD_REG_SET temp_hard_regset2;
747 static enum reg_class classes[LIM_REG_CLASSES + 1];
749 if (targetm.ira_cover_classes == NULL)
750 cover_classes = NULL;
752 cover_classes = targetm.ira_cover_classes ();
753 if (cover_classes == NULL)
754 ira_assert (flag_ira_algorithm == IRA_ALGORITHM_PRIORITY);
757 for (i = 0; (cl = cover_classes[i]) != LIM_REG_CLASSES; i++)
758 classes[i] = (enum reg_class) cl;
759 classes[i] = LIM_REG_CLASSES;
762 if (flag_ira_algorithm == IRA_ALGORITHM_PRIORITY)
765 for (i = 0; i <= LIM_REG_CLASSES; i++)
769 #ifdef CONSTRAINT_NUM_DEFINED_P
770 for (j = 0; j < CONSTRAINT__LIMIT; j++)
771 if ((int) REG_CLASS_FOR_CONSTRAINT ((enum constraint_num) j) == i)
773 if (j < CONSTRAINT__LIMIT)
775 classes[n++] = (enum reg_class) i;
779 COPY_HARD_REG_SET (temp_hard_regset, reg_class_contents[i]);
780 AND_COMPL_HARD_REG_SET (temp_hard_regset, no_unit_alloc_regs);
781 for (j = 0; j < LIM_REG_CLASSES; j++)
785 COPY_HARD_REG_SET (temp_hard_regset2, reg_class_contents[j]);
786 AND_COMPL_HARD_REG_SET (temp_hard_regset2,
788 if (hard_reg_set_equal_p (temp_hard_regset,
793 classes[n++] = (enum reg_class) i;
795 classes[n] = LIM_REG_CLASSES;
798 ira_reg_class_cover_size = 0;
799 for (i = 0; (cl = classes[i]) != LIM_REG_CLASSES; i++)
801 for (j = 0; j < i; j++)
802 if (flag_ira_algorithm != IRA_ALGORITHM_PRIORITY
803 && reg_classes_intersect_p ((enum reg_class) cl, classes[j]))
805 COPY_HARD_REG_SET (temp_hard_regset, reg_class_contents[cl]);
806 AND_COMPL_HARD_REG_SET (temp_hard_regset, no_unit_alloc_regs);
807 if (! hard_reg_set_empty_p (temp_hard_regset))
808 ira_reg_class_cover[ira_reg_class_cover_size++] = (enum reg_class) cl;
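/* A class is important if it contains allocatable hard registers and
   is a subset of some cover class without being equal (as a set of
   allocatable registers) to any cover class; the cover classes
   themselves are appended at the end.  */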
810 ira_important_classes_num = 0;
811 for (cl = 0; cl < N_REG_CLASSES; cl++)
813 COPY_HARD_REG_SET (temp_hard_regset, reg_class_contents[cl]);
814 AND_COMPL_HARD_REG_SET (temp_hard_regset, no_unit_alloc_regs);
815 if (! hard_reg_set_empty_p (temp_hard_regset))
818 for (j = 0; j < ira_reg_class_cover_size; j++)
820 COPY_HARD_REG_SET (temp_hard_regset, reg_class_contents[cl]);
821 AND_COMPL_HARD_REG_SET (temp_hard_regset, no_unit_alloc_regs);
822 COPY_HARD_REG_SET (temp_hard_regset2,
823 reg_class_contents[ira_reg_class_cover[j]]);
824 AND_COMPL_HARD_REG_SET (temp_hard_regset2, no_unit_alloc_regs);
825 if ((enum reg_class) cl == ira_reg_class_cover[j]
826 || hard_reg_set_equal_p (temp_hard_regset,
829 else if (hard_reg_set_subset_p (temp_hard_regset,
833 if (set_p && j >= ira_reg_class_cover_size)
834 ira_important_classes[ira_important_classes_num++]
835 = (enum reg_class) cl;
838 for (j = 0; j < ira_reg_class_cover_size; j++)
839 ira_important_classes[ira_important_classes_num++]
840 = ira_reg_class_cover[j];
843 /* Map of all register classes to the corresponding cover class containing
844 the given class. If the given class is not a subset of a cover class,
845 we translate it into the cheapest cover class. */
846 enum reg_class ira_class_translate[N_REG_CLASSES];
848 /* Set up array IRA_CLASS_TRANSLATE. */
850 setup_class_translate (void)
853 enum reg_class cover_class, best_class, *cl_ptr;
854 int i, cost, min_cost, best_cost;
856 for (cl = 0; cl < N_REG_CLASSES; cl++)
857 ira_class_translate[cl] = NO_REGS;
859 if (flag_ira_algorithm == IRA_ALGORITHM_PRIORITY)
860 for (cl = 0; cl < LIM_REG_CLASSES; cl++)
862 COPY_HARD_REG_SET (temp_hard_regset, reg_class_contents[cl]);
863 AND_COMPL_HARD_REG_SET (temp_hard_regset, no_unit_alloc_regs);
864 for (i = 0; i < ira_reg_class_cover_size; i++)
866 HARD_REG_SET temp_hard_regset2;
868 cover_class = ira_reg_class_cover[i];
869 COPY_HARD_REG_SET (temp_hard_regset2,
870 reg_class_contents[cover_class]);
871 AND_COMPL_HARD_REG_SET (temp_hard_regset2, no_unit_alloc_regs);
872 if (hard_reg_set_equal_p (temp_hard_regset, temp_hard_regset2))
873 ira_class_translate[cl] = cover_class;
876 for (i = 0; i < ira_reg_class_cover_size; i++)
878 cover_class = ira_reg_class_cover[i];
879 if (flag_ira_algorithm != IRA_ALGORITHM_PRIORITY)
880 for (cl_ptr = &alloc_reg_class_subclasses[cover_class][0];
881 (cl = *cl_ptr) != LIM_REG_CLASSES;
884 if (ira_class_translate[cl] == NO_REGS)
885 ira_class_translate[cl] = cover_class;
886 #ifdef ENABLE_IRA_CHECKING
889 COPY_HARD_REG_SET (temp_hard_regset, reg_class_contents[cl]);
890 AND_COMPL_HARD_REG_SET (temp_hard_regset, no_unit_alloc_regs);
891 if (! hard_reg_set_empty_p (temp_hard_regset))
896 ira_class_translate[cover_class] = cover_class;
898 /* For classes which are not fully covered by a cover class (in
899 other words covered by more than one cover class), use the cheapest cover class. */
901 for (cl = 0; cl < N_REG_CLASSES; cl++)
903 if (cl == NO_REGS || ira_class_translate[cl] != NO_REGS)
905 best_class = NO_REGS;
907 for (i = 0; i < ira_reg_class_cover_size; i++)
909 cover_class = ira_reg_class_cover[i];
910 COPY_HARD_REG_SET (temp_hard_regset,
911 reg_class_contents[cover_class]);
912 AND_HARD_REG_SET (temp_hard_regset, reg_class_contents[cl]);
913 AND_COMPL_HARD_REG_SET (temp_hard_regset, no_unit_alloc_regs);
914 if (! hard_reg_set_empty_p (temp_hard_regset))
917 for (mode = 0; mode < MAX_MACHINE_MODE; mode++)
919 cost = (ira_memory_move_cost[mode][cl][0]
920 + ira_memory_move_cost[mode][cl][1]);
924 if (best_class == NO_REGS || best_cost > min_cost)
926 best_class = cover_class;
927 best_cost = min_cost;
931 ira_class_translate[cl] = best_class;
935 /* Order numbers of cover classes in original target cover class
936 array, -1 for non-cover classes. */
937 static int cover_class_order[N_REG_CLASSES];
939 /* The function used to sort the important classes. */
941 comp_reg_classes_func (const void *v1p, const void *v2p)
943 enum reg_class cl1 = *(const enum reg_class *) v1p;
944 enum reg_class cl2 = *(const enum reg_class *) v2p;
947 cl1 = ira_class_translate[cl1];
948 cl2 = ira_class_translate[cl2];
949 if (cl1 != NO_REGS && cl2 != NO_REGS
950 && (diff = cover_class_order[cl1] - cover_class_order[cl2]) != 0)
952 return (int) cl1 - (int) cl2;
955 /* Reorder important classes according to the order of their cover
956 classes. Set up array ira_important_class_nums too. */
958 reorder_important_classes (void)
962 for (i = 0; i < N_REG_CLASSES; i++)
963 cover_class_order[i] = -1;
964 for (i = 0; i < ira_reg_class_cover_size; i++)
965 cover_class_order[ira_reg_class_cover[i]] = i;
966 qsort (ira_important_classes, ira_important_classes_num,
967 sizeof (enum reg_class), comp_reg_classes_func);
968 for (i = 0; i < ira_important_classes_num; i++)
969 ira_important_class_nums[ira_important_classes[i]] = i;
972 /* The biggest important reg_class inside the intersection of the two
973 reg_classes (that is calculated taking only hard registers
974 available for allocation into account). If both reg_classes
975 contain no hard registers available for allocation, the value is
976 calculated by taking all hard-registers including fixed ones into account. */
978 enum reg_class ira_reg_class_intersect[N_REG_CLASSES][N_REG_CLASSES];
980 /* True if the two classes (that is calculated taking only hard
981 registers available for allocation into account) intersect. */
983 bool ira_reg_classes_intersect_p[N_REG_CLASSES][N_REG_CLASSES];
985 /* Important classes with end marker LIM_REG_CLASSES which are
986 supersets of the given important class (the first index). That
987 includes the given class itself. This is calculated taking only hard
988 registers available for allocation into account. */
989 enum reg_class ira_reg_class_super_classes[N_REG_CLASSES][N_REG_CLASSES];
991 /* The biggest important reg_class inside the union of the two
992 reg_classes (that is calculated taking only hard registers
993 available for allocation into account). If both reg_classes
994 contain no hard registers available for allocation, the value is
995 calculated by taking all hard-registers including fixed ones into
996 account. In other words, the value is the corresponding
997 reg_class_subunion value. */
998 enum reg_class ira_reg_class_union[N_REG_CLASSES][N_REG_CLASSES];
1000 /* Set up the above reg class relations. */
1002 setup_reg_class_relations (void)
1004 int i, cl1, cl2, cl3;
1005 HARD_REG_SET intersection_set, union_set, temp_set2;
1006 bool important_class_p[N_REG_CLASSES];
1008 memset (important_class_p, 0, sizeof (important_class_p));
1009 for (i = 0; i < ira_important_classes_num; i++)
1010 important_class_p[ira_important_classes[i]] = true;
1011 for (cl1 = 0; cl1 < N_REG_CLASSES; cl1++)
1013 ira_reg_class_super_classes[cl1][0] = LIM_REG_CLASSES;
1014 for (cl2 = 0; cl2 < N_REG_CLASSES; cl2++)
1016 ira_reg_classes_intersect_p[cl1][cl2] = false;
1017 ira_reg_class_intersect[cl1][cl2] = NO_REGS;
1018 COPY_HARD_REG_SET (temp_hard_regset, reg_class_contents[cl1]);
1019 AND_COMPL_HARD_REG_SET (temp_hard_regset, no_unit_alloc_regs);
1020 COPY_HARD_REG_SET (temp_set2, reg_class_contents[cl2]);
1021 AND_COMPL_HARD_REG_SET (temp_set2, no_unit_alloc_regs);
1022 if (hard_reg_set_empty_p (temp_hard_regset)
1023 && hard_reg_set_empty_p (temp_set2))
1027 cl3 = reg_class_subclasses[cl1][i];
1028 if (cl3 == LIM_REG_CLASSES)
1030 if (reg_class_subset_p (ira_reg_class_intersect[cl1][cl2],
1031 (enum reg_class) cl3))
1032 ira_reg_class_intersect[cl1][cl2] = (enum reg_class) cl3;
1034 ira_reg_class_union[cl1][cl2] = reg_class_subunion[cl1][cl2];
1037 ira_reg_classes_intersect_p[cl1][cl2]
1038 = hard_reg_set_intersect_p (temp_hard_regset, temp_set2);
1039 if (important_class_p[cl1] && important_class_p[cl2]
1040 && hard_reg_set_subset_p (temp_hard_regset, temp_set2))
1044 p = &ira_reg_class_super_classes[cl1][0];
1045 while (*p != LIM_REG_CLASSES)
1047 *p++ = (enum reg_class) cl2;
1048 *p = LIM_REG_CLASSES;
1050 ira_reg_class_union[cl1][cl2] = NO_REGS;
1051 COPY_HARD_REG_SET (intersection_set, reg_class_contents[cl1]);
1052 AND_HARD_REG_SET (intersection_set, reg_class_contents[cl2]);
1053 AND_COMPL_HARD_REG_SET (intersection_set, no_unit_alloc_regs);
1054 COPY_HARD_REG_SET (union_set, reg_class_contents[cl1]);
1055 IOR_HARD_REG_SET (union_set, reg_class_contents[cl2]);
1056 AND_COMPL_HARD_REG_SET (union_set, no_unit_alloc_regs);
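/* Among the important classes, find the biggest one contained in the
   intersection of CL1 and CL2 and the biggest one contained in their
   union; ties prefer the class with fewer registers overall.  */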
1057 for (i = 0; i < ira_important_classes_num; i++)
1059 cl3 = ira_important_classes[i];
1060 COPY_HARD_REG_SET (temp_hard_regset, reg_class_contents[cl3]);
1061 AND_COMPL_HARD_REG_SET (temp_hard_regset, no_unit_alloc_regs);
1062 if (hard_reg_set_subset_p (temp_hard_regset, intersection_set))
1066 reg_class_contents[(int)
1067 ira_reg_class_intersect[cl1][cl2]]);
1068 AND_COMPL_HARD_REG_SET (temp_set2, no_unit_alloc_regs);
1069 if (! hard_reg_set_subset_p (temp_hard_regset, temp_set2)
1070 /* Ignore unavailable hard registers and prefer
1071 smallest class for debugging purposes. */
1072 || (hard_reg_set_equal_p (temp_hard_regset, temp_set2)
1073 && hard_reg_set_subset_p
1074 (reg_class_contents[cl3],
1076 [(int) ira_reg_class_intersect[cl1][cl2]])))
1077 ira_reg_class_intersect[cl1][cl2] = (enum reg_class) cl3;
1079 if (hard_reg_set_subset_p (temp_hard_regset, union_set))
1083 reg_class_contents[(int) ira_reg_class_union[cl1][cl2]]);
1084 AND_COMPL_HARD_REG_SET (temp_set2, no_unit_alloc_regs);
1085 if (ira_reg_class_union[cl1][cl2] == NO_REGS
1086 || (hard_reg_set_subset_p (temp_set2, temp_hard_regset)
1088 && (! hard_reg_set_equal_p (temp_set2,
1090 /* Ignore unavailable hard registers and
1091 prefer smallest class for debugging
1093 || hard_reg_set_subset_p
1094 (reg_class_contents[cl3],
1096 [(int) ira_reg_class_union[cl1][cl2]]))))
1097 ira_reg_class_union[cl1][cl2] = (enum reg_class) cl3;
1104 /* Output all cover classes and the translation map into file F. */
1106 print_class_cover (FILE *f)
1108 static const char *const reg_class_names[] = REG_CLASS_NAMES;
1111 fprintf (f, "Class cover:\n");
1112 for (i = 0; i < ira_reg_class_cover_size; i++)
1113 fprintf (f, " %s", reg_class_names[ira_reg_class_cover[i]]);
1114 fprintf (f, "\nClass translation:\n");
1115 for (i = 0; i < N_REG_CLASSES; i++)
1116 fprintf (f, " %s -> %s\n", reg_class_names[i],
1117 reg_class_names[ira_class_translate[i]]);
1120 /* Output all cover classes and the translation map into stderr. */
1123 ira_debug_class_cover (void)
1125 print_class_cover (stderr);
1128 /* Set up different arrays concerning class subsets, cover and
1129 important classes. */
1131 find_reg_class_closure (void)
1133 setup_reg_subclasses ();
1134 setup_cover_and_important_classes ();
1135 setup_class_translate ();
1136 reorder_important_classes ();
1137 setup_reg_class_relations ();
1142 /* Map: hard register number -> cover class it belongs to. If the
1143 corresponding class is NO_REGS, the hard register is not available for the allocation. */
1145 enum reg_class ira_hard_regno_cover_class[FIRST_PSEUDO_REGISTER];
1147 /* Set up the array above. */
1149 setup_hard_regno_cover_class (void)
1154 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
1156 ira_hard_regno_cover_class[i] = NO_REGS;
1157 for (j = 0; j < ira_reg_class_cover_size; j++)
1159 cl = ira_reg_class_cover[j];
1160 if (ira_class_hard_reg_index[cl][i] >= 0)
1162 ira_hard_regno_cover_class[i] = cl;
1172 /* Map: register class x machine mode -> number of hard registers of
1173 given class needed to store value of given mode. If the number is
1174 different, the size will be negative. */
1175 int ira_reg_class_nregs[N_REG_CLASSES][MAX_MACHINE_MODE];
1177 /* Maximal value of the previous array elements. */
1180 /* Form IRA_REG_CLASS_NREGS map. */
1182 setup_reg_class_nregs (void)
1187 for (cl = 0; cl < N_REG_CLASSES; cl++)
1188 for (m = 0; m < MAX_MACHINE_MODE; m++)
1190 ira_reg_class_nregs[cl][m] = CLASS_MAX_NREGS ((enum reg_class) cl,
1191 (enum machine_mode) m);
1192 if (ira_max_nregs < ira_reg_class_nregs[cl][m])
1193 ira_max_nregs = ira_reg_class_nregs[cl][m];
1199 /* Array whose values are the sets of hard registers, available for
1200 allocation in the given register class, whose HARD_REGNO_MODE_OK
1201 values for the given mode are zero. */
1202 HARD_REG_SET prohibited_class_mode_regs[N_REG_CLASSES][NUM_MACHINE_MODES];
1204 /* Set up PROHIBITED_CLASS_MODE_REGS. */
1206 setup_prohibited_class_mode_regs (void)
1208 int i, j, k, hard_regno;
1211 for (i = 0; i < ira_reg_class_cover_size; i++)
1213 cl = ira_reg_class_cover[i];
1214 for (j = 0; j < NUM_MACHINE_MODES; j++)
1216 CLEAR_HARD_REG_SET (prohibited_class_mode_regs[cl][j]);
1217 for (k = ira_class_hard_regs_num[cl] - 1; k >= 0; k--)
1219 hard_regno = ira_class_hard_regs[cl][k];
1220 if (! HARD_REGNO_MODE_OK (hard_regno, (enum machine_mode) j))
1221 SET_HARD_REG_BIT (prohibited_class_mode_regs[cl][j],
1230 /* Allocate and initialize IRA_REGISTER_MOVE_COST,
1231 IRA_MAY_MOVE_IN_COST, and IRA_MAY_MOVE_OUT_COST for MODE if it is not done yet. */
1234 ira_init_register_move_cost (enum machine_mode mode)
1238 ira_assert (ira_register_move_cost[mode] == NULL
1239 && ira_may_move_in_cost[mode] == NULL
1240 && ira_may_move_out_cost[mode] == NULL);
1241 if (move_cost[mode] == NULL)
1242 init_move_cost (mode);
1243 ira_register_move_cost[mode] = move_cost[mode];
1244 /* Don't use ira_allocate because the tables exist out of the scope of an IRA call. */
1246 ira_may_move_in_cost[mode]
1247 = (move_table *) xmalloc (sizeof (move_table) * N_REG_CLASSES);
1248 memcpy (ira_may_move_in_cost[mode], may_move_in_cost[mode],
1249 sizeof (move_table) * N_REG_CLASSES);
1250 ira_may_move_out_cost[mode]
1251 = (move_table *) xmalloc (sizeof (move_table) * N_REG_CLASSES);
1252 memcpy (ira_may_move_out_cost[mode], may_move_out_cost[mode],
1253 sizeof (move_table) * N_REG_CLASSES);
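/* Moves between classes related by the ira_class_subset_p relation
   computed earlier are treated as free in these tables.  */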
1254 for (cl1 = 0; cl1 < N_REG_CLASSES; cl1++)
1256 for (cl2 = 0; cl2 < N_REG_CLASSES; cl2++)
1258 if (ira_class_subset_p[cl1][cl2])
1259 ira_may_move_in_cost[mode][cl1][cl2] = 0;
1260 if (ira_class_subset_p[cl2][cl1])
1261 ira_may_move_out_cost[mode][cl1][cl2] = 0;
1268 /* This is called once during compiler work. It sets up
1269 different arrays whose values don't depend on the compiled function. */
1272 ira_init_once (void)
1276 for (mode = 0; mode < MAX_MACHINE_MODE; mode++)
1278 ira_register_move_cost[mode] = NULL;
1279 ira_may_move_in_cost[mode] = NULL;
1280 ira_may_move_out_cost[mode] = NULL;
1282 ira_init_costs_once ();
1285 /* Free ira_register_move_cost, ira_may_move_in_cost, and
1286 ira_may_move_out_cost for each mode. */
1288 free_register_move_costs (void)
1292 for (mode = 0; mode < MAX_MACHINE_MODE; mode++)
1294 if (ira_may_move_in_cost[mode] != NULL)
1295 free (ira_may_move_in_cost[mode]);
1296 if (ira_may_move_out_cost[mode] != NULL)
1297 free (ira_may_move_out_cost[mode]);
1298 ira_register_move_cost[mode] = NULL;
1299 ira_may_move_in_cost[mode] = NULL;
1300 ira_may_move_out_cost[mode] = NULL;
1304 /* This is called every time the register related information is changed. */
1309 free_register_move_costs ();
1310 setup_reg_mode_hard_regset ();
1311 setup_alloc_regs (flag_omit_frame_pointer != 0);
1312 setup_class_subset_and_memory_move_costs ();
1313 find_reg_class_closure ();
1314 setup_hard_regno_cover_class ();
1315 setup_reg_class_nregs ();
1316 setup_prohibited_class_mode_regs ();
1320 /* Function called once at the end of compiler work. */
1322 ira_finish_once (void)
1324 ira_finish_costs_once ();
1325 free_register_move_costs ();
1330 /* Array whose values are hard regset of hard registers for which
1331 move of the hard register in given mode into itself is prohibited. */
1333 HARD_REG_SET ira_prohibited_mode_move_regs[NUM_MACHINE_MODES];
1335 /* Flag indicating that the above array has been initialized. */
1336 static bool ira_prohibited_mode_move_regs_initialized_p = false;
1338 /* Set up IRA_PROHIBITED_MODE_MOVE_REGS. */
1340 setup_prohibited_mode_move_regs (void)
1343 rtx test_reg1, test_reg2, move_pat, move_insn;
1345 if (ira_prohibited_mode_move_regs_initialized_p)
1347 ira_prohibited_mode_move_regs_initialized_p = true;
1348 test_reg1 = gen_rtx_REG (VOIDmode, 0);
1349 test_reg2 = gen_rtx_REG (VOIDmode, 0);
1350 move_pat = gen_rtx_SET (VOIDmode, test_reg1, test_reg2);
1351 move_insn = gen_rtx_INSN (VOIDmode, 0, 0, 0, 0, 0, move_pat, -1, 0);
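/* For every mode and hard register, try to recognize a move of the
   register into itself; if the move insn is not recognized or its
   constraints cannot be satisfied, the register remains marked as
   prohibited for that mode.  */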
1352 for (i = 0; i < NUM_MACHINE_MODES; i++)
1354 SET_HARD_REG_SET (ira_prohibited_mode_move_regs[i]);
1355 for (j = 0; j < FIRST_PSEUDO_REGISTER; j++)
1357 if (! HARD_REGNO_MODE_OK (j, (enum machine_mode) i))
1359 SET_REGNO (test_reg1, j);
1360 PUT_MODE (test_reg1, (enum machine_mode) i);
1361 SET_REGNO (test_reg2, j);
1362 PUT_MODE (test_reg2, (enum machine_mode) i);
1363 INSN_CODE (move_insn) = -1;
1364 recog_memoized (move_insn);
1365 if (INSN_CODE (move_insn) < 0)
1367 extract_insn (move_insn);
1368 if (! constrain_operands (1))
1370 CLEAR_HARD_REG_BIT (ira_prohibited_mode_move_regs[i], j);
1377 /* Function specific hard registers that cannot be used for the
1378 register allocation. */
1379 HARD_REG_SET ira_no_alloc_regs;
1381 /* Return TRUE if *LOC contains an asm. */
1383 insn_contains_asm_1 (rtx *loc, void *data ATTRIBUTE_UNUSED)
1387 if (GET_CODE (*loc) == ASM_OPERANDS)
1393 /* Return TRUE if INSN contains an ASM. */
1395 insn_contains_asm (rtx insn)
1397 return for_each_rtx (&insn, insn_contains_asm_1, NULL);
1400 /* Set up regs_asm_clobbered. */
1402 compute_regs_asm_clobbered (char *regs_asm_clobbered)
1406 memset (regs_asm_clobbered, 0, sizeof (char) * FIRST_PSEUDO_REGISTER);
1411 FOR_BB_INSNS_REVERSE (bb, insn)
1415 if (insn_contains_asm (insn))
1416 for (def_rec = DF_INSN_DEFS (insn); *def_rec; def_rec++)
1418 df_ref def = *def_rec;
1419 unsigned int dregno = DF_REF_REGNO (def);
1420 if (dregno < FIRST_PSEUDO_REGISTER)
1423 enum machine_mode mode = GET_MODE (DF_REF_REAL_REG (def));
1424 unsigned int end = dregno
1425 + hard_regno_nregs[dregno][mode] - 1;
1427 for (i = dregno; i <= end; ++i)
1428 regs_asm_clobbered[i] = 1;
1436 /* Set up ELIMINABLE_REGSET, IRA_NO_ALLOC_REGS, and REGS_EVER_LIVE. */
1438 ira_setup_eliminable_regset (void)
1440 /* Like regs_ever_live, but 1 if a reg is set or clobbered from an
1441 asm. Unlike regs_ever_live, elements of this array corresponding
1442 to eliminable regs (like the frame pointer) are set if an asm
1444 char *regs_asm_clobbered
1445 = (char *) alloca (FIRST_PSEUDO_REGISTER * sizeof (char));
1446 #ifdef ELIMINABLE_REGS
1448 static const struct {const int from, to; } eliminables[] = ELIMINABLE_REGS;
1450 /* FIXME: If EXIT_IGNORE_STACK is set, we will not save and restore
1451 sp for alloca. So we can't eliminate the frame pointer in that
1452 case. At some point, we should improve this by emitting the
1453 sp-adjusting insns for this case. */
1455 = (! flag_omit_frame_pointer
1456 || (cfun->calls_alloca && EXIT_IGNORE_STACK)
1457 /* We need the frame pointer to catch stack overflow exceptions
1458 if the stack pointer is moving. */
1459 || (flag_stack_check && STACK_CHECK_MOVING_SP)
1460 || crtl->accesses_prior_frames
1461 || crtl->stack_realign_needed
1462 || targetm.frame_pointer_required ());
1464 frame_pointer_needed = need_fp;
1466 COPY_HARD_REG_SET (ira_no_alloc_regs, no_unit_alloc_regs);
1467 CLEAR_HARD_REG_SET (eliminable_regset);
1469 compute_regs_asm_clobbered (regs_asm_clobbered);
1470 /* Build the regset of all eliminable registers and show we can't
1471 use those that we already know won't be eliminated. */
1472 #ifdef ELIMINABLE_REGS
1473 for (i = 0; i < (int) ARRAY_SIZE (eliminables); i++)
1476 = (! targetm.can_eliminate (eliminables[i].from, eliminables[i].to)
1477 || (eliminables[i].to == STACK_POINTER_REGNUM && need_fp));
1479 if (! regs_asm_clobbered[eliminables[i].from])
1481 SET_HARD_REG_BIT (eliminable_regset, eliminables[i].from);
1484 SET_HARD_REG_BIT (ira_no_alloc_regs, eliminables[i].from);
1486 else if (cannot_elim)
1487 error ("%s cannot be used in asm here",
1488 reg_names[eliminables[i].from]);
1490 df_set_regs_ever_live (eliminables[i].from, true);
1492 #if FRAME_POINTER_REGNUM != HARD_FRAME_POINTER_REGNUM
1493 if (! regs_asm_clobbered[HARD_FRAME_POINTER_REGNUM])
1495 SET_HARD_REG_BIT (eliminable_regset, HARD_FRAME_POINTER_REGNUM);
1497 SET_HARD_REG_BIT (ira_no_alloc_regs, HARD_FRAME_POINTER_REGNUM);
1500 error ("%s cannot be used in asm here",
1501 reg_names[HARD_FRAME_POINTER_REGNUM]);
1503 df_set_regs_ever_live (HARD_FRAME_POINTER_REGNUM, true);
1507 if (! regs_asm_clobbered[FRAME_POINTER_REGNUM])
1509 SET_HARD_REG_BIT (eliminable_regset, FRAME_POINTER_REGNUM);
1511 SET_HARD_REG_BIT (ira_no_alloc_regs, FRAME_POINTER_REGNUM);
1514 error ("%s cannot be used in asm here", reg_names[FRAME_POINTER_REGNUM]);
1516 df_set_regs_ever_live (FRAME_POINTER_REGNUM, true);
1522 /* The length of the following two arrays. */
1523 int ira_reg_equiv_len;
1525 /* The element value is TRUE if the corresponding regno value is invariant. */
1527 bool *ira_reg_equiv_invariant_p;
1529 /* The element value is the equiv constant of the given pseudo-register or NULL_RTX. */
1531 rtx *ira_reg_equiv_const;
1533 /* Set up the two arrays declared above. */
1535 find_reg_equiv_invariant_const (void)
1539 rtx list, insn, note, constant, x;
1541 for (i = FIRST_PSEUDO_REGISTER; i < reg_equiv_init_size; i++)
1543 constant = NULL_RTX;
1544 invariant_p = false;
1545 for (list = reg_equiv_init[i]; list != NULL_RTX; list = XEXP (list, 1))
1547 insn = XEXP (list, 0);
1548 note = find_reg_note (insn, REG_EQUIV, NULL_RTX);
1550 if (note == NULL_RTX)
1555 if (! function_invariant_p (x)
1557 /* A function invariant is often CONSTANT_P but may
1558 include a register. We promise to only pass CONSTANT_P
1559 objects to LEGITIMATE_PIC_OPERAND_P. */
1560 || (CONSTANT_P (x) && LEGITIMATE_PIC_OPERAND_P (x)))
1562 /* It can happen that a REG_EQUIV note contains a MEM
1563 that is not a legitimate memory operand. As later
1564 stages of the reload assume that all addresses found
1565 in the reg_equiv_* arrays were originally legitimate,
1566 we ignore such REG_EQUIV notes. */
1567 if (memory_operand (x, VOIDmode))
1568 invariant_p = MEM_READONLY_P (x);
1569 else if (function_invariant_p (x))
1571 if (GET_CODE (x) == PLUS
1572 || x == frame_pointer_rtx || x == arg_pointer_rtx)
1579 ira_reg_equiv_invariant_p[i] = invariant_p;
1580 ira_reg_equiv_const[i] = constant;
1586 /* Vector of substitutions of register numbers,
1587 used to map pseudo regs into hardware regs.
1588 This is set up as a result of register allocation.
1589 Element N is the hard reg assigned to pseudo reg N,
1590 or is -1 if no hard reg was assigned.
1591 If N is a hard reg number, element N is N. */
1592 short *reg_renumber;
1594 /* Set up REG_RENUMBER and CALLER_SAVE_NEEDED (used by reload) from
1595 the allocation found by IRA. */
1597 setup_reg_renumber (void)
1599 int regno, hard_regno;
1601 ira_allocno_iterator ai;
1603 caller_save_needed = 0;
1604 FOR_EACH_ALLOCNO (a, ai)
1606 /* There are no caps at this point. */
1607 ira_assert (ALLOCNO_CAP_MEMBER (a) == NULL);
1608 if (! ALLOCNO_ASSIGNED_P (a))
1609 /* It can happen if A is not referenced but partially anticipated
1610 somewhere in a region. */
1611 ALLOCNO_ASSIGNED_P (a) = true;
1612 ira_free_allocno_updated_costs (a);
1613 hard_regno = ALLOCNO_HARD_REGNO (a);
1614 regno = (int) REGNO (ALLOCNO_REG (a));
1615 reg_renumber[regno] = (hard_regno < 0 ? -1 : hard_regno);
1616 if (hard_regno >= 0 && ALLOCNO_CALLS_CROSSED_NUM (a) != 0
1617 && ! ira_hard_reg_not_in_set_p (hard_regno, ALLOCNO_MODE (a),
1620 ira_assert (!optimize || flag_caller_saves
1621 || regno >= ira_reg_equiv_len
1622 || ira_reg_equiv_const[regno]
1623 || ira_reg_equiv_invariant_p[regno]);
1624 caller_save_needed = 1;
1629 /* Set up allocno assignment flags for further allocation improvements. */
1632 setup_allocno_assignment_flags (void)
1636 ira_allocno_iterator ai;
1638 FOR_EACH_ALLOCNO (a, ai)
1640 if (! ALLOCNO_ASSIGNED_P (a))
1641 /* It can happen if A is not referenced but partially anticipated
1642 somewhere in a region. */
1643 ira_free_allocno_updated_costs (a);
1644 hard_regno = ALLOCNO_HARD_REGNO (a);
1645 /* Don't assign hard registers to allocnos which are the destination
1646 of a removed store at the end of a loop. It makes no sense to keep
1647 the same value in different hard registers. It is also
1648 impossible to assign hard registers correctly to such
1649 allocnos because the cost info and info about intersected
1650 calls are incorrect for them. */
1651 ALLOCNO_ASSIGNED_P (a) = (hard_regno >= 0
1652 || ALLOCNO_MEM_OPTIMIZED_DEST_P (a)
1653 || (ALLOCNO_MEMORY_COST (a)
1654 - ALLOCNO_COVER_CLASS_COST (a)) < 0);
1655 ira_assert (hard_regno < 0
1656 || ! ira_hard_reg_not_in_set_p (hard_regno, ALLOCNO_MODE (a),
1658 [ALLOCNO_COVER_CLASS (a)]));
1662 /* Evaluate overall allocation cost and the costs for using hard
1663 registers and memory for allocnos. */
1665 calculate_allocation_cost (void)
1667 int hard_regno, cost;
1669 ira_allocno_iterator ai;
1671 ira_overall_cost = ira_reg_cost = ira_mem_cost = 0;
1672 FOR_EACH_ALLOCNO (a, ai)
1674 hard_regno = ALLOCNO_HARD_REGNO (a);
1675 ira_assert (hard_regno < 0
1676 || ! ira_hard_reg_not_in_set_p
1677 (hard_regno, ALLOCNO_MODE (a),
1678 reg_class_contents[ALLOCNO_COVER_CLASS (a)]));
1681 cost = ALLOCNO_MEMORY_COST (a);
1682 ira_mem_cost += cost;
1684 else if (ALLOCNO_HARD_REG_COSTS (a) != NULL)
1686 cost = (ALLOCNO_HARD_REG_COSTS (a)
1687 [ira_class_hard_reg_index
1688 [ALLOCNO_COVER_CLASS (a)][hard_regno]]);
1689 ira_reg_cost += cost;
1693 cost = ALLOCNO_COVER_CLASS_COST (a);
1694 ira_reg_cost += cost;
1696 ira_overall_cost += cost;
1699 if (internal_flag_ira_verbose > 0 && ira_dump_file != NULL)
1701 fprintf (ira_dump_file,
1702 "+++Costs: overall %d, reg %d, mem %d, ld %d, st %d, move %d\n",
1703 ira_overall_cost, ira_reg_cost, ira_mem_cost,
1704 ira_load_cost, ira_store_cost, ira_shuffle_cost);
1705 fprintf (ira_dump_file, "+++ move loops %d, new jumps %d\n",
1706 ira_move_loops_num, ira_additional_jumps_num);
1711 #ifdef ENABLE_IRA_CHECKING
1712 /* Check the correctness of the allocation. We do need this because
1713 of the complicated code that transforms the internal representation of
1714 more than one region into a one-region representation. */
1716 check_allocation (void)
1718 ira_allocno_t a, conflict_a;
1719 int hard_regno, conflict_hard_regno, nregs, conflict_nregs;
1720 ira_allocno_conflict_iterator aci;
1721 ira_allocno_iterator ai;
1723 FOR_EACH_ALLOCNO (a, ai)
1725 if (ALLOCNO_CAP_MEMBER (a) != NULL
1726 || (hard_regno = ALLOCNO_HARD_REGNO (a)) < 0)
1728 nregs = hard_regno_nregs[hard_regno][ALLOCNO_MODE (a)];
1729 FOR_EACH_ALLOCNO_CONFLICT (a, conflict_a, aci)
1730 if ((conflict_hard_regno = ALLOCNO_HARD_REGNO (conflict_a)) >= 0)
1734 [conflict_hard_regno][ALLOCNO_MODE (conflict_a)]);
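/* Complain if the hard register ranges assigned to the two
   conflicting allocnos overlap.  */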
1735 if ((conflict_hard_regno <= hard_regno
1736 && hard_regno < conflict_hard_regno + conflict_nregs)
1737 || (hard_regno <= conflict_hard_regno
1738 && conflict_hard_regno < hard_regno + nregs))
1740 fprintf (stderr, "bad allocation for %d and %d\n",
1741 ALLOCNO_REGNO (a), ALLOCNO_REGNO (conflict_a));
1749 /* Fix values of array REG_EQUIV_INIT after live range splitting done by IRA. */
1752 fix_reg_equiv_init (void)
1754 int max_regno = max_reg_num ();
1756 rtx x, prev, next, insn, set;
1758 if (reg_equiv_init_size < max_regno)
1761 = (rtx *) ggc_realloc (reg_equiv_init, max_regno * sizeof (rtx));
1762 while (reg_equiv_init_size < max_regno)
1763 reg_equiv_init[reg_equiv_init_size++] = NULL_RTX;
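/* Move each equivalence-initializing insn from the list of its
   original pseudo-register to the list of the pseudo-register that
   actually appears in the insn after live range splitting.  */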
1764 for (i = FIRST_PSEUDO_REGISTER; i < reg_equiv_init_size; i++)
1765 for (prev = NULL_RTX, x = reg_equiv_init[i]; x != NULL_RTX; x = next)
1769 set = single_set (insn);
1770 ira_assert (set != NULL_RTX
1771 && (REG_P (SET_DEST (set)) || REG_P (SET_SRC (set))));
1772 if (REG_P (SET_DEST (set))
1773 && ((int) REGNO (SET_DEST (set)) == i
1774 || (int) ORIGINAL_REGNO (SET_DEST (set)) == i))
1775 new_regno = REGNO (SET_DEST (set));
1776 else if (REG_P (SET_SRC (set))
1777 && ((int) REGNO (SET_SRC (set)) == i
1778 || (int) ORIGINAL_REGNO (SET_SRC (set)) == i))
1779 new_regno = REGNO (SET_SRC (set));
1786 if (prev == NULL_RTX)
1787 reg_equiv_init[i] = next;
1789 XEXP (prev, 1) = next;
1790 XEXP (x, 1) = reg_equiv_init[new_regno];
1791 reg_equiv_init[new_regno] = x;
1797 #ifdef ENABLE_IRA_CHECKING
1798 /* Print redundant memory-memory copies. */
1800 print_redundant_copies (void)
1804 ira_copy_t cp, next_cp;
1805 ira_allocno_iterator ai;
1807 FOR_EACH_ALLOCNO (a, ai)
1809 if (ALLOCNO_CAP_MEMBER (a) != NULL)
1812 hard_regno = ALLOCNO_HARD_REGNO (a);
1813 if (hard_regno >= 0)
1815 for (cp = ALLOCNO_COPIES (a); cp != NULL; cp = next_cp)
1817 next_cp = cp->next_first_allocno_copy;
1820 next_cp = cp->next_second_allocno_copy;
1821 if (internal_flag_ira_verbose > 4 && ira_dump_file != NULL
1822 && cp->insn != NULL_RTX
1823 && ALLOCNO_HARD_REGNO (cp->first) == hard_regno)
1824 fprintf (ira_dump_file,
1825 " Redundant move from %d(freq %d):%d\n",
1826 INSN_UID (cp->insn), cp->freq, hard_regno);
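/* As the heading comment says, these are memory-memory copies: both
   allocnos of a reported copy were left without a hard register, so the
   recorded move only shuffles a value between stack locations.  */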
1832 /* Setup preferred and alternative classes for new pseudo-registers
1833 created by IRA starting with START. */
1835 setup_preferred_alternate_classes_for_new_pseudos (int start)
1838 int max_regno = max_reg_num ();
1840 for (i = start; i < max_regno; i++)
1842 old_regno = ORIGINAL_REGNO (regno_reg_rtx[i]);
1843 ira_assert (i != old_regno);
1844 setup_reg_classes (i, reg_preferred_class (old_regno),
1845 reg_alternate_class (old_regno),
1846 reg_cover_class (old_regno));
1847 if (internal_flag_ira_verbose > 2 && ira_dump_file != NULL)
1848 fprintf (ira_dump_file,
1849 " New r%d: setting preferred %s, alternative %s\n",
1850 i, reg_class_names[reg_preferred_class (old_regno)],
1851 reg_class_names[reg_alternate_class (old_regno)]);
1857 /* Regional allocation can create new pseudo-registers. This function
1858 expands some arrays for pseudo-registers. */
1860 expand_reg_info (int old_size)
1863 int size = max_reg_num ();
1866 for (i = old_size; i < size; i++)
1867 setup_reg_classes (i, GENERAL_REGS, ALL_REGS, GENERAL_REGS);
1870 /* Return TRUE if there is too high register pressure in the function.
1871 It is used to decide when stack slot sharing is worth doing. */
1873 too_high_register_pressure_p (void)
1876 enum reg_class cover_class;
1878 for (i = 0; i < ira_reg_class_cover_size; i++)
1880 cover_class = ira_reg_class_cover[i];
1881 if (ira_loop_tree_root->reg_pressure[cover_class] > 10000)
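/* The pressure checked above is that of the root region, i.e. of the whole
   function, for each cover class.  When it exceeds the threshold, ira ()
   below disables stack-slot sharing for this function.  */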
1889 /* Indicate that hard register number FROM was eliminated and replaced with
1890 an offset from hard register number TO. The status of hard registers live
1891 at the start of a basic block is updated by replacing a use of FROM with a use of TO. */
1895 mark_elimination (int from, int to)
1901 /* We don't use LIVE info in IRA. */
1902 regset r = DF_LR_IN (bb);
1904 if (REGNO_REG_SET_P (r, from))
1906 CLEAR_REGNO_REG_SET (r, from);
1907 SET_REGNO_REG_SET (r, to);
1916 /* Set when a REG_EQUIV note is found or created. Use to
1917 keep track of what memory accesses might be created later, e.g. by reload. */
1921 /* The list of each instruction which initializes this register. */
1923 /* Loop depth is used to recognize equivalences which appear
1924 to be present within the same loop (or in an inner loop). */
1926 /* Nonzero if this had a preexisting REG_EQUIV note. */
1927 int is_arg_equivalence;
1928 /* Set when an attempt should be made to replace a register
1929 with the associated src_p entry. */
1933 /* reg_equiv[N] (where N is a pseudo reg number) is the equivalence
1934 structure for that register. */
1935 static struct equivalence *reg_equiv;
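/* For orientation, the fields of struct equivalence referenced below are:
   replacement (the equivalent value taken from a REG_EQUIV/REG_EQUAL note),
   src_p (address of the SET_SRC that may later be substituted for the
   register), init_insns (an INSN_LIST of the initializing insns),
   loop_depth, is_arg_equivalence and replace.  This list is inferred from
   the uses in the code below.  */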
1937 /* Used for communication between the following two functions: contains
1938 a MEM that we wish to ensure remains unchanged. */
1939 static rtx equiv_mem;
1941 /* Set nonzero if EQUIV_MEM is modified. */
1942 static int equiv_mem_modified;
1944 /* If EQUIV_MEM is modified by modifying DEST, indicate that it is modified.
1945 Called via note_stores. */
1947 validate_equiv_mem_from_store (rtx dest, const_rtx set ATTRIBUTE_UNUSED,
1948 void *data ATTRIBUTE_UNUSED)
1951 && reg_overlap_mentioned_p (dest, equiv_mem))
1953 && true_dependence (dest, VOIDmode, equiv_mem, rtx_varies_p)))
1954 equiv_mem_modified = 1;
1957 /* Verify that no store between START and the death of REG invalidates
1958 MEMREF. MEMREF is invalidated by modifying a register used in MEMREF,
1959 by storing into an overlapping memory location, or with a non-const call.
1962 Return 1 if MEMREF remains valid. */
1964 validate_equiv_mem (rtx start, rtx reg, rtx memref)
1970 equiv_mem_modified = 0;
1972 /* If the memory reference has side effects or is volatile, it isn't a
1973 valid equivalence. */
1974 if (side_effects_p (memref))
1977 for (insn = start; insn && ! equiv_mem_modified; insn = NEXT_INSN (insn))
1979 if (! INSN_P (insn))
1982 if (find_reg_note (insn, REG_DEAD, reg))
1985 if (CALL_P (insn) && ! MEM_READONLY_P (memref)
1986 && ! RTL_CONST_OR_PURE_CALL_P (insn))
1989 note_stores (PATTERN (insn), validate_equiv_mem_from_store, NULL);
1991 /* If a register mentioned in MEMREF is modified via an
1992 auto-increment, we lose the equivalence. Do the same if one
1993 dies; although we could extend the life, it doesn't seem worth the trouble. */
1996 for (note = REG_NOTES (insn); note; note = XEXP (note, 1))
1997 if ((REG_NOTE_KIND (note) == REG_INC
1998 || REG_NOTE_KIND (note) == REG_DEAD)
1999 && REG_P (XEXP (note, 0))
2000 && reg_overlap_mentioned_p (XEXP (note, 0), memref))
2007 /* Returns zero if X is known to be invariant. */
2009 equiv_init_varies_p (rtx x)
2011 RTX_CODE code = GET_CODE (x);
2018 return !MEM_READONLY_P (x) || equiv_init_varies_p (XEXP (x, 0));
2030 return reg_equiv[REGNO (x)].replace == 0 && rtx_varies_p (x, 0);
2033 if (MEM_VOLATILE_P (x))
2042 fmt = GET_RTX_FORMAT (code);
2043 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
2046 if (equiv_init_varies_p (XEXP (x, i)))
2049 else if (fmt[i] == 'E')
2052 for (j = 0; j < XVECLEN (x, i); j++)
2053 if (equiv_init_varies_p (XVECEXP (x, i, j)))
2060 /* Returns nonzero if X (used to initialize register REGNO) is movable.
2061 X is only movable if the registers it uses have equivalent initializations
2062 which appear to be within the same loop (or in an inner loop) and movable
2063 or if they are not candidates for local_alloc and don't vary. */
2065 equiv_init_movable_p (rtx x, int regno)
2069 enum rtx_code code = GET_CODE (x);
2074 return equiv_init_movable_p (SET_SRC (x), regno);
2089 return (reg_equiv[REGNO (x)].loop_depth >= reg_equiv[regno].loop_depth
2090 && reg_equiv[REGNO (x)].replace)
2091 || (REG_BASIC_BLOCK (REGNO (x)) < NUM_FIXED_BLOCKS && ! rtx_varies_p (x, 0));
2093 case UNSPEC_VOLATILE:
2097 if (MEM_VOLATILE_P (x))
2106 fmt = GET_RTX_FORMAT (code);
2107 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
2111 if (! equiv_init_movable_p (XEXP (x, i), regno))
2115 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
2116 if (! equiv_init_movable_p (XVECEXP (x, i, j), regno))
2124 /* TRUE if X uses any registers for which reg_equiv[REGNO].replace is true. */
2126 contains_replace_regs (rtx x)
2130 enum rtx_code code = GET_CODE (x);
2147 return reg_equiv[REGNO (x)].replace;
2153 fmt = GET_RTX_FORMAT (code);
2154 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
2158 if (contains_replace_regs (XEXP (x, i)))
2162 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
2163 if (contains_replace_regs (XVECEXP (x, i, j)))
2171 /* TRUE if X references a memory location that would be affected by a store to MEMREF. */
2174 memref_referenced_p (rtx memref, rtx x)
2178 enum rtx_code code = GET_CODE (x);
2196 return (reg_equiv[REGNO (x)].replacement
2197 && memref_referenced_p (memref,
2198 reg_equiv[REGNO (x)].replacement));
2201 if (true_dependence (memref, VOIDmode, x, rtx_varies_p))
2206 /* If we are setting a MEM, it doesn't count (its address does), but any
2207 other SET_DEST that has a MEM in it is referencing the MEM. */
2208 if (MEM_P (SET_DEST (x)))
2210 if (memref_referenced_p (memref, XEXP (SET_DEST (x), 0)))
2213 else if (memref_referenced_p (memref, SET_DEST (x)))
2216 return memref_referenced_p (memref, SET_SRC (x));
2222 fmt = GET_RTX_FORMAT (code);
2223 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
2227 if (memref_referenced_p (memref, XEXP (x, i)))
2231 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
2232 if (memref_referenced_p (memref, XVECEXP (x, i, j)))
2240 /* TRUE if some insn in the range (START, END] references a memory location
2241 that would be affected by a store to MEMREF. */
2243 memref_used_between_p (rtx memref, rtx start, rtx end)
2247 for (insn = NEXT_INSN (start); insn != NEXT_INSN (end);
2248 insn = NEXT_INSN (insn))
2250 if (!NONDEBUG_INSN_P (insn))
2253 if (memref_referenced_p (memref, PATTERN (insn)))
2256 /* Nonconst functions may access memory. */
2257 if (CALL_P (insn) && (! RTL_CONST_CALL_P (insn)))
2264 /* Mark REG as having no known equivalence.
2265 Some instructions might have been processed before and furnished
2266 with REG_EQUIV notes for this register; these notes will have to be removed.
2268 STORE is the piece of RTL that does the non-constant / conflicting
2269 assignment - a SET, CLOBBER or REG_INC note. It is currently not used,
2270 but needs to be there because this function is called from note_stores. */
2272 no_equiv (rtx reg, const_rtx store ATTRIBUTE_UNUSED, void *data ATTRIBUTE_UNUSED)
2279 regno = REGNO (reg);
2280 list = reg_equiv[regno].init_insns;
2281 if (list == const0_rtx)
2283 reg_equiv[regno].init_insns = const0_rtx;
2284 reg_equiv[regno].replacement = NULL_RTX;
2285 /* This doesn't matter for equivalences made for argument registers; we
2286 should keep their initialization insns. */
2287 if (reg_equiv[regno].is_arg_equivalence)
2289 reg_equiv_init[regno] = NULL_RTX;
2290 for (; list; list = XEXP (list, 1))
2292 rtx insn = XEXP (list, 0);
2293 remove_note (insn, find_reg_note (insn, REG_EQUIV, NULL_RTX));
2297 /* In a DEBUG_INSN location, replace any REG from the CLEARED_REGS bitmap
2298 with its equivalent replacement. */
2301 adjust_cleared_regs (rtx loc, const_rtx old_rtx ATTRIBUTE_UNUSED, void *data)
2305 bitmap cleared_regs = (bitmap) data;
2306 if (bitmap_bit_p (cleared_regs, REGNO (loc)))
2307 return simplify_replace_fn_rtx (*reg_equiv[REGNO (loc)].src_p,
2308 NULL_RTX, adjust_cleared_regs, data);
2313 /* Nonzero if we recorded an equivalence for a LABEL_REF. */
2314 static int recorded_label_ref;
2316 /* Find registers that are equivalent to a single value throughout the
2317 compilation (either because they can be referenced in memory or are set once
2318 from a single constant). Lower their priority for a register.
2320 If such a register is only referenced once, try substituting its value
2321 into the using insn. If it succeeds, we can eliminate the register completely.
2324 Initialize the REG_EQUIV_INIT array of initializing insns.
2326 Return non-zero if jump label rebuilding should be done. */
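/* A schematic example (not taken from any particular target) of what this
   pass records.  For a pseudo set once from a stable memory location,

       (insn 10 ... (set (reg 100) (mem:SI (symbol_ref "x"))))
       ...
       (insn 20 ... (use (reg 100)))

   insn 10 gets a REG_EQUIV note for the MEM and is recorded in
   reg_equiv_init; if reg 100 is referenced exactly twice and the MEM stays
   valid, the third pass below may substitute the MEM into insn 20 and
   delete insn 10 entirely.  */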
2328 update_equiv_regs (void)
2333 bitmap cleared_regs;
2335 /* We need to keep track of whether or not we recorded a LABEL_REF so
2336 that we know if the jump optimizer needs to be rerun. */
2337 recorded_label_ref = 0;
2339 reg_equiv = XCNEWVEC (struct equivalence, max_regno);
2340 reg_equiv_init = GGC_CNEWVEC (rtx, max_regno);
2341 reg_equiv_init_size = max_regno;
2343 init_alias_analysis ();
2345 /* Scan the insns and find which registers have equivalences. Do this
2346 in a separate scan of the insns because (due to -fcse-follow-jumps)
2347 a register can be set below its use. */
2350 loop_depth = bb->loop_depth;
2352 for (insn = BB_HEAD (bb);
2353 insn != NEXT_INSN (BB_END (bb));
2354 insn = NEXT_INSN (insn))
2361 if (! INSN_P (insn))
2364 for (note = REG_NOTES (insn); note; note = XEXP (note, 1))
2365 if (REG_NOTE_KIND (note) == REG_INC)
2366 no_equiv (XEXP (note, 0), note, NULL);
2368 set = single_set (insn);
2370 /* If this insn contains more (or less) than a single SET,
2371 only mark all destinations as having no known equivalence. */
2374 note_stores (PATTERN (insn), no_equiv, NULL);
2377 else if (GET_CODE (PATTERN (insn)) == PARALLEL)
2381 for (i = XVECLEN (PATTERN (insn), 0) - 1; i >= 0; i--)
2383 rtx part = XVECEXP (PATTERN (insn), 0, i);
2385 note_stores (part, no_equiv, NULL);
2389 dest = SET_DEST (set);
2390 src = SET_SRC (set);
2392 /* See if this is setting up the equivalence between an argument
2393 register and its stack slot. */
2394 note = find_reg_note (insn, REG_EQUIV, NULL_RTX);
2397 gcc_assert (REG_P (dest));
2398 regno = REGNO (dest);
2400 /* Note that we don't want to clear reg_equiv_init even if there
2401 are multiple sets of this register. */
2402 reg_equiv[regno].is_arg_equivalence = 1;
2404 /* Record for reload that this is an equivalencing insn. */
2405 if (rtx_equal_p (src, XEXP (note, 0)))
2406 reg_equiv_init[regno]
2407 = gen_rtx_INSN_LIST (VOIDmode, insn, reg_equiv_init[regno]);
2409 /* Continue normally in case this is a candidate for equivalences. */
2416 /* We only handle the case of a pseudo register being set
2417 once, or always to the same value. */
2418 /* ??? The mn10200 port breaks if we add equivalences for
2419 values that need an ADDRESS_REGS register and set them equivalent
2420 to a MEM of a pseudo. The actual problem is in the over-conservative
2421 handling of INPADDR_ADDRESS / INPUT_ADDRESS / INPUT triples in
2422 calculate_needs, but we traditionally work around this problem
2423 here by rejecting equivalences when the destination is in a register
2424 that's likely spilled. This is fragile, of course, since the
2425 preferred class of a pseudo depends on all instructions that set or use it. */
2429 || (regno = REGNO (dest)) < FIRST_PSEUDO_REGISTER
2430 || reg_equiv[regno].init_insns == const0_rtx
2431 || (CLASS_LIKELY_SPILLED_P (reg_preferred_class (regno))
2432 && MEM_P (src) && ! reg_equiv[regno].is_arg_equivalence))
2434 /* This might be setting a SUBREG of a pseudo, a pseudo that is
2435 also set somewhere else to a constant. */
2436 note_stores (set, no_equiv, NULL);
2440 note = find_reg_note (insn, REG_EQUAL, NULL_RTX);
2442 /* cse sometimes generates function invariants, but doesn't put a
2443 REG_EQUAL note on the insn. Since this note would be redundant,
2444 there's no point creating it earlier than here. */
2445 if (! note && ! rtx_varies_p (src, 0))
2446 note = set_unique_reg_note (insn, REG_EQUAL, copy_rtx (src));
2448 /* Don't bother considering a REG_EQUAL note containing an EXPR_LIST
2449 since it represents a function call. */
2450 if (note && GET_CODE (XEXP (note, 0)) == EXPR_LIST)
2453 if (DF_REG_DEF_COUNT (regno) != 1
2455 || rtx_varies_p (XEXP (note, 0), 0)
2456 || (reg_equiv[regno].replacement
2457 && ! rtx_equal_p (XEXP (note, 0),
2458 reg_equiv[regno].replacement))))
2460 no_equiv (dest, set, NULL);
2463 /* Record this insn as initializing this register. */
2464 reg_equiv[regno].init_insns
2465 = gen_rtx_INSN_LIST (VOIDmode, insn, reg_equiv[regno].init_insns);
2467 /* If this register is known to be equal to a constant, record that
2468 it is always equivalent to the constant. */
2469 if (DF_REG_DEF_COUNT (regno) == 1
2470 && note && ! rtx_varies_p (XEXP (note, 0), 0))
2472 rtx note_value = XEXP (note, 0);
2473 remove_note (insn, note);
2474 set_unique_reg_note (insn, REG_EQUIV, note_value);
2477 /* If this insn introduces a "constant" register, decrease the priority
2478 of that register. Record this insn if the register is only used once
2479 more and the equivalence value is the same as our source.
2481 The latter condition is checked for two reasons: First, it is an
2482 indication that it may be more efficient to actually emit the insn
2483 as written (if no registers are available, reload will substitute
2484 the equivalence). Secondly, it avoids problems with any registers
2485 dying in this insn whose death notes would be missed.
2487 If we don't have a REG_EQUIV note, see if this insn is loading
2488 a register used only in one basic block from a MEM. If so, and the
2489 MEM remains unchanged for the life of the register, add a REG_EQUIV note. */
2492 note = find_reg_note (insn, REG_EQUIV, NULL_RTX);
2494 if (note == 0 && REG_BASIC_BLOCK (regno) >= NUM_FIXED_BLOCKS
2495 && MEM_P (SET_SRC (set))
2496 && validate_equiv_mem (insn, dest, SET_SRC (set)))
2497 note = set_unique_reg_note (insn, REG_EQUIV, copy_rtx (SET_SRC (set)));
2501 int regno = REGNO (dest);
2502 rtx x = XEXP (note, 0);
2504 /* If we haven't done so, record for reload that this is an
2505 equivalencing insn. */
2506 if (!reg_equiv[regno].is_arg_equivalence)
2507 reg_equiv_init[regno]
2508 = gen_rtx_INSN_LIST (VOIDmode, insn, reg_equiv_init[regno]);
2510 /* Record whether or not we created a REG_EQUIV note for a LABEL_REF.
2511 We might end up substituting the LABEL_REF for uses of the
2512 pseudo here or later. That kind of transformation may turn an
2513 indirect jump into a direct jump, in which case we must rerun the
2514 jump optimizer to ensure that the JUMP_LABEL fields are valid. */
2515 if (GET_CODE (x) == LABEL_REF
2516 || (GET_CODE (x) == CONST
2517 && GET_CODE (XEXP (x, 0)) == PLUS
2518 && (GET_CODE (XEXP (XEXP (x, 0), 0)) == LABEL_REF)))
2519 recorded_label_ref = 1;
2521 reg_equiv[regno].replacement = x;
2522 reg_equiv[regno].src_p = &SET_SRC (set);
2523 reg_equiv[regno].loop_depth = loop_depth;
2525 /* Don't mess with things live during setjmp. */
2526 if (REG_LIVE_LENGTH (regno) >= 0 && optimize)
2528 /* Note that the statement below does not affect the priority
2530 REG_LIVE_LENGTH (regno) *= 2;
2532 /* If the register is referenced exactly twice, meaning it is
2533 set once and used once, indicate that the reference may be
2534 replaced by the equivalence we computed above. Do this
2535 even if the register is only used in one block so that
2536 dependencies can be handled where the last register is
2537 used in a different block (i.e. HIGH / LO_SUM sequences)
2538 and to reduce the number of registers alive across calls. */
2541 if (REG_N_REFS (regno) == 2
2542 && (rtx_equal_p (x, src)
2543 || ! equiv_init_varies_p (src))
2544 && NONJUMP_INSN_P (insn)
2545 && equiv_init_movable_p (PATTERN (insn), regno))
2546 reg_equiv[regno].replace = 1;
2555 /* A second pass, to gather additional equivalences with memory. This needs
2556 to be done after we know which registers we are going to replace. */
2558 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
2563 if (! INSN_P (insn))
2566 set = single_set (insn);
2570 dest = SET_DEST (set);
2571 src = SET_SRC (set);
2573 /* If this sets a MEM to the contents of a REG that is only used
2574 in a single basic block, see if the register is always equivalent
2575 to that memory location and if moving the store from INSN to the
2576 insn that set REG is safe. If so, put a REG_EQUIV note on the initializing insn.
2579 Don't add a REG_EQUIV note if the insn already has one. The existing
2580 REG_EQUIV is likely more useful than the one we are adding.
2582 If one of the regs in the address has reg_equiv[REGNO].replace set,
2583 then we can't add this REG_EQUIV note. The reg_equiv[REGNO].replace
2584 optimization may move the set of this register immediately before
2585 insn, which puts it after reg_equiv[REGNO].init_insns, and hence
2586 the mention in the REG_EQUIV note would be to an uninitialized pseudo. */
2589 if (MEM_P (dest) && REG_P (src)
2590 && (regno = REGNO (src)) >= FIRST_PSEUDO_REGISTER
2591 && REG_BASIC_BLOCK (regno) >= NUM_FIXED_BLOCKS
2592 && DF_REG_DEF_COUNT (regno) == 1
2593 && reg_equiv[regno].init_insns != 0
2594 && reg_equiv[regno].init_insns != const0_rtx
2595 && ! find_reg_note (XEXP (reg_equiv[regno].init_insns, 0),
2596 REG_EQUIV, NULL_RTX)
2597 && ! contains_replace_regs (XEXP (dest, 0)))
2599 rtx init_insn = XEXP (reg_equiv[regno].init_insns, 0);
2600 if (validate_equiv_mem (init_insn, src, dest)
2601 && ! memref_used_between_p (dest, init_insn, insn)
2602 /* Attaching a REG_EQUIV note will fail if INIT_INSN has multiple sets. */
2604 && set_unique_reg_note (init_insn, REG_EQUIV, copy_rtx (dest)))
2606 /* This insn makes the equivalence, not the one initializing the register. */
2608 reg_equiv_init[regno]
2609 = gen_rtx_INSN_LIST (VOIDmode, insn, NULL_RTX);
2610 df_notes_rescan (init_insn);
2615 cleared_regs = BITMAP_ALLOC (NULL);
2616 /* Now scan all regs killed in an insn to see if any of them are
2617 registers used only once. If so, see if we can replace the
2618 reference with the equivalent form. If we can, delete the
2619 initializing reference and this register will go away. If we
2620 can't replace the reference, and the initializing reference is
2621 within the same loop (or in an inner loop), then move the register
2622 initialization just before the use, so that they are in the same basic block. */
2624 FOR_EACH_BB_REVERSE (bb)
2626 loop_depth = bb->loop_depth;
2627 for (insn = BB_END (bb);
2628 insn != PREV_INSN (BB_HEAD (bb));
2629 insn = PREV_INSN (insn))
2633 if (! INSN_P (insn))
2636 /* Don't substitute into a non-local goto; this confuses the CFG. */
2638 && find_reg_note (insn, REG_NON_LOCAL_GOTO, NULL_RTX))
2641 for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
2643 if (REG_NOTE_KIND (link) == REG_DEAD
2644 /* Make sure this insn still refers to the register. */
2645 && reg_mentioned_p (XEXP (link, 0), PATTERN (insn)))
2647 int regno = REGNO (XEXP (link, 0));
2650 if (! reg_equiv[regno].replace
2651 || reg_equiv[regno].loop_depth < loop_depth)
2654 /* reg_equiv[REGNO].replace gets set only when
2655 REG_N_REFS[REGNO] is 2, i.e. the register is set
2656 once and used once. (If it were only set, but not used,
2657 flow would have deleted the setting insns.) Hence
2658 there can only be one insn in reg_equiv[REGNO].init_insns. */
2659 gcc_assert (reg_equiv[regno].init_insns
2660 && !XEXP (reg_equiv[regno].init_insns, 1));
2661 equiv_insn = XEXP (reg_equiv[regno].init_insns, 0);
2663 /* We may not move instructions that can throw, since
2664 that changes basic block boundaries and we are not
2665 prepared to adjust the CFG to match. */
2666 if (can_throw_internal (equiv_insn))
2669 if (asm_noperands (PATTERN (equiv_insn)) < 0
2670 && validate_replace_rtx (regno_reg_rtx[regno],
2671 *(reg_equiv[regno].src_p), insn))
2677 /* Find the last note. */
2678 for (last_link = link; XEXP (last_link, 1);
2679 last_link = XEXP (last_link, 1))
2682 /* Append the REG_DEAD notes from equiv_insn. */
2683 equiv_link = REG_NOTES (equiv_insn);
2687 equiv_link = XEXP (equiv_link, 1);
2688 if (REG_NOTE_KIND (note) == REG_DEAD)
2690 remove_note (equiv_insn, note);
2691 XEXP (last_link, 1) = note;
2692 XEXP (note, 1) = NULL_RTX;
2697 remove_death (regno, insn);
2698 SET_REG_N_REFS (regno, 0);
2699 REG_FREQ (regno) = 0;
2700 delete_insn (equiv_insn);
2702 reg_equiv[regno].init_insns
2703 = XEXP (reg_equiv[regno].init_insns, 1);
2705 reg_equiv_init[regno] = NULL_RTX;
2706 bitmap_set_bit (cleared_regs, regno);
2708 /* Move the initialization of the register to just before
2709 INSN. Update the flow information. */
2710 else if (prev_nondebug_insn (insn) != equiv_insn)
2714 new_insn = emit_insn_before (PATTERN (equiv_insn), insn);
2715 REG_NOTES (new_insn) = REG_NOTES (equiv_insn);
2716 REG_NOTES (equiv_insn) = 0;
2717 /* Rescan it to process the notes. */
2718 df_insn_rescan (new_insn);
2720 /* Make sure this insn is recognized before
2721 reload begins, otherwise
2722 eliminate_regs_in_insn will die. */
2723 INSN_CODE (new_insn) = INSN_CODE (equiv_insn);
2725 delete_insn (equiv_insn);
2727 XEXP (reg_equiv[regno].init_insns, 0) = new_insn;
2729 REG_BASIC_BLOCK (regno) = bb->index;
2730 REG_N_CALLS_CROSSED (regno) = 0;
2731 REG_FREQ_CALLS_CROSSED (regno) = 0;
2732 REG_N_THROWING_CALLS_CROSSED (regno) = 0;
2733 REG_LIVE_LENGTH (regno) = 2;
2735 if (insn == BB_HEAD (bb))
2736 BB_HEAD (bb) = PREV_INSN (insn);
2738 reg_equiv_init[regno]
2739 = gen_rtx_INSN_LIST (VOIDmode, new_insn, NULL_RTX);
2740 bitmap_set_bit (cleared_regs, regno);
2747 if (!bitmap_empty_p (cleared_regs))
2751 bitmap_and_compl_into (DF_LIVE_IN (bb), cleared_regs);
2752 bitmap_and_compl_into (DF_LIVE_OUT (bb), cleared_regs);
2753 bitmap_and_compl_into (DF_LR_IN (bb), cleared_regs);
2754 bitmap_and_compl_into (DF_LR_OUT (bb), cleared_regs);
2757 /* Last pass - adjust debug insns referencing cleared regs. */
2758 if (MAY_HAVE_DEBUG_INSNS)
2759 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
2760 if (DEBUG_INSN_P (insn))
2762 rtx old_loc = INSN_VAR_LOCATION_LOC (insn);
2763 INSN_VAR_LOCATION_LOC (insn)
2764 = simplify_replace_fn_rtx (old_loc, NULL_RTX,
2765 adjust_cleared_regs,
2766 (void *) cleared_regs);
2767 if (old_loc != INSN_VAR_LOCATION_LOC (insn))
2768 df_insn_rescan (insn);
2772 BITMAP_FREE (cleared_regs);
2777 end_alias_analysis ();
2779 return recorded_label_ref;
2784 /* Print chain C to FILE. */
2786 print_insn_chain (FILE *file, struct insn_chain *c)
2788 fprintf (file, "insn=%d, ", INSN_UID(c->insn));
2789 bitmap_print (file, &c->live_throughout, "live_throughout: ", ", ");
2790 bitmap_print (file, &c->dead_or_set, "dead_or_set: ", "\n");
2794 /* Print all reload_insn_chains to FILE. */
2796 print_insn_chains (FILE *file)
2798 struct insn_chain *c;
2799 for (c = reload_insn_chain; c ; c = c->next)
2800 print_insn_chain (file, c);
2803 /* Return true if pseudo REGNO should be added to set live_throughout
2804 or dead_or_set of the insn chains for reload consideration. */
2806 pseudo_for_reload_consideration_p (int regno)
2808 /* Consider spilled pseudos too for IRA because they still have a
2809 chance to get hard-registers in the reload when IRA is used. */
2810 return (reg_renumber[regno] >= 0
2811 || (ira_conflicts_p && flag_ira_share_spill_slots));
2814 /* Initialize LIVE_SUBREGS[ALLOCNUM] and LIVE_SUBREGS_USED[ALLOCNUM].
2815 REG determines how many bytes are tracked and INIT_VALUE gives the
2816 initial value of every bit. ALLOCNUM need not be the regno of REG. */
2818 init_live_subregs (bool init_value, sbitmap *live_subregs,
2819 int *live_subregs_used, int allocnum, rtx reg)
2821 unsigned int regno = REGNO (SUBREG_REG (reg));
2822 int size = GET_MODE_SIZE (GET_MODE (regno_reg_rtx[regno]));
2824 gcc_assert (size > 0);
2826 /* Been there, done that. */
2827 if (live_subregs_used[allocnum])
2830 /* Create a new one with zeros. */
2831 if (live_subregs[allocnum] == NULL)
2832 live_subregs[allocnum] = sbitmap_alloc (size);
2834 /* If the entire reg was live before blasting into subregs, we need to
2835 initialize all of the subreg bits to ones; otherwise initialize them to zero. */
2837 sbitmap_ones (live_subregs[allocnum]);
2839 sbitmap_zero (live_subregs[allocnum]);
2841 /* Set the number of bits that we really want. */
2842 live_subregs_used[allocnum] = size;
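/* The map has one bit per byte of the full pseudo: for example, on a
   typical 32-bit target a DImode pseudo gets an 8-bit map, and a SImode
   subreg of it covers 4 of those bits.  */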
2845 /* Walk the insns of the current function and build reload_insn_chain,
2846 and record register life information. */
2848 build_insn_chain (void)
2851 struct insn_chain **p = &reload_insn_chain;
2853 struct insn_chain *c = NULL;
2854 struct insn_chain *next = NULL;
2855 bitmap live_relevant_regs = BITMAP_ALLOC (NULL);
2856 bitmap elim_regset = BITMAP_ALLOC (NULL);
2857 /* live_subregs is a vector used to keep accurate information about
2858 which hardregs are live in multiword pseudos. live_subregs and
2859 live_subregs_used are indexed by pseudo number. The live_subreg
2860 entry for a particular pseudo is only used if the corresponding
2861 element is nonzero in live_subregs_used. The value in
2862 live_subregs_used is the number of bytes that the pseudo can occupy. */
2864 sbitmap *live_subregs = XCNEWVEC (sbitmap, max_regno);
2865 int *live_subregs_used = XNEWVEC (int, max_regno);
2867 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
2868 if (TEST_HARD_REG_BIT (eliminable_regset, i))
2869 bitmap_set_bit (elim_regset, i);
2870 FOR_EACH_BB_REVERSE (bb)
2875 CLEAR_REG_SET (live_relevant_regs);
2876 memset (live_subregs_used, 0, max_regno * sizeof (int));
2878 EXECUTE_IF_SET_IN_BITMAP (DF_LR_OUT (bb), 0, i, bi)
2880 if (i >= FIRST_PSEUDO_REGISTER)
2882 bitmap_set_bit (live_relevant_regs, i);
2885 EXECUTE_IF_SET_IN_BITMAP (DF_LR_OUT (bb),
2886 FIRST_PSEUDO_REGISTER, i, bi)
2888 if (pseudo_for_reload_consideration_p (i))
2889 bitmap_set_bit (live_relevant_regs, i);
2892 FOR_BB_INSNS_REVERSE (bb, insn)
2894 if (!NOTE_P (insn) && !BARRIER_P (insn))
2896 unsigned int uid = INSN_UID (insn);
2900 c = new_insn_chain ();
2907 c->block = bb->index;
2910 for (def_rec = DF_INSN_UID_DEFS (uid); *def_rec; def_rec++)
2912 df_ref def = *def_rec;
2913 unsigned int regno = DF_REF_REGNO (def);
2915 /* Ignore may clobbers because these are generated
2916 from calls. However, every other kind of def is
2917 added to dead_or_set. */
2918 if (!DF_REF_FLAGS_IS_SET (def, DF_REF_MAY_CLOBBER))
2920 if (regno < FIRST_PSEUDO_REGISTER)
2922 if (!fixed_regs[regno])
2923 bitmap_set_bit (&c->dead_or_set, regno);
2925 else if (pseudo_for_reload_consideration_p (regno))
2926 bitmap_set_bit (&c->dead_or_set, regno);
2929 if ((regno < FIRST_PSEUDO_REGISTER
2930 || reg_renumber[regno] >= 0
2932 && (!DF_REF_FLAGS_IS_SET (def, DF_REF_CONDITIONAL)))
2934 rtx reg = DF_REF_REG (def);
2936 /* We can model subregs, but not if they are
2937 wrapped in ZERO_EXTRACTS. */
2938 if (GET_CODE (reg) == SUBREG
2939 && !DF_REF_FLAGS_IS_SET (def, DF_REF_ZERO_EXTRACT))
2941 unsigned int start = SUBREG_BYTE (reg);
2942 unsigned int last = start
2943 + GET_MODE_SIZE (GET_MODE (reg));
2946 (bitmap_bit_p (live_relevant_regs, regno),
2947 live_subregs, live_subregs_used, regno, reg);
2949 if (!DF_REF_FLAGS_IS_SET
2950 (def, DF_REF_STRICT_LOW_PART))
2952 /* Expand the range to cover entire words.
2953 Bytes added here are "don't care". */
2955 = start / UNITS_PER_WORD * UNITS_PER_WORD;
2956 last = ((last + UNITS_PER_WORD - 1)
2957 / UNITS_PER_WORD * UNITS_PER_WORD);
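/* For example, with UNITS_PER_WORD == 4, a one-byte subreg at byte 6 has
   start = 6 and last = 7; the rounding above widens this to start = 4 and
   last = 8, i.e. the containing word.  */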
2960 /* Ignore the paradoxical bits. */
2961 if ((int)last > live_subregs_used[regno])
2962 last = live_subregs_used[regno];
2964 while (start < last)
2966 RESET_BIT (live_subregs[regno], start);
2970 if (sbitmap_empty_p (live_subregs[regno]))
2972 live_subregs_used[regno] = 0;
2973 bitmap_clear_bit (live_relevant_regs, regno);
2976 /* Set live_relevant_regs here because
2977 that bit has to be true to get us to
2978 look at the live_subregs fields. */
2979 bitmap_set_bit (live_relevant_regs, regno);
2983 /* DF_REF_PARTIAL is generated for
2984 subregs, STRICT_LOW_PART, and
2985 ZERO_EXTRACT. We handle the subreg
2986 case above so here we have to keep from
2987 modeling the def as a killing def. */
2988 if (!DF_REF_FLAGS_IS_SET (def, DF_REF_PARTIAL))
2990 bitmap_clear_bit (live_relevant_regs, regno);
2991 live_subregs_used[regno] = 0;
2997 bitmap_and_compl_into (live_relevant_regs, elim_regset);
2998 bitmap_copy (&c->live_throughout, live_relevant_regs);
3001 for (use_rec = DF_INSN_UID_USES (uid); *use_rec; use_rec++)
3003 df_ref use = *use_rec;
3004 unsigned int regno = DF_REF_REGNO (use);
3005 rtx reg = DF_REF_REG (use);
3007 /* DF_REF_READ_WRITE on a use means that this use
3008 is fabricated from a def that is a partial set
3009 to a multiword reg. Here, we only model the
3010 subreg case that is not wrapped in ZERO_EXTRACT
3011 precisely so we do not need to look at the fabricated use. */
3013 if (DF_REF_FLAGS_IS_SET (use, DF_REF_READ_WRITE)
3014 && !DF_REF_FLAGS_IS_SET (use, DF_REF_ZERO_EXTRACT)
3015 && DF_REF_FLAGS_IS_SET (use, DF_REF_SUBREG))
3018 /* Add the last use of each var to dead_or_set. */
3019 if (!bitmap_bit_p (live_relevant_regs, regno))
3021 if (regno < FIRST_PSEUDO_REGISTER)
3023 if (!fixed_regs[regno])
3024 bitmap_set_bit (&c->dead_or_set, regno);
3026 else if (pseudo_for_reload_consideration_p (regno))
3027 bitmap_set_bit (&c->dead_or_set, regno);
3030 if (regno < FIRST_PSEUDO_REGISTER
3031 || pseudo_for_reload_consideration_p (regno))
3033 if (GET_CODE (reg) == SUBREG
3034 && !DF_REF_FLAGS_IS_SET (use,
3036 | DF_REF_ZERO_EXTRACT))
3038 unsigned int start = SUBREG_BYTE (reg);
3039 unsigned int last = start
3040 + GET_MODE_SIZE (GET_MODE (reg));
3043 (bitmap_bit_p (live_relevant_regs, regno),
3044 live_subregs, live_subregs_used, regno, reg);
3046 /* Ignore the paradoxical bits. */
3047 if ((int)last > live_subregs_used[regno])
3048 last = live_subregs_used[regno];
3050 while (start < last)
3052 SET_BIT (live_subregs[regno], start);
3057 /* Resetting the live_subregs_used is
3058 effectively saying do not use the subregs
3059 because we are reading the whole pseudo. */
3061 live_subregs_used[regno] = 0;
3062 bitmap_set_bit (live_relevant_regs, regno);
3068 /* FIXME!! The following code is a disaster. Reload needs to see the
3069 labels and jump tables that are just hanging out in between
3070 the basic blocks. See pr33676. */
3071 insn = BB_HEAD (bb);
3073 /* Skip over the barriers and cruft. */
3074 while (insn && (BARRIER_P (insn) || NOTE_P (insn)
3075 || BLOCK_FOR_INSN (insn) == bb))
3076 insn = PREV_INSN (insn);
3078 /* We add everything except barriers and notes, but the point is
3079 to get the labels and jump tables into the
3080 reload_insn_chain. */
3083 if (!NOTE_P (insn) && !BARRIER_P (insn))
3085 if (BLOCK_FOR_INSN (insn))
3088 c = new_insn_chain ();
3094 /* The block makes no sense here, but it is what the old code did. */
3096 c->block = bb->index;
3098 bitmap_copy (&c->live_throughout, live_relevant_regs);
3100 insn = PREV_INSN (insn);
3104 for (i = 0; i < (unsigned int) max_regno; i++)
3105 if (live_subregs[i])
3106 free (live_subregs[i]);
3108 reload_insn_chain = c;
3111 free (live_subregs);
3112 free (live_subregs_used);
3113 BITMAP_FREE (live_relevant_regs);
3114 BITMAP_FREE (elim_regset);
3117 print_insn_chains (dump_file);
3122 /* All natural loops. */
3123 struct loops ira_loops;
3125 /* True if we have allocno conflicts. It is false for non-optimized
3126 mode or when the conflict table is too big. */
3127 bool ira_conflicts_p;
3129 /* This is the main entry of IRA. */
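/* An overview of the steps visible below: build the allocno internal
   representation over the loop tree (ira_build), assign hard registers and
   flatten the multi-region representation back into a single region
   (ira_flattening), compute and report the allocation cost, build the
   reload insn chains (build_insn_chain), run reload, and finally release
   the IRA data structures.  */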
3133 int overall_cost_before, allocated_reg_info_size;
3135 int max_regno_before_ira, ira_max_point_before_emit;
3137 int saved_flag_ira_share_spill_slots;
3140 timevar_push (TV_IRA);
3142 if (flag_caller_saves)
3143 init_caller_save ();
3145 if (flag_ira_verbose < 10)
3147 internal_flag_ira_verbose = flag_ira_verbose;
3152 internal_flag_ira_verbose = flag_ira_verbose - 10;
3153 ira_dump_file = stderr;
3156 ira_conflicts_p = optimize > 0;
3157 setup_prohibited_mode_move_regs ();
3159 df_note_add_problem ();
3163 df_live_add_problem ();
3164 df_live_set_all_dirty ();
3166 #ifdef ENABLE_CHECKING
3167 df->changeable_flags |= DF_VERIFY_SCHEDULED;
3170 df_clear_flags (DF_NO_INSN_RESCAN);
3171 regstat_init_n_sets_and_refs ();
3172 regstat_compute_ri ();
3174 /* If we are not optimizing, then this is the only place before
3175 register allocation where dataflow is done. And that is needed
3176 to generate these warnings. */
3178 generate_setjmp_warnings ();
3180 /* Determine if the current function is a leaf before running IRA
3181 since this can impact optimizations done by the prologue and
3182 epilogue thus changing register elimination offsets. */
3183 current_function_is_leaf = leaf_function_p ();
3185 if (resize_reg_info () && flag_ira_loop_pressure)
3186 ira_set_pseudo_classes (ira_dump_file);
3188 rebuild_p = update_equiv_regs ();
3190 #ifndef IRA_NO_OBSTACK
3191 gcc_obstack_init (&ira_obstack);
3193 bitmap_obstack_initialize (&ira_bitmap_obstack);
3196 max_regno = max_reg_num ();
3197 ira_reg_equiv_len = max_regno;
3198 ira_reg_equiv_invariant_p
3199 = (bool *) ira_allocate (max_regno * sizeof (bool));
3200 memset (ira_reg_equiv_invariant_p, 0, max_regno * sizeof (bool));
3201 ira_reg_equiv_const = (rtx *) ira_allocate (max_regno * sizeof (rtx));
3202 memset (ira_reg_equiv_const, 0, max_regno * sizeof (rtx));
3203 find_reg_equiv_invariant_const ();
3206 timevar_push (TV_JUMP);
3207 rebuild_jump_labels (get_insns ());
3208 purge_all_dead_edges ();
3209 timevar_pop (TV_JUMP);
3213 max_regno_before_ira = allocated_reg_info_size = max_reg_num ();
3214 ira_setup_eliminable_regset ();
3216 ira_overall_cost = ira_reg_cost = ira_mem_cost = 0;
3217 ira_load_cost = ira_store_cost = ira_shuffle_cost = 0;
3218 ira_move_loops_num = ira_additional_jumps_num = 0;
3220 ira_assert (current_loops == NULL);
3221 flow_loops_find (&ira_loops);
3222 record_loop_exits ();
3223 current_loops = &ira_loops;
3225 if (internal_flag_ira_verbose > 0 && ira_dump_file != NULL)
3226 fprintf (ira_dump_file, "Building IRA IR\n");
3227 loops_p = ira_build (optimize
3228 && (flag_ira_region == IRA_REGION_ALL
3229 || flag_ira_region == IRA_REGION_MIXED));
3231 ira_assert (ira_conflicts_p || !loops_p);
3233 saved_flag_ira_share_spill_slots = flag_ira_share_spill_slots;
3234 if (too_high_register_pressure_p ())
3235 /* Packing spilled pseudos into stack slots in this case just wastes
3236 compile time -- prohibit it. */
3237 flag_ira_share_spill_slots = FALSE;
3241 ira_max_point_before_emit = ira_max_point;
3245 if (ira_conflicts_p)
3247 max_regno = max_reg_num ();
3250 ira_initiate_assign ();
3253 expand_reg_info (allocated_reg_info_size);
3254 setup_preferred_alternate_classes_for_new_pseudos
3255 (allocated_reg_info_size);
3256 allocated_reg_info_size = max_regno;
3258 if (internal_flag_ira_verbose > 0 && ira_dump_file != NULL)
3259 fprintf (ira_dump_file, "Flattening IR\n");
3260 ira_flattening (max_regno_before_ira, ira_max_point_before_emit);
3261 /* New insns were generated: add notes and recalculate live info. */
3265 flow_loops_find (&ira_loops);
3266 record_loop_exits ();
3267 current_loops = &ira_loops;
3269 setup_allocno_assignment_flags ();
3270 ira_initiate_assign ();
3271 ira_reassign_conflict_allocnos (max_regno);
3275 setup_reg_renumber ();
3277 calculate_allocation_cost ();
3279 #ifdef ENABLE_IRA_CHECKING
3280 if (ira_conflicts_p)
3281 check_allocation ();
3284 delete_trivially_dead_insns (get_insns (), max_reg_num ());
3285 max_regno = max_reg_num ();
3287 /* Also resize the reg_equiv_memory_loc array for the new max_regno. */
3288 VEC_safe_grow (rtx, gc, reg_equiv_memory_loc_vec, max_regno);
3289 memset (VEC_address (rtx, reg_equiv_memory_loc_vec), 0,
3290 sizeof (rtx) * max_regno);
3291 reg_equiv_memory_loc = VEC_address (rtx, reg_equiv_memory_loc_vec);
3293 if (max_regno != max_regno_before_ira)
3295 regstat_free_n_sets_and_refs ();
3297 regstat_init_n_sets_and_refs ();
3298 regstat_compute_ri ();
3301 allocate_initial_values (reg_equiv_memory_loc);
3303 overall_cost_before = ira_overall_cost;
3304 if (ira_conflicts_p)
3306 fix_reg_equiv_init ();
3308 #ifdef ENABLE_IRA_CHECKING
3309 print_redundant_copies ();
3312 ira_spilled_reg_stack_slots_num = 0;
3313 ira_spilled_reg_stack_slots
3314 = ((struct ira_spilled_reg_stack_slot *)
3315 ira_allocate (max_regno
3316 * sizeof (struct ira_spilled_reg_stack_slot)));
3317 memset (ira_spilled_reg_stack_slots, 0,
3318 max_regno * sizeof (struct ira_spilled_reg_stack_slot));
3321 timevar_pop (TV_IRA);
3323 timevar_push (TV_RELOAD);
3324 df_set_flags (DF_NO_INSN_RESCAN);
3325 build_insn_chain ();
3327 reload_completed = !reload (get_insns (), ira_conflicts_p);
3329 finish_subregs_of_mode ();
3331 timevar_pop (TV_RELOAD);
3333 timevar_push (TV_IRA);
3335 if (ira_conflicts_p)
3337 ira_free (ira_spilled_reg_stack_slots);
3339 ira_finish_assign ();
3342 if (internal_flag_ira_verbose > 0 && ira_dump_file != NULL
3343 && overall_cost_before != ira_overall_cost)
3344 fprintf (ira_dump_file, "+++Overall after reload %d\n", ira_overall_cost);
3347 flag_ira_share_spill_slots = saved_flag_ira_share_spill_slots;
3349 flow_loops_free (&ira_loops);
3350 free_dominance_info (CDI_DOMINATORS);
3352 bb->loop_father = NULL;
3353 current_loops = NULL;
3356 regstat_free_n_sets_and_refs ();
3360 cleanup_cfg (CLEANUP_EXPENSIVE);
3362 ira_free (ira_reg_equiv_invariant_p);
3363 ira_free (ira_reg_equiv_const);
3366 bitmap_obstack_release (&ira_bitmap_obstack);
3367 #ifndef IRA_NO_OBSTACK
3368 obstack_free (&ira_obstack, NULL);
3371 /* The code after the reload has changed so much that at this point
3372 we might as well just rescan everything. Note that
3373 df_rescan_all_insns is not going to help here because it does not
3374 touch the artificial uses and defs. */
3375 df_finish_pass (true);
3377 df_live_add_problem ();
3378 df_scan_alloc (NULL);
3384 timevar_pop (TV_IRA);
3395 /* Run the integrated register allocator. */
3397 rest_of_handle_ira (void)
3403 struct rtl_opt_pass pass_ira =
3408 gate_ira, /* gate */
3409 rest_of_handle_ira, /* execute */
3412 0, /* static_pass_number */
3413 TV_NONE, /* tv_id */
3414 0, /* properties_required */
3415 0, /* properties_provided */
3416 0, /* properties_destroyed */
3417 0, /* todo_flags_start */
3419 TODO_ggc_collect /* todo_flags_finish */