1 /* IRA allocation based on graph coloring.
2 Copyright (C) 2006-2015 Free Software Foundation, Inc.
3 Contributed by Vladimir Makarov <vmakarov@redhat.com>.
5 This file is part of GCC.
7 GCC is free software; you can redistribute it and/or modify it under
8 the terms of the GNU General Public License as published by the Free
9 Software Foundation; either version 3, or (at your option) any later
12 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
13 WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
17 You should have received a copy of the GNU General Public License
18 along with GCC; see the file COPYING3. If not see
19 <http://www.gnu.org/licenses/>. */
23 #include "coretypes.h"
32 #include "hard-reg-set.h"
36 #include "dominance.h"
38 #include "basic-block.h"
42 #include "insn-config.h"
51 #include "diagnostic-core.h"
57 typedef struct allocno_hard_regs *allocno_hard_regs_t;
59 /* The structure contains information about hard registers can be
60 assigned to allocnos. Usually it is allocno profitable hard
61 registers but in some cases this set can be a bit different. Major
62 reason of the difference is a requirement to use hard register sets
63 that form a tree or a forest (set of trees), i.e. hard register set
64 of a node should contain hard register sets of its subnodes. */
65 struct allocno_hard_regs
67 /* Hard registers can be assigned to an allocno. */
69 /* Overall (spilling) cost of all allocnos with given register
74 typedef struct allocno_hard_regs_node *allocno_hard_regs_node_t;
76 /* A node representing allocno hard registers. Such nodes form a
77 forest (set of trees). Each subnode of given node in the forest
78 refers for hard register set (usually allocno profitable hard
79 register set) which is a subset of one referred from given
81 struct allocno_hard_regs_node
83 /* Set up number of the node in preorder traversing of the forest. */
85 /* Used for different calculation like finding conflict size of an
88 /* Used for calculation of conflict size of an allocno. The
89 conflict size of the allocno is maximal number of given allocno
90 hard registers needed for allocation of the conflicting allocnos.
91 Given allocno is trivially colored if this number plus the number
92 of hard registers needed for given allocno is not greater than
93 the number of given allocno hard register set. */
95 /* The number of hard registers given by member hard_regs. */
97 /* The following member is used to form the final forest. */
99 /* Pointer to the corresponding profitable hard registers. */
100 allocno_hard_regs_t hard_regs;
101 /* Parent, first subnode, previous and next node with the same
102 parent in the forest. */
103 allocno_hard_regs_node_t parent, first, prev, next;
106 /* Info about changing hard reg costs of an allocno. */
107 struct update_cost_record
109 /* Hard regno for which we changed the cost. */
111 /* Divisor used when we changed the cost of HARD_REGNO. */
113 /* Next record for given allocno. */
114 struct update_cost_record *next;
116 /* Pool allocation new operator. */
117 inline void *operator new (size_t)
119 return pool.allocate ();
122 /* Delete operator utilizing pool allocation. */
123 inline void operator delete (void *ptr)
125 pool.remove ((update_cost_record *) ptr);
128 /* Memory allocation pool. */
129 static pool_allocator<update_cost_record> pool;
132 /* To decrease footprint of ira_allocno structure we store all data
133 needed only for coloring in the following structure. */
134 struct allocno_color_data
136 /* TRUE value means that the allocno was not removed yet from the
137 conflicting graph during coloring. */
138 unsigned int in_graph_p : 1;
139 /* TRUE if it is put on the stack to make other allocnos
141 unsigned int may_be_spilled_p : 1;
142 /* TRUE if the allocno is trivially colorable. */
143 unsigned int colorable_p : 1;
144 /* Number of hard registers of the allocno class really
145 available for the allocno allocation. It is number of the
146 profitable hard regs. */
147 int available_regs_num;
148 /* Allocnos in a bucket (used in coloring) chained by the following
150 ira_allocno_t next_bucket_allocno;
151 ira_allocno_t prev_bucket_allocno;
152 /* Used for temporary purposes. */
154 /* Used to exclude repeated processing. */
156 /* Profitable hard regs available for this pseudo allocation. It
157 means that the set excludes unavailable hard regs and hard regs
158 conflicting with given pseudo. They should be of the allocno
160 HARD_REG_SET profitable_hard_regs;
161 /* The allocno hard registers node. */
162 allocno_hard_regs_node_t hard_regs_node;
163 /* Array of structures allocno_hard_regs_subnode representing
164 given allocno hard registers node (the 1st element in the array)
165 and all its subnodes in the tree (forest) of allocno hard
166 register nodes (see comments above). */
167 int hard_regs_subnodes_start;
168 /* The length of the previous array. */
169 int hard_regs_subnodes_num;
170 /* Records about updating allocno hard reg costs from copies. If
171 the allocno did not get expected hard register, these records are
172 used to restore original hard reg costs of allocnos connected to
173 this allocno by copies. */
174 struct update_cost_record *update_cost_records;
175 /* Threads. We collect allocnos connected by copies into threads
176 and try to assign hard regs to allocnos by threads. */
177 /* Allocno representing all thread. */
178 ira_allocno_t first_thread_allocno;
179 /* Allocnos in thread forms a cycle list through the following
181 ira_allocno_t next_thread_allocno;
182 /* All thread frequency. Defined only for first thread allocno. */
187 typedef struct allocno_color_data *allocno_color_data_t;
189 /* Container for storing allocno data concerning coloring. */
190 static allocno_color_data_t allocno_color_data;
192 /* Macro to access the data concerning coloring. */
193 #define ALLOCNO_COLOR_DATA(a) ((allocno_color_data_t) ALLOCNO_ADD_DATA (a))
195 /* Used for finding allocno colorability to exclude repeated allocno
196 processing and for updating preferencing to exclude repeated
197 allocno processing during assignment. */
198 static int curr_allocno_process;
200 /* This file contains code for regional graph coloring, spill/restore
201 code placement optimization, and code helping the reload pass to do
204 /* Bitmap of allocnos which should be colored. */
205 static bitmap coloring_allocno_bitmap;
207 /* Bitmap of allocnos which should be taken into account during
208 coloring. In general case it contains allocnos from
209 coloring_allocno_bitmap plus other already colored conflicting
211 static bitmap consideration_allocno_bitmap;
213 /* All allocnos sorted according their priorities. */
214 static ira_allocno_t *sorted_allocnos;
216 /* Vec representing the stack of allocnos used during coloring. */
217 static vec<ira_allocno_t> allocno_stack_vec;
219 /* Helper for qsort comparison callbacks - return a positive integer if
220 X > Y, or a negative value otherwise. Use a conditional expression
221 instead of a difference computation to insulate from possible overflow
222 issues, e.g. X - Y < 0 for some X > 0 and Y < 0. */
223 #define SORTGT(x,y) (((x) > (y)) ? 1 : -1)
227 /* Definition of vector of allocno hard registers. */
229 /* Vector of unique allocno hard registers. */
230 static vec<allocno_hard_regs_t> allocno_hard_regs_vec;
232 struct allocno_hard_regs_hasher : typed_noop_remove <allocno_hard_regs>
234 typedef allocno_hard_regs *value_type;
235 typedef allocno_hard_regs *compare_type;
236 static inline hashval_t hash (const allocno_hard_regs *);
237 static inline bool equal (const allocno_hard_regs *,
238 const allocno_hard_regs *);
241 /* Returns hash value for allocno hard registers V. */
243 allocno_hard_regs_hasher::hash (const allocno_hard_regs *hv)
245 return iterative_hash (&hv->set, sizeof (HARD_REG_SET), 0);
248 /* Compares allocno hard registers V1 and V2. */
250 allocno_hard_regs_hasher::equal (const allocno_hard_regs *hv1,
251 const allocno_hard_regs *hv2)
253 return hard_reg_set_equal_p (hv1->set, hv2->set);
256 /* Hash table of unique allocno hard registers. */
257 static hash_table<allocno_hard_regs_hasher> *allocno_hard_regs_htab;
259 /* Return allocno hard registers in the hash table equal to HV. */
260 static allocno_hard_regs_t
261 find_hard_regs (allocno_hard_regs_t hv)
263 return allocno_hard_regs_htab->find (hv);
266 /* Insert allocno hard registers HV in the hash table (if it is not
267 there yet) and return the value which in the table. */
268 static allocno_hard_regs_t
269 insert_hard_regs (allocno_hard_regs_t hv)
271 allocno_hard_regs **slot = allocno_hard_regs_htab->find_slot (hv, INSERT);
278 /* Initialize data concerning allocno hard registers. */
280 init_allocno_hard_regs (void)
282 allocno_hard_regs_vec.create (200);
283 allocno_hard_regs_htab
284 = new hash_table<allocno_hard_regs_hasher> (200);
287 /* Add (or update info about) allocno hard registers with SET and
289 static allocno_hard_regs_t
290 add_allocno_hard_regs (HARD_REG_SET set, int64_t cost)
292 struct allocno_hard_regs temp;
293 allocno_hard_regs_t hv;
295 gcc_assert (! hard_reg_set_empty_p (set));
296 COPY_HARD_REG_SET (temp.set, set);
297 if ((hv = find_hard_regs (&temp)) != NULL)
301 hv = ((struct allocno_hard_regs *)
302 ira_allocate (sizeof (struct allocno_hard_regs)));
303 COPY_HARD_REG_SET (hv->set, set);
305 allocno_hard_regs_vec.safe_push (hv);
306 insert_hard_regs (hv);
311 /* Finalize data concerning allocno hard registers. */
313 finish_allocno_hard_regs (void)
316 allocno_hard_regs_t hv;
319 allocno_hard_regs_vec.iterate (i, &hv);
322 delete allocno_hard_regs_htab;
323 allocno_hard_regs_htab = NULL;
324 allocno_hard_regs_vec.release ();
327 /* Sort hard regs according to their frequency of usage. */
329 allocno_hard_regs_compare (const void *v1p, const void *v2p)
331 allocno_hard_regs_t hv1 = *(const allocno_hard_regs_t *) v1p;
332 allocno_hard_regs_t hv2 = *(const allocno_hard_regs_t *) v2p;
334 if (hv2->cost > hv1->cost)
336 else if (hv2->cost < hv1->cost)
344 /* Used for finding a common ancestor of two allocno hard registers
345 nodes in the forest. We use the current value of
346 'node_check_tick' to mark all nodes from one node to the top and
347 then walking up from another node until we find a marked node.
349 It is also used to figure out allocno colorability as a mark that
350 we already reset value of member 'conflict_size' for the forest
351 node corresponding to the processed allocno. */
352 static int node_check_tick;
354 /* Roots of the forest containing hard register sets can be assigned
356 static allocno_hard_regs_node_t hard_regs_roots;
358 /* Definition of vector of allocno hard register nodes. */
360 /* Vector used to create the forest. */
361 static vec<allocno_hard_regs_node_t> hard_regs_node_vec;
363 /* Create and return allocno hard registers node containing allocno
364 hard registers HV. */
365 static allocno_hard_regs_node_t
366 create_new_allocno_hard_regs_node (allocno_hard_regs_t hv)
368 allocno_hard_regs_node_t new_node;
370 new_node = ((struct allocno_hard_regs_node *)
371 ira_allocate (sizeof (struct allocno_hard_regs_node)));
373 new_node->hard_regs = hv;
374 new_node->hard_regs_num = hard_reg_set_size (hv->set);
375 new_node->first = NULL;
376 new_node->used_p = false;
380 /* Add allocno hard registers node NEW_NODE to the forest on its level
383 add_new_allocno_hard_regs_node_to_forest (allocno_hard_regs_node_t *roots,
384 allocno_hard_regs_node_t new_node)
386 new_node->next = *roots;
387 if (new_node->next != NULL)
388 new_node->next->prev = new_node;
389 new_node->prev = NULL;
393 /* Add allocno hard registers HV (or its best approximation if it is
394 not possible) to the forest on its level given by ROOTS. */
396 add_allocno_hard_regs_to_forest (allocno_hard_regs_node_t *roots,
397 allocno_hard_regs_t hv)
399 unsigned int i, start;
400 allocno_hard_regs_node_t node, prev, new_node;
401 HARD_REG_SET temp_set;
402 allocno_hard_regs_t hv2;
404 start = hard_regs_node_vec.length ();
405 for (node = *roots; node != NULL; node = node->next)
407 if (hard_reg_set_equal_p (hv->set, node->hard_regs->set))
409 if (hard_reg_set_subset_p (hv->set, node->hard_regs->set))
411 add_allocno_hard_regs_to_forest (&node->first, hv);
414 if (hard_reg_set_subset_p (node->hard_regs->set, hv->set))
415 hard_regs_node_vec.safe_push (node);
416 else if (hard_reg_set_intersect_p (hv->set, node->hard_regs->set))
418 COPY_HARD_REG_SET (temp_set, hv->set);
419 AND_HARD_REG_SET (temp_set, node->hard_regs->set);
420 hv2 = add_allocno_hard_regs (temp_set, hv->cost);
421 add_allocno_hard_regs_to_forest (&node->first, hv2);
424 if (hard_regs_node_vec.length ()
427 /* Create a new node which contains nodes in hard_regs_node_vec. */
428 CLEAR_HARD_REG_SET (temp_set);
430 i < hard_regs_node_vec.length ();
433 node = hard_regs_node_vec[i];
434 IOR_HARD_REG_SET (temp_set, node->hard_regs->set);
436 hv = add_allocno_hard_regs (temp_set, hv->cost);
437 new_node = create_new_allocno_hard_regs_node (hv);
440 i < hard_regs_node_vec.length ();
443 node = hard_regs_node_vec[i];
444 if (node->prev == NULL)
447 node->prev->next = node->next;
448 if (node->next != NULL)
449 node->next->prev = node->prev;
451 new_node->first = node;
458 add_new_allocno_hard_regs_node_to_forest (roots, new_node);
460 hard_regs_node_vec.truncate (start);
463 /* Add allocno hard registers nodes starting with the forest level
464 given by FIRST which contains biggest set inside SET. */
466 collect_allocno_hard_regs_cover (allocno_hard_regs_node_t first,
469 allocno_hard_regs_node_t node;
471 ira_assert (first != NULL);
472 for (node = first; node != NULL; node = node->next)
473 if (hard_reg_set_subset_p (node->hard_regs->set, set))
474 hard_regs_node_vec.safe_push (node);
475 else if (hard_reg_set_intersect_p (set, node->hard_regs->set))
476 collect_allocno_hard_regs_cover (node->first, set);
479 /* Set up field parent as PARENT in all allocno hard registers nodes
480 in forest given by FIRST. */
482 setup_allocno_hard_regs_nodes_parent (allocno_hard_regs_node_t first,
483 allocno_hard_regs_node_t parent)
485 allocno_hard_regs_node_t node;
487 for (node = first; node != NULL; node = node->next)
489 node->parent = parent;
490 setup_allocno_hard_regs_nodes_parent (node->first, node);
494 /* Return allocno hard registers node which is a first common ancestor
495 node of FIRST and SECOND in the forest. */
496 static allocno_hard_regs_node_t
497 first_common_ancestor_node (allocno_hard_regs_node_t first,
498 allocno_hard_regs_node_t second)
500 allocno_hard_regs_node_t node;
503 for (node = first; node != NULL; node = node->parent)
504 node->check = node_check_tick;
505 for (node = second; node != NULL; node = node->parent)
506 if (node->check == node_check_tick)
508 return first_common_ancestor_node (second, first);
511 /* Print hard reg set SET to F. */
513 print_hard_reg_set (FILE *f, HARD_REG_SET set, bool new_line_p)
517 for (start = -1, i = 0; i < FIRST_PSEUDO_REGISTER; i++)
519 if (TEST_HARD_REG_BIT (set, i))
521 if (i == 0 || ! TEST_HARD_REG_BIT (set, i - 1))
525 && (i == FIRST_PSEUDO_REGISTER - 1 || ! TEST_HARD_REG_BIT (set, i)))
528 fprintf (f, " %d", start);
529 else if (start == i - 2)
530 fprintf (f, " %d %d", start, start + 1);
532 fprintf (f, " %d-%d", start, i - 1);
540 /* Print allocno hard register subforest given by ROOTS and its LEVEL
543 print_hard_regs_subforest (FILE *f, allocno_hard_regs_node_t roots,
547 allocno_hard_regs_node_t node;
549 for (node = roots; node != NULL; node = node->next)
552 for (i = 0; i < level * 2; i++)
554 fprintf (f, "%d:(", node->preorder_num);
555 print_hard_reg_set (f, node->hard_regs->set, false);
556 fprintf (f, ")@%" PRId64"\n", node->hard_regs->cost);
557 print_hard_regs_subforest (f, node->first, level + 1);
561 /* Print the allocno hard register forest to F. */
563 print_hard_regs_forest (FILE *f)
565 fprintf (f, " Hard reg set forest:\n");
566 print_hard_regs_subforest (f, hard_regs_roots, 1);
569 /* Print the allocno hard register forest to stderr. */
571 ira_debug_hard_regs_forest (void)
573 print_hard_regs_forest (stderr);
576 /* Remove unused allocno hard registers nodes from forest given by its
579 remove_unused_allocno_hard_regs_nodes (allocno_hard_regs_node_t *roots)
581 allocno_hard_regs_node_t node, prev, next, last;
583 for (prev = NULL, node = *roots; node != NULL; node = next)
588 remove_unused_allocno_hard_regs_nodes (&node->first);
593 for (last = node->first;
594 last != NULL && last->next != NULL;
600 *roots = node->first;
602 prev->next = node->first;
622 /* Set up fields preorder_num starting with START_NUM in all allocno
623 hard registers nodes in forest given by FIRST. Return biggest set
624 PREORDER_NUM increased by 1. */
626 enumerate_allocno_hard_regs_nodes (allocno_hard_regs_node_t first,
627 allocno_hard_regs_node_t parent,
630 allocno_hard_regs_node_t node;
632 for (node = first; node != NULL; node = node->next)
634 node->preorder_num = start_num++;
635 node->parent = parent;
636 start_num = enumerate_allocno_hard_regs_nodes (node->first, node,
642 /* Number of allocno hard registers nodes in the forest. */
643 static int allocno_hard_regs_nodes_num;
645 /* Table preorder number of allocno hard registers node in the forest
646 -> the allocno hard registers node. */
647 static allocno_hard_regs_node_t *allocno_hard_regs_nodes;
typedef struct allocno_hard_regs_subnode *allocno_hard_regs_subnode_t;

/* The structure is used to describes all subnodes (not only immediate
   ones) in the mentioned above tree for given allocno hard register
   node.  The usage of such data accelerates calculation of
   colorability of given allocno.  */
struct allocno_hard_regs_subnode
{
  /* The conflict size of conflicting allocnos whose hard register
     sets are equal sets (plus supersets if given node is given
     allocno hard registers node) of one in the given node.  */
  int left_conflict_size;
  /* The summary conflict size of conflicting allocnos whose hard
     register sets are strict subsets of one in the given node.
     Overall conflict size is
     left_conflict_subnodes_size
       + MIN (max_node_impact - left_conflict_subnodes_size,
	      left_conflict_size)
  */
  short left_conflict_subnodes_size;
  short max_node_impact;
};
673 /* Container for hard regs subnodes of all allocnos. */
674 static allocno_hard_regs_subnode_t allocno_hard_regs_subnodes;
676 /* Table (preorder number of allocno hard registers node in the
677 forest, preorder number of allocno hard registers subnode) -> index
678 of the subnode relative to the node. -1 if it is not a
680 static int *allocno_hard_regs_subnode_index;
682 /* Setup arrays ALLOCNO_HARD_REGS_NODES and
683 ALLOCNO_HARD_REGS_SUBNODE_INDEX. */
685 setup_allocno_hard_regs_subnode_index (allocno_hard_regs_node_t first)
687 allocno_hard_regs_node_t node, parent;
690 for (node = first; node != NULL; node = node->next)
692 allocno_hard_regs_nodes[node->preorder_num] = node;
693 for (parent = node; parent != NULL; parent = parent->parent)
695 index = parent->preorder_num * allocno_hard_regs_nodes_num;
696 allocno_hard_regs_subnode_index[index + node->preorder_num]
697 = node->preorder_num - parent->preorder_num;
699 setup_allocno_hard_regs_subnode_index (node->first);
703 /* Count all allocno hard registers nodes in tree ROOT. */
705 get_allocno_hard_regs_subnodes_num (allocno_hard_regs_node_t root)
709 for (root = root->first; root != NULL; root = root->next)
710 len += get_allocno_hard_regs_subnodes_num (root);
714 /* Build the forest of allocno hard registers nodes and assign each
715 allocno a node from the forest. */
717 form_allocno_hard_regs_nodes_forest (void)
719 unsigned int i, j, size, len;
722 allocno_hard_regs_t hv;
725 allocno_hard_regs_node_t node, allocno_hard_regs_node;
726 allocno_color_data_t allocno_data;
729 init_allocno_hard_regs ();
730 hard_regs_roots = NULL;
731 hard_regs_node_vec.create (100);
732 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
733 if (! TEST_HARD_REG_BIT (ira_no_alloc_regs, i))
735 CLEAR_HARD_REG_SET (temp);
736 SET_HARD_REG_BIT (temp, i);
737 hv = add_allocno_hard_regs (temp, 0);
738 node = create_new_allocno_hard_regs_node (hv);
739 add_new_allocno_hard_regs_node_to_forest (&hard_regs_roots, node);
741 start = allocno_hard_regs_vec.length ();
742 EXECUTE_IF_SET_IN_BITMAP (coloring_allocno_bitmap, 0, i, bi)
745 allocno_data = ALLOCNO_COLOR_DATA (a);
747 if (hard_reg_set_empty_p (allocno_data->profitable_hard_regs))
749 hv = (add_allocno_hard_regs
750 (allocno_data->profitable_hard_regs,
751 ALLOCNO_MEMORY_COST (a) - ALLOCNO_CLASS_COST (a)));
753 SET_HARD_REG_SET (temp);
754 AND_COMPL_HARD_REG_SET (temp, ira_no_alloc_regs);
755 add_allocno_hard_regs (temp, 0);
756 qsort (allocno_hard_regs_vec.address () + start,
757 allocno_hard_regs_vec.length () - start,
758 sizeof (allocno_hard_regs_t), allocno_hard_regs_compare);
760 allocno_hard_regs_vec.iterate (i, &hv);
763 add_allocno_hard_regs_to_forest (&hard_regs_roots, hv);
764 ira_assert (hard_regs_node_vec.length () == 0);
766 /* We need to set up parent fields for right work of
767 first_common_ancestor_node. */
768 setup_allocno_hard_regs_nodes_parent (hard_regs_roots, NULL);
769 EXECUTE_IF_SET_IN_BITMAP (coloring_allocno_bitmap, 0, i, bi)
772 allocno_data = ALLOCNO_COLOR_DATA (a);
773 if (hard_reg_set_empty_p (allocno_data->profitable_hard_regs))
775 hard_regs_node_vec.truncate (0);
776 collect_allocno_hard_regs_cover (hard_regs_roots,
777 allocno_data->profitable_hard_regs);
778 allocno_hard_regs_node = NULL;
779 for (j = 0; hard_regs_node_vec.iterate (j, &node); j++)
780 allocno_hard_regs_node
783 : first_common_ancestor_node (node, allocno_hard_regs_node));
784 /* That is a temporary storage. */
785 allocno_hard_regs_node->used_p = true;
786 allocno_data->hard_regs_node = allocno_hard_regs_node;
788 ira_assert (hard_regs_roots->next == NULL);
789 hard_regs_roots->used_p = true;
790 remove_unused_allocno_hard_regs_nodes (&hard_regs_roots);
791 allocno_hard_regs_nodes_num
792 = enumerate_allocno_hard_regs_nodes (hard_regs_roots, NULL, 0);
793 allocno_hard_regs_nodes
794 = ((allocno_hard_regs_node_t *)
795 ira_allocate (allocno_hard_regs_nodes_num
796 * sizeof (allocno_hard_regs_node_t)));
797 size = allocno_hard_regs_nodes_num * allocno_hard_regs_nodes_num;
798 allocno_hard_regs_subnode_index
799 = (int *) ira_allocate (size * sizeof (int));
800 for (i = 0; i < size; i++)
801 allocno_hard_regs_subnode_index[i] = -1;
802 setup_allocno_hard_regs_subnode_index (hard_regs_roots);
804 EXECUTE_IF_SET_IN_BITMAP (coloring_allocno_bitmap, 0, i, bi)
807 allocno_data = ALLOCNO_COLOR_DATA (a);
808 if (hard_reg_set_empty_p (allocno_data->profitable_hard_regs))
810 len = get_allocno_hard_regs_subnodes_num (allocno_data->hard_regs_node);
811 allocno_data->hard_regs_subnodes_start = start;
812 allocno_data->hard_regs_subnodes_num = len;
815 allocno_hard_regs_subnodes
816 = ((allocno_hard_regs_subnode_t)
817 ira_allocate (sizeof (struct allocno_hard_regs_subnode) * start));
818 hard_regs_node_vec.release ();
821 /* Free tree of allocno hard registers nodes given by its ROOT. */
823 finish_allocno_hard_regs_nodes_tree (allocno_hard_regs_node_t root)
825 allocno_hard_regs_node_t child, next;
827 for (child = root->first; child != NULL; child = next)
830 finish_allocno_hard_regs_nodes_tree (child);
835 /* Finish work with the forest of allocno hard registers nodes. */
837 finish_allocno_hard_regs_nodes_forest (void)
839 allocno_hard_regs_node_t node, next;
841 ira_free (allocno_hard_regs_subnodes);
842 for (node = hard_regs_roots; node != NULL; node = next)
845 finish_allocno_hard_regs_nodes_tree (node);
847 ira_free (allocno_hard_regs_nodes);
848 ira_free (allocno_hard_regs_subnode_index);
849 finish_allocno_hard_regs ();
852 /* Set up left conflict sizes and left conflict subnodes sizes of hard
853 registers subnodes of allocno A. Return TRUE if allocno A is
854 trivially colorable. */
856 setup_left_conflict_sizes_p (ira_allocno_t a)
858 int i, k, nobj, start;
859 int conflict_size, left_conflict_subnodes_size, node_preorder_num;
860 allocno_color_data_t data;
861 HARD_REG_SET profitable_hard_regs;
862 allocno_hard_regs_subnode_t subnodes;
863 allocno_hard_regs_node_t node;
864 HARD_REG_SET node_set;
866 nobj = ALLOCNO_NUM_OBJECTS (a);
867 data = ALLOCNO_COLOR_DATA (a);
868 subnodes = allocno_hard_regs_subnodes + data->hard_regs_subnodes_start;
869 COPY_HARD_REG_SET (profitable_hard_regs, data->profitable_hard_regs);
870 node = data->hard_regs_node;
871 node_preorder_num = node->preorder_num;
872 COPY_HARD_REG_SET (node_set, node->hard_regs->set);
874 for (k = 0; k < nobj; k++)
876 ira_object_t obj = ALLOCNO_OBJECT (a, k);
877 ira_object_t conflict_obj;
878 ira_object_conflict_iterator oci;
880 FOR_EACH_OBJECT_CONFLICT (obj, conflict_obj, oci)
883 ira_allocno_t conflict_a = OBJECT_ALLOCNO (conflict_obj);
884 allocno_hard_regs_node_t conflict_node, temp_node;
885 HARD_REG_SET conflict_node_set;
886 allocno_color_data_t conflict_data;
888 conflict_data = ALLOCNO_COLOR_DATA (conflict_a);
889 if (! ALLOCNO_COLOR_DATA (conflict_a)->in_graph_p
890 || ! hard_reg_set_intersect_p (profitable_hard_regs,
892 ->profitable_hard_regs))
894 conflict_node = conflict_data->hard_regs_node;
895 COPY_HARD_REG_SET (conflict_node_set, conflict_node->hard_regs->set);
896 if (hard_reg_set_subset_p (node_set, conflict_node_set))
900 ira_assert (hard_reg_set_subset_p (conflict_node_set, node_set));
901 temp_node = conflict_node;
903 if (temp_node->check != node_check_tick)
905 temp_node->check = node_check_tick;
906 temp_node->conflict_size = 0;
908 size = (ira_reg_class_max_nregs
909 [ALLOCNO_CLASS (conflict_a)][ALLOCNO_MODE (conflict_a)]);
910 if (ALLOCNO_NUM_OBJECTS (conflict_a) > 1)
911 /* We will deal with the subwords individually. */
913 temp_node->conflict_size += size;
916 for (i = 0; i < data->hard_regs_subnodes_num; i++)
918 allocno_hard_regs_node_t temp_node;
920 temp_node = allocno_hard_regs_nodes[i + node_preorder_num];
921 ira_assert (temp_node->preorder_num == i + node_preorder_num);
922 subnodes[i].left_conflict_size = (temp_node->check != node_check_tick
923 ? 0 : temp_node->conflict_size);
924 if (hard_reg_set_subset_p (temp_node->hard_regs->set,
925 profitable_hard_regs))
926 subnodes[i].max_node_impact = temp_node->hard_regs_num;
929 HARD_REG_SET temp_set;
930 int j, n, hard_regno;
931 enum reg_class aclass;
933 COPY_HARD_REG_SET (temp_set, temp_node->hard_regs->set);
934 AND_HARD_REG_SET (temp_set, profitable_hard_regs);
935 aclass = ALLOCNO_CLASS (a);
936 for (n = 0, j = ira_class_hard_regs_num[aclass] - 1; j >= 0; j--)
938 hard_regno = ira_class_hard_regs[aclass][j];
939 if (TEST_HARD_REG_BIT (temp_set, hard_regno))
942 subnodes[i].max_node_impact = n;
944 subnodes[i].left_conflict_subnodes_size = 0;
946 start = node_preorder_num * allocno_hard_regs_nodes_num;
947 for (i = data->hard_regs_subnodes_num - 1; i > 0; i--)
950 allocno_hard_regs_node_t parent;
952 size = (subnodes[i].left_conflict_subnodes_size
953 + MIN (subnodes[i].max_node_impact
954 - subnodes[i].left_conflict_subnodes_size,
955 subnodes[i].left_conflict_size));
956 parent = allocno_hard_regs_nodes[i + node_preorder_num]->parent;
957 gcc_checking_assert(parent);
959 = allocno_hard_regs_subnode_index[start + parent->preorder_num];
960 gcc_checking_assert(parent_i >= 0);
961 subnodes[parent_i].left_conflict_subnodes_size += size;
963 left_conflict_subnodes_size = subnodes[0].left_conflict_subnodes_size;
965 = (left_conflict_subnodes_size
966 + MIN (subnodes[0].max_node_impact - left_conflict_subnodes_size,
967 subnodes[0].left_conflict_size));
968 conflict_size += ira_reg_class_max_nregs[ALLOCNO_CLASS (a)][ALLOCNO_MODE (a)];
969 data->colorable_p = conflict_size <= data->available_regs_num;
970 return data->colorable_p;
973 /* Update left conflict sizes of hard registers subnodes of allocno A
974 after removing allocno REMOVED_A with SIZE from the conflict graph.
975 Return TRUE if A is trivially colorable. */
977 update_left_conflict_sizes_p (ira_allocno_t a,
978 ira_allocno_t removed_a, int size)
980 int i, conflict_size, before_conflict_size, diff, start;
981 int node_preorder_num, parent_i;
982 allocno_hard_regs_node_t node, removed_node, parent;
983 allocno_hard_regs_subnode_t subnodes;
984 allocno_color_data_t data = ALLOCNO_COLOR_DATA (a);
986 ira_assert (! data->colorable_p);
987 node = data->hard_regs_node;
988 node_preorder_num = node->preorder_num;
989 removed_node = ALLOCNO_COLOR_DATA (removed_a)->hard_regs_node;
990 ira_assert (hard_reg_set_subset_p (removed_node->hard_regs->set,
991 node->hard_regs->set)
992 || hard_reg_set_subset_p (node->hard_regs->set,
993 removed_node->hard_regs->set));
994 start = node_preorder_num * allocno_hard_regs_nodes_num;
995 i = allocno_hard_regs_subnode_index[start + removed_node->preorder_num];
998 subnodes = allocno_hard_regs_subnodes + data->hard_regs_subnodes_start;
1000 = (subnodes[i].left_conflict_subnodes_size
1001 + MIN (subnodes[i].max_node_impact
1002 - subnodes[i].left_conflict_subnodes_size,
1003 subnodes[i].left_conflict_size));
1004 subnodes[i].left_conflict_size -= size;
1008 = (subnodes[i].left_conflict_subnodes_size
1009 + MIN (subnodes[i].max_node_impact
1010 - subnodes[i].left_conflict_subnodes_size,
1011 subnodes[i].left_conflict_size));
1012 if ((diff = before_conflict_size - conflict_size) == 0)
1014 ira_assert (conflict_size < before_conflict_size);
1015 parent = allocno_hard_regs_nodes[i + node_preorder_num]->parent;
1019 = allocno_hard_regs_subnode_index[start + parent->preorder_num];
1023 before_conflict_size
1024 = (subnodes[i].left_conflict_subnodes_size
1025 + MIN (subnodes[i].max_node_impact
1026 - subnodes[i].left_conflict_subnodes_size,
1027 subnodes[i].left_conflict_size));
1028 subnodes[i].left_conflict_subnodes_size -= diff;
1032 + ira_reg_class_max_nregs[ALLOCNO_CLASS (a)][ALLOCNO_MODE (a)]
1033 > data->available_regs_num))
1035 data->colorable_p = true;
1039 /* Return true if allocno A has empty profitable hard regs. */
1041 empty_profitable_hard_regs (ira_allocno_t a)
1043 allocno_color_data_t data = ALLOCNO_COLOR_DATA (a);
1045 return hard_reg_set_empty_p (data->profitable_hard_regs);
1048 /* Set up profitable hard registers for each allocno being
/* Three passes over the allocnos:
   1. Seed each allocno's profitable set from its class/mode and remove
      explicitly conflicting hard regs.
   2. Remove hard regs already assigned to conflicting allocnos.
   3. Remove hard regs whose cost exceeds the memory (spill) cost.  */
1051 setup_profitable_hard_regs (void)
1054 int j, k, nobj, hard_regno, nregs, class_size;
1057 enum reg_class aclass;
1059 allocno_color_data_t data;
1061 /* Initial set up from allocno classes and explicitly conflicting
1063 EXECUTE_IF_SET_IN_BITMAP (coloring_allocno_bitmap, 0, i, bi)
1065 a = ira_allocnos[i];
1066 if ((aclass = ALLOCNO_CLASS (a)) == NO_REGS)
1068 data = ALLOCNO_COLOR_DATA (a);
/* If memory is already cheaper than the class cost and there are no
   per-register updated costs, no hard reg can be profitable: start
   from the empty set.  */
1069 if (ALLOCNO_UPDATED_HARD_REG_COSTS (a) == NULL
1070 && ALLOCNO_CLASS_COST (a) > ALLOCNO_MEMORY_COST (a))
1071 CLEAR_HARD_REG_SET (data->profitable_hard_regs);
1074 mode = ALLOCNO_MODE (a);
1075 COPY_HARD_REG_SET (data->profitable_hard_regs,
1076 ira_useful_class_mode_regs[aclass][mode]);
1077 nobj = ALLOCNO_NUM_OBJECTS (a);
1078 for (k = 0; k < nobj; k++)
1080 ira_object_t obj = ALLOCNO_OBJECT (a, k);
1082 AND_COMPL_HARD_REG_SET (data->profitable_hard_regs,
1083 OBJECT_TOTAL_CONFLICT_HARD_REGS (obj));
1087 /* Exclude hard regs already assigned for conflicting objects. */
1088 EXECUTE_IF_SET_IN_BITMAP (consideration_allocno_bitmap, 0, i, bi)
1090 a = ira_allocnos[i];
1091 if ((aclass = ALLOCNO_CLASS (a)) == NO_REGS
1092 || ! ALLOCNO_ASSIGNED_P (a)
1093 || (hard_regno = ALLOCNO_HARD_REGNO (a)) < 0)
1095 mode = ALLOCNO_MODE (a);
1096 nregs = hard_regno_nregs[hard_regno][mode];
1097 nobj = ALLOCNO_NUM_OBJECTS (a);
1098 for (k = 0; k < nobj; k++)
1100 ira_object_t obj = ALLOCNO_OBJECT (a, k);
1101 ira_object_t conflict_obj;
1102 ira_object_conflict_iterator oci;
1104 FOR_EACH_OBJECT_CONFLICT (obj, conflict_obj, oci)
1106 ira_allocno_t conflict_a = OBJECT_ALLOCNO (conflict_obj);
1108 /* We can process the conflict allocno repeatedly with
/* One register per word: only the word of the multi-register value
   that actually overlaps the conflict object needs excluding; the
   word order depends on REG_WORDS_BIG_ENDIAN.  */
1110 if (nregs == nobj && nregs > 1)
1112 int num = OBJECT_SUBWORD (conflict_obj);
1114 if (REG_WORDS_BIG_ENDIAN)
1116 (ALLOCNO_COLOR_DATA (conflict_a)->profitable_hard_regs,
1117 hard_regno + nobj - num - 1);
1120 (ALLOCNO_COLOR_DATA (conflict_a)->profitable_hard_regs,
/* Otherwise remove every hard reg occupied by the assigned value.  */
1124 AND_COMPL_HARD_REG_SET
1125 (ALLOCNO_COLOR_DATA (conflict_a)->profitable_hard_regs,
1126 ira_reg_mode_hard_regset[hard_regno][mode]);
1130 /* Exclude too costly hard regs. */
1131 EXECUTE_IF_SET_IN_BITMAP (coloring_allocno_bitmap, 0, i, bi)
1133 int min_cost = INT_MAX;
1136 a = ira_allocnos[i];
1137 if ((aclass = ALLOCNO_CLASS (a)) == NO_REGS
1138 || empty_profitable_hard_regs (a))
1140 data = ALLOCNO_COLOR_DATA (a);
1141 mode = ALLOCNO_MODE (a);
/* Prefer the updated costs if present, else the static ones.  */
1142 if ((costs = ALLOCNO_UPDATED_HARD_REG_COSTS (a)) != NULL
1143 || (costs = ALLOCNO_HARD_REG_COSTS (a)) != NULL)
1145 class_size = ira_class_hard_regs_num[aclass];
1146 for (j = 0; j < class_size; j++)
1148 hard_regno = ira_class_hard_regs[aclass][j];
1149 if (! TEST_HARD_REG_BIT (data->profitable_hard_regs,
/* A register costing more than spilling to memory is not profitable.  */
1152 if (ALLOCNO_UPDATED_MEMORY_COST (a) < costs[j])
1153 CLEAR_HARD_REG_BIT (data->profitable_hard_regs,
1155 else if (min_cost > costs[j])
1156 min_cost = costs[j];
1159 else if (ALLOCNO_UPDATED_MEMORY_COST (a)
1160 < ALLOCNO_UPDATED_CLASS_COST (a))
1161 CLEAR_HARD_REG_SET (data->profitable_hard_regs);
/* Tighten the class cost down to the cheapest surviving register.  */
1162 if (ALLOCNO_UPDATED_CLASS_COST (a) > min_cost)
1163 ALLOCNO_UPDATED_CLASS_COST (a) = min_cost;
1169 /* This page contains functions used to choose hard registers for
1172 /* Pool for update cost records. */
/* Initial pool capacity of 100 records; grows as needed.  */
1173 static pool_allocator<update_cost_record> update_cost_record_pool
1174 ("update cost records", 100);
1176 /* Return new update cost record with given params. */
/* Allocates from update_cost_record_pool and prepends to NEXT, forming
   a singly linked list.  Caller frees via free_update_cost_record_list.  */
1177 static struct update_cost_record *
1178 get_update_cost_record (int hard_regno, int divisor,
1179 struct update_cost_record *next)
1181 struct update_cost_record *record;
1183 record = update_cost_record_pool.allocate ();
1184 record->hard_regno = hard_regno;
1185 record->divisor = divisor;
1186 record->next = next;
1190 /* Free memory for all records in LIST. */
/* Walks the list returning each record to the pool.  */
1192 free_update_cost_record_list (struct update_cost_record *list)
1194 struct update_cost_record *next;
1196 while (list != NULL)
1199 update_cost_record_pool.remove (list);
1204 /* Free memory allocated for all update cost records. */
/* Releases the whole pool at once (bulk free).  */
1206 finish_update_cost_records (void)
1208 update_cost_record_pool.release ();
1211 /* Array whose element value is TRUE if the corresponding hard
1212 register was already allocated for an allocno. */
/* Consulted by calculate_saved_nregs to decide whether a callee-saved
   register would add new prologue/epilogue save cost.  */
1213 static bool allocated_hardreg_p[FIRST_PSEUDO_REGISTER];
1215 /* Describes one element in a queue of allocnos whose costs need to be
1216 updated. Each allocno in the queue is known to have an allocno
1218 struct update_cost_queue_elem
1220 /* This element is in the queue iff CHECK == update_cost_check. */
/* Using a generation counter avoids clearing the whole array between
   passes; see start_update_cost.  */
1223 /* COST_HOP_DIVISOR**N, where N is the length of the shortest path
1224 connecting this allocno to the one being allocated. */
1227 /* Allocno from which we are chaining costs of connected allocnos.
1228 It is used not go back in graph of allocnos connected by
1232 /* The next allocno in the queue, or null if this is the last element. */
1236 /* The first element in a queue of allocnos whose copy costs need to be
1237 updated. Null if the queue is empty. */
1238 static ira_allocno_t update_cost_queue;
1240 /* The last element in the queue described by update_cost_queue.
1241 Not valid if update_cost_queue is null. */
1242 static struct update_cost_queue_elem *update_cost_queue_tail;
1244 /* A pool of elements in the queue described by update_cost_queue.
1245 Elements are indexed by ALLOCNO_NUM. */
/* One pre-allocated element per allocno; see initiate_cost_update.  */
1246 static struct update_cost_queue_elem *update_cost_queue_elems;
1248 /* The current value of update_costs_from_copies call count. */
/* Generation counter matched against each element's CHECK field.  */
1249 static int update_cost_check;
1251 /* Allocate and initialize data necessary for function
1252 update_costs_from_copies. */
1254 initiate_cost_update (void)
1258 size = ira_allocnos_num * sizeof (struct update_cost_queue_elem);
1259 update_cost_queue_elems
1260 = (struct update_cost_queue_elem *) ira_allocate (size);
/* Zeroing sets every element's CHECK to 0, below the first generation.  */
1261 memset (update_cost_queue_elems, 0, size);
1262 update_cost_check = 0;
1265 /* Deallocate data used by function update_costs_from_copies. */
/* Frees the queue element array and the update cost record pool.  */
1267 finish_cost_update (void)
1269 ira_free (update_cost_queue_elems);
1270 finish_update_cost_records ();
1273 /* When we traverse allocnos to update hard register costs, the cost
1274 divisor will be multiplied by the following macro value for each
1275 hop from given allocno to directly connected allocnos. */
/* I.e. each hop attenuates the propagated cost by a factor of 4.  */
1276 #define COST_HOP_DIVISOR 4
1278 /* Start a new cost-updating pass. */
/* Bumping the generation counter invalidates all queued elements in
   O(1); no per-element clearing is needed.  */
1280 start_update_cost (void)
1282 update_cost_check++;
1283 update_cost_queue = NULL;
1286 /* Add (ALLOCNO, FROM, DIVISOR) to the end of update_cost_queue, unless
1287 ALLOCNO is already in the queue, or has NO_REGS class. */
1289 queue_update_cost (ira_allocno_t allocno, ira_allocno_t from, int divisor)
1291 struct update_cost_queue_elem *elem;
1293 elem = &update_cost_queue_elems[ALLOCNO_NUM (allocno)];
/* Membership test: an element is queued iff its CHECK matches the
   current generation.  */
1294 if (elem->check != update_cost_check
1295 && ALLOCNO_CLASS (allocno) != NO_REGS)
1297 elem->check = update_cost_check;
1299 elem->divisor = divisor;
1301 if (update_cost_queue == NULL)
1302 update_cost_queue = allocno;
1304 update_cost_queue_tail->next = allocno;
1305 update_cost_queue_tail = elem;
1309 /* Try to remove the first element from update_cost_queue. Return
1310 false if the queue was empty, otherwise make (*ALLOCNO, *FROM,
1311 *DIVISOR) describe the removed element. */
1313 get_next_update_cost (ira_allocno_t *allocno, ira_allocno_t *from, int *divisor)
1315 struct update_cost_queue_elem *elem;
1317 if (update_cost_queue == NULL)
1320 *allocno = update_cost_queue;
/* The per-allocno element holds the payload (divisor, from, next).  */
1321 elem = &update_cost_queue_elems[ALLOCNO_NUM (*allocno)];
1323 *divisor = elem->divisor;
1324 update_cost_queue = elem->next;
1328 /* Increase costs of HARD_REGNO by UPDATE_COST for ALLOCNO. Return
1329 true if we really modified the cost. */
1331 update_allocno_cost (ira_allocno_t allocno, int hard_regno, int update_cost)
1334 enum reg_class aclass = ALLOCNO_CLASS (allocno);
/* Map the hard register to its index within ALLOCNO's class; a
   negative index (register not in class) means nothing to update.  */
1336 i = ira_class_hard_reg_index[aclass][hard_regno];
/* Lazily materialize the updated cost vectors before writing them.  */
1339 ira_allocate_and_set_or_copy_costs
1340 (&ALLOCNO_UPDATED_HARD_REG_COSTS (allocno), aclass,
1341 ALLOCNO_UPDATED_CLASS_COST (allocno),
1342 ALLOCNO_HARD_REG_COSTS (allocno));
1343 ira_allocate_and_set_or_copy_costs
1344 (&ALLOCNO_UPDATED_CONFLICT_HARD_REG_COSTS (allocno),
1345 aclass, 0, ALLOCNO_CONFLICT_HARD_REG_COSTS (allocno));
1346 ALLOCNO_UPDATED_HARD_REG_COSTS (allocno)[i] += update_cost;
1347 ALLOCNO_UPDATED_CONFLICT_HARD_REG_COSTS (allocno)[i] += update_cost;
1351 /* Update (decrease if DECR_P) HARD_REGNO cost of allocnos connected
1352 by copies to ALLOCNO to increase chances to remove some copies as
1353 the result of subsequent assignment. Record cost updates if
1354 RECORD_P is true. */
/* Breadth-first propagation over the copy graph using the shared
   update_cost_queue; the cost is attenuated by DIVISOR (multiplied by
   COST_HOP_DIVISOR per hop) and stops once it rounds to zero.  */
1356 update_costs_from_allocno (ira_allocno_t allocno, int hard_regno,
1357 int divisor, bool decr_p, bool record_p)
1359 int cost, update_cost;
1361 enum reg_class rclass, aclass;
1362 ira_allocno_t another_allocno, from = NULL;
1363 ira_copy_t cp, next_cp;
1365 rclass = REGNO_REG_CLASS (hard_regno);
1368 mode = ALLOCNO_MODE (allocno);
1369 ira_init_register_move_cost_if_necessary (mode);
1370 for (cp = ALLOCNO_COPIES (allocno); cp != NULL; cp = next_cp)
/* A copy links two allocnos; pick the one that is not ALLOCNO.  */
1372 if (cp->first == allocno)
1374 next_cp = cp->next_first_allocno_copy;
1375 another_allocno = cp->second;
1377 else if (cp->second == allocno)
1379 next_cp = cp->next_second_allocno_copy;
1380 another_allocno = cp->first;
/* Do not walk back along the edge we arrived through.  */
1385 if (another_allocno == from)
1388 aclass = ALLOCNO_CLASS (another_allocno);
1389 if (! TEST_HARD_REG_BIT (reg_class_contents[aclass],
1391 || ALLOCNO_ASSIGNED_P (another_allocno))
/* The move-cost table is directional; index by copy orientation.  */
1394 cost = (cp->second == allocno
1395 ? ira_register_move_cost[mode][rclass][aclass]
1396 : ira_register_move_cost[mode][aclass][rclass])
1400 update_cost = cp->freq * cost / divisor;
1401 if (update_cost == 0)
1404 if (! update_allocno_cost (another_allocno, hard_regno, update_cost))
1406 queue_update_cost (another_allocno, allocno, divisor * COST_HOP_DIVISOR);
/* Remember each update so restore_costs_from_copies can undo it.  */
1407 if (record_p && ALLOCNO_COLOR_DATA (another_allocno) != NULL)
1408 ALLOCNO_COLOR_DATA (another_allocno)->update_cost_records
1409 = get_update_cost_record (hard_regno, divisor,
1410 ALLOCNO_COLOR_DATA (another_allocno)
1411 ->update_cost_records);
1414 while (get_next_update_cost (&allocno, &from, &divisor));
1417 /* Decrease preferred ALLOCNO hard register costs and costs of
1418 allocnos connected to ALLOCNO through copy. */
/* One propagation pass per recorded preference; decreases (decr_p=true)
   and records (record_p=true) the updates.  */
1420 update_costs_from_prefs (ira_allocno_t allocno)
1424 start_update_cost ();
1425 for (pref = ALLOCNO_PREFS (allocno); pref != NULL; pref = pref->next_pref)
1426 update_costs_from_allocno (allocno, pref->hard_regno,
1427 COST_HOP_DIVISOR, true, true);
1430 /* Update (decrease if DECR_P) the cost of allocnos connected to
1431 ALLOCNO through copies to increase chances to remove some copies as
1432 the result of subsequent assignment. ALLOCNO was just assigned to
1433 a hard register. Record cost updates if RECORD_P is true. */
1435 update_costs_from_copies (ira_allocno_t allocno, bool decr_p, bool record_p)
1439 hard_regno = ALLOCNO_HARD_REGNO (allocno);
/* Caller must have already assigned a hard register to ALLOCNO.  */
1440 ira_assert (hard_regno >= 0 && ALLOCNO_CLASS (allocno) != NO_REGS);
1441 start_update_cost ();
/* Divisor 1: full-strength update for direct copy neighbors.  */
1442 update_costs_from_allocno (allocno, hard_regno, 1, decr_p, record_p);
1445 /* Restore costs of allocnos connected to ALLOCNO by copies as it was
1446 before updating costs of these allocnos from given allocno. This
1447 is a wise thing to do as if given allocno did not get an expected
1448 hard reg, using smaller cost of the hard reg for allocnos connected
1449 by copies to given allocno becomes actually misleading. Free all
1450 update cost records for ALLOCNO as we don't need them anymore. */
1452 restore_costs_from_copies (ira_allocno_t allocno)
1454 struct update_cost_record *records, *curr;
1456 if (ALLOCNO_COLOR_DATA (allocno) == NULL)
1458 records = ALLOCNO_COLOR_DATA (allocno)->update_cost_records;
1459 start_update_cost ();
/* Replay each recorded update in the opposite direction
   (decr_p=true, record_p=false so the undo itself is not recorded).  */
1460 for (curr = records; curr != NULL; curr = curr->next)
1461 update_costs_from_allocno (allocno, curr->hard_regno,
1462 curr->divisor, true, false);
1463 free_update_cost_record_list (records);
1464 ALLOCNO_COLOR_DATA (allocno)->update_cost_records = NULL;
1467 /* This function updates COSTS (decrease if DECR_P) for hard_registers
1468 of ACLASS by conflict costs of the unassigned allocnos
1469 connected by copies with allocnos in update_cost_queue. This
1470 update increases chances to remove some copies. */
/* Drains the queue seeded by the caller (see assign_hard_reg), walking
   copy-connected neighbors and folding their conflict costs, scaled by
   frequency and hop divisor, into COSTS.  */
1472 update_conflict_hard_regno_costs (int *costs, enum reg_class aclass,
1475 int i, cost, class_size, freq, mult, div, divisor;
1476 int index, hard_regno;
1477 int *conflict_costs;
1479 enum reg_class another_aclass;
1480 ira_allocno_t allocno, another_allocno, from;
1481 ira_copy_t cp, next_cp;
1483 while (get_next_update_cost (&allocno, &from, &divisor))
1484 for (cp = ALLOCNO_COPIES (allocno); cp != NULL; cp = next_cp)
1486 if (cp->first == allocno)
1488 next_cp = cp->next_first_allocno_copy;
1489 another_allocno = cp->second;
1491 else if (cp->second == allocno)
1493 next_cp = cp->next_second_allocno_copy;
1494 another_allocno = cp->first;
/* Avoid immediately revisiting the allocno we came from.  */
1499 if (another_allocno == from)
1502 another_aclass = ALLOCNO_CLASS (another_allocno);
/* Skip neighbors whose class shares no registers with ACLASS, already
   assigned ones, and those that may be spilled anyway.  */
1503 if (! ira_reg_classes_intersect_p[aclass][another_aclass]
1504 || ALLOCNO_ASSIGNED_P (another_allocno)
1505 || ALLOCNO_COLOR_DATA (another_allocno)->may_be_spilled_p)
1507 class_size = ira_class_hard_regs_num[another_aclass];
1508 ira_allocate_and_copy_costs
1509 (&ALLOCNO_UPDATED_CONFLICT_HARD_REG_COSTS (another_allocno),
1510 another_aclass, ALLOCNO_CONFLICT_HARD_REG_COSTS (another_allocno));
1512 = ALLOCNO_UPDATED_CONFLICT_HARD_REG_COSTS (another_allocno);
1513 if (conflict_costs == NULL)
1518 freq = ALLOCNO_FREQ (another_allocno);
1521 div = freq * divisor;
1523 for (i = class_size - 1; i >= 0; i--)
1525 hard_regno = ira_class_hard_regs[another_aclass][i];
1526 ira_assert (hard_regno >= 0);
/* Translate the neighbor-class index into an ACLASS index for COSTS.  */
1527 index = ira_class_hard_reg_index[aclass][hard_regno];
1530 cost = (int) ((unsigned) conflict_costs [i] * mult) / div;
1536 costs[index] += cost;
1539 /* Probably 5 hops will be enough. */
1541 && divisor <= (COST_HOP_DIVISOR
1544 * COST_HOP_DIVISOR))
1545 queue_update_cost (another_allocno, allocno, divisor * COST_HOP_DIVISOR);
1549 /* Set up conflicting (through CONFLICT_REGS) for each object of
1550 allocno A and the start allocno profitable regs (through
1551 START_PROFITABLE_REGS). Remember that the start profitable regs
1552 exclude hard regs which can not hold value of mode of allocno A.
1553 This covers mostly cases when multi-register value should be
1556 get_conflict_and_start_profitable_regs (ira_allocno_t a, bool retry_p,
1557 HARD_REG_SET *conflict_regs,
1558 HARD_REG_SET *start_profitable_regs)
1563 nwords = ALLOCNO_NUM_OBJECTS (a);
/* Each object (word) of A gets its own conflict set.  */
1564 for (i = 0; i < nwords; i++)
1566 obj = ALLOCNO_OBJECT (a, i);
1567 COPY_HARD_REG_SET (conflict_regs[i],
1568 OBJECT_TOTAL_CONFLICT_HARD_REGS (obj));
/* In the retry path the per-allocno color data may be stale, so build
   the start set from the class minus mode-prohibited regs instead of
   using the precomputed profitable set.  */
1572 COPY_HARD_REG_SET (*start_profitable_regs,
1573 reg_class_contents[ALLOCNO_CLASS (a)]);
1574 AND_COMPL_HARD_REG_SET (*start_profitable_regs,
1575 ira_prohibited_class_mode_regs
1576 [ALLOCNO_CLASS (a)][ALLOCNO_MODE (a)]);
1579 COPY_HARD_REG_SET (*start_profitable_regs,
1580 ALLOCNO_COLOR_DATA (a)->profitable_hard_regs);
1583 /* Return true if HARD_REGNO is ok for assigning to allocno A with
1584 PROFITABLE_REGS and whose objects have CONFLICT_REGS. */
1586 check_hard_reg_p (ira_allocno_t a, int hard_regno,
1587 HARD_REG_SET *conflict_regs, HARD_REG_SET profitable_regs)
1589 int j, nwords, nregs;
1590 enum reg_class aclass;
1593 aclass = ALLOCNO_CLASS (a);
1594 mode = ALLOCNO_MODE (a);
/* Reject registers that cannot hold a value of A's mode at all.  */
1595 if (TEST_HARD_REG_BIT (ira_prohibited_class_mode_regs[aclass][mode],
1598 /* Checking only profitable hard regs. */
1599 if (! TEST_HARD_REG_BIT (profitable_regs, hard_regno))
1601 nregs = hard_regno_nregs[hard_regno][mode];
1602 nwords = ALLOCNO_NUM_OBJECTS (a);
/* Check each hard register of the (possibly multi-register) value
   against the conflict set of the object (word) it would occupy.  */
1603 for (j = 0; j < nregs; j++)
1606 int set_to_test_start = 0, set_to_test_end = nwords;
/* One register per word: test only the matching word's conflict set;
   which word depends on REG_WORDS_BIG_ENDIAN.  */
1608 if (nregs == nwords)
1610 if (REG_WORDS_BIG_ENDIAN)
1611 set_to_test_start = nwords - j - 1;
1613 set_to_test_start = j;
1614 set_to_test_end = set_to_test_start + 1;
1616 for (k = set_to_test_start; k < set_to_test_end; k++)
1617 if (TEST_HARD_REG_BIT (conflict_regs[k], hard_regno + j))
/* Inner loop exited early => a conflict was found.  */
1619 if (k != set_to_test_end)
1625 /* Return number of registers needed to be saved and restored at
1626 function prologue/epilogue if we allocate HARD_REGNO to hold value
1629 calculate_saved_nregs (int hard_regno, machine_mode mode)
1634 ira_assert (hard_regno >= 0);
/* Count callee-saved (not call-used, not LOCAL_REGNO) registers in the
   span that have not already been handed out to another allocno —
   already-allocated ones incur the save/restore cost only once.  */
1635 for (i = hard_regno_nregs[hard_regno][mode] - 1; i >= 0; i--)
1636 if (!allocated_hardreg_p[hard_regno + i]
1637 && !TEST_HARD_REG_BIT (call_used_reg_set, hard_regno + i)
1638 && !LOCAL_REGNO (hard_regno + i))
1643 /* Choose a hard register for allocno A. If RETRY_P is TRUE, it means
1644 that the function called from function
1645 `ira_reassign_conflict_allocnos' and `allocno_reload_assign'. In
1646 this case some allocno data are not defined or updated and we
1647 should not touch these data. The function returns true if we
1648 managed to assign a hard register to the allocno.
1650 To assign a hard register, first of all we calculate all conflict
1651 hard registers which can come from conflicting allocnos with
1652 already assigned hard registers. After that we find first free
1653 hard register with the minimal cost. During hard register cost
1654 calculation we take conflict hard register costs into account to
1655 give a chance for conflicting allocnos to get a better hard
1656 register in the future.
1658 If the best hard register cost is bigger than cost of memory usage
1659 for the allocno, we don't assign a hard register to given allocno
1662 If we assign a hard register to the allocno, we update costs of the
1663 hard register for allocnos connected by copies to improve a chance
1664 to coalesce insns represented by the copies when we assign hard
1665 registers to the allocnos connected by the copies. */
1667 assign_hard_reg (ira_allocno_t a, bool retry_p)
/* conflicting_regs has 2 entries: an allocno has at most 2 objects
   (words).  */
1669 HARD_REG_SET conflicting_regs[2], profitable_hard_regs;
1670 int i, j, hard_regno, best_hard_regno, class_size;
1671 int cost, mem_cost, min_cost, full_cost, min_full_cost, nwords, word;
1673 enum reg_class aclass;
/* Static scratch arrays: this function is not reentrant.  */
1675 static int costs[FIRST_PSEUDO_REGISTER], full_costs[FIRST_PSEUDO_REGISTER];
1677 enum reg_class rclass;
1680 bool no_stack_reg_p;
1683 ira_assert (! ALLOCNO_ASSIGNED_P (a));
1684 get_conflict_and_start_profitable_regs (a, retry_p,
1686 &profitable_hard_regs);
1687 aclass = ALLOCNO_CLASS (a);
1688 class_size = ira_class_hard_regs_num[aclass];
1689 best_hard_regno = -1;
1690 memset (full_costs, 0, sizeof (int) * class_size);
1692 memset (costs, 0, sizeof (int) * class_size);
1693 memset (full_costs, 0, sizeof (int) * class_size);
1695 no_stack_reg_p = false;
1698 start_update_cost ();
1699 mem_cost += ALLOCNO_UPDATED_MEMORY_COST (a);
1701 ira_allocate_and_copy_costs (&ALLOCNO_UPDATED_HARD_REG_COSTS (a),
1702 aclass, ALLOCNO_HARD_REG_COSTS (a));
1703 a_costs = ALLOCNO_UPDATED_HARD_REG_COSTS (a);
1705 no_stack_reg_p = no_stack_reg_p || ALLOCNO_TOTAL_NO_STACK_REG_P (a);
1707 cost = ALLOCNO_UPDATED_CLASS_COST (a);
/* Seed per-register costs either from the per-register vector or from
   the uniform class cost.  */
1708 for (i = 0; i < class_size; i++)
1709 if (a_costs != NULL)
1711 costs[i] += a_costs[i];
1712 full_costs[i] += a_costs[i];
1717 full_costs[i] += cost;
1719 nwords = ALLOCNO_NUM_OBJECTS (a);
1720 curr_allocno_process++;
1721 for (word = 0; word < nwords; word++)
1723 ira_object_t conflict_obj;
1724 ira_object_t obj = ALLOCNO_OBJECT (a, word);
1725 ira_object_conflict_iterator oci;
1727 /* Take preferences of conflicting allocnos into account. */
1728 FOR_EACH_OBJECT_CONFLICT (obj, conflict_obj, oci)
1730 ira_allocno_t conflict_a = OBJECT_ALLOCNO (conflict_obj);
1731 enum reg_class conflict_aclass;
1732 allocno_color_data_t data = ALLOCNO_COLOR_DATA (conflict_a);
1734 /* Reload can give another class so we need to check all
1737 && (!bitmap_bit_p (consideration_allocno_bitmap,
1738 ALLOCNO_NUM (conflict_a))
1739 || ((!ALLOCNO_ASSIGNED_P (conflict_a)
1740 || ALLOCNO_HARD_REGNO (conflict_a) < 0)
1741 && !(hard_reg_set_intersect_p
1742 (profitable_hard_regs,
1744 (conflict_a)->profitable_hard_regs)))))
1746 conflict_aclass = ALLOCNO_CLASS (conflict_a);
1747 ira_assert (ira_reg_classes_intersect_p
1748 [aclass][conflict_aclass]);
/* Assigned conflicts extend the conflict register sets ...  */
1749 if (ALLOCNO_ASSIGNED_P (conflict_a))
1751 hard_regno = ALLOCNO_HARD_REGNO (conflict_a);
1753 && (ira_hard_reg_set_intersection_p
1754 (hard_regno, ALLOCNO_MODE (conflict_a),
1755 reg_class_contents[aclass])))
1757 int n_objects = ALLOCNO_NUM_OBJECTS (conflict_a);
1760 mode = ALLOCNO_MODE (conflict_a);
1761 conflict_nregs = hard_regno_nregs[hard_regno][mode];
/* One register per word of the conflicting value: only the
   overlapping word conflicts (cf. setup_profitable_hard_regs).  */
1762 if (conflict_nregs == n_objects && conflict_nregs > 1)
1764 int num = OBJECT_SUBWORD (conflict_obj);
1766 if (REG_WORDS_BIG_ENDIAN)
1767 SET_HARD_REG_BIT (conflicting_regs[word],
1768 hard_regno + n_objects - num - 1);
1770 SET_HARD_REG_BIT (conflicting_regs[word],
1775 (conflicting_regs[word],
1776 ira_reg_mode_hard_regset[hard_regno][mode]);
/* If nothing profitable remains, no assignment is possible.  */
1777 if (hard_reg_set_subset_p (profitable_hard_regs,
1778 conflicting_regs[word]))
/* ... unassigned, unspillable conflicts instead bias our costs by
   their conflict cost vectors (processed at most once per pass via
   last_process).  */
1783 && ! ALLOCNO_COLOR_DATA (conflict_a)->may_be_spilled_p
1784 /* Don't process the conflict allocno twice. */
1785 && (ALLOCNO_COLOR_DATA (conflict_a)->last_process
1786 != curr_allocno_process))
1788 int k, *conflict_costs;
1790 ALLOCNO_COLOR_DATA (conflict_a)->last_process
1791 = curr_allocno_process;
1792 ira_allocate_and_copy_costs
1793 (&ALLOCNO_UPDATED_CONFLICT_HARD_REG_COSTS (conflict_a),
1795 ALLOCNO_CONFLICT_HARD_REG_COSTS (conflict_a));
1797 = ALLOCNO_UPDATED_CONFLICT_HARD_REG_COSTS (conflict_a);
1798 if (conflict_costs != NULL)
1799 for (j = class_size - 1; j >= 0; j--)
1801 hard_regno = ira_class_hard_regs[aclass][j];
1802 ira_assert (hard_regno >= 0);
1803 k = ira_class_hard_reg_index[conflict_aclass][hard_regno];
1805 /* If HARD_REGNO is not available for CONFLICT_A,
1806 the conflict would be ignored, since HARD_REGNO
1807 will never be assigned to CONFLICT_A. */
1808 || !TEST_HARD_REG_BIT (data->profitable_hard_regs,
1811 full_costs[j] -= conflict_costs[k];
1813 queue_update_cost (conflict_a, NULL, COST_HOP_DIVISOR);
1819 /* Take into account preferences of allocnos connected by copies to
1820 the conflict allocnos. */
1821 update_conflict_hard_regno_costs (full_costs, aclass, true);
1823 /* Take preferences of allocnos connected by copies into
1827 start_update_cost ();
1828 queue_update_cost (a, NULL, COST_HOP_DIVISOR);
1829 update_conflict_hard_regno_costs (full_costs, aclass, false);
1831 min_cost = min_full_cost = INT_MAX;
1832 /* We don't care about giving callee saved registers to allocnos no
1833 living through calls because call clobbered registers are
1834 allocated first (it is usual practice to put them first in
1835 REG_ALLOC_ORDER). */
1836 mode = ALLOCNO_MODE (a);
/* Scan the class registers and pick the one with minimal full cost.  */
1837 for (i = 0; i < class_size; i++)
1839 hard_regno = ira_class_hard_regs[aclass][i];
1842 && FIRST_STACK_REG <= hard_regno && hard_regno <= LAST_STACK_REG)
1845 if (! check_hard_reg_p (a, hard_regno,
1846 conflicting_regs, profitable_hard_regs))
1849 full_cost = full_costs[i];
1850 if (!HONOR_REG_ALLOC_ORDER)
/* Penalize a callee-saved register by the prologue/epilogue
   save/restore cost it would newly incur.  */
1852 if ((saved_nregs = calculate_saved_nregs (hard_regno, mode)) != 0)
1853 /* We need to save/restore the hard register in
1854 epilogue/prologue. Therefore we increase the cost. */
1856 rclass = REGNO_REG_CLASS (hard_regno);
1857 add_cost = ((ira_memory_move_cost[mode][rclass][0]
1858 + ira_memory_move_cost[mode][rclass][1])
1859 * saved_nregs / hard_regno_nregs[hard_regno][mode] - 1);
1861 full_cost += add_cost;
1864 if (min_cost > cost)
1866 if (min_full_cost > full_cost)
1868 min_full_cost = full_cost;
1869 best_hard_regno = hard_regno;
1870 ira_assert (hard_regno >= 0);
/* Spill instead if memory is cheaper than the best register.  */
1873 if (min_full_cost > mem_cost)
1875 if (! retry_p && internal_flag_ira_verbose > 3 && ira_dump_file != NULL)
1876 fprintf (ira_dump_file, "(memory is more profitable %d vs %d) ",
1877 mem_cost, min_full_cost);
1878 best_hard_regno = -1;
1881 if (best_hard_regno >= 0)
1883 for (i = hard_regno_nregs[best_hard_regno][mode] - 1; i >= 0; i--)
1884 allocated_hardreg_p[best_hard_regno + i] = true;
1887 restore_costs_from_copies (a);
1888 ALLOCNO_HARD_REGNO (a) = best_hard_regno;
1889 ALLOCNO_ASSIGNED_P (a) = true;
1890 if (best_hard_regno >= 0)
1891 update_costs_from_copies (a, true, ! retry_p);
1892 ira_assert (ALLOCNO_CLASS (a) == aclass);
1893 /* We don't need updated costs anymore. */
1894 ira_free_allocno_updated_costs (a);
1895 return best_hard_regno >= 0;
1900 /* An array used to sort copies. */
/* Scratch buffer filled by form_threads_from_bucket /
   form_threads_from_colorable_allocno, sorted in form_threads_from_copies.  */
1901 static ira_copy_t *sorted_copies;
1903 /* Return TRUE if live ranges of allocnos A1 and A2 intersect. It is
1904 used to find a conflict for new allocnos or allocnos with the
1905 different allocno classes. */
1907 allocnos_conflict_by_live_ranges_p (ira_allocno_t a1, ira_allocno_t a2)
1911 int n1 = ALLOCNO_NUM_OBJECTS (a1);
1912 int n2 = ALLOCNO_NUM_OBJECTS (a2);
1916 reg1 = regno_reg_rtx[ALLOCNO_REGNO (a1)];
1917 reg2 = regno_reg_rtx[ALLOCNO_REGNO (a2)];
/* Pseudos split from the same original register are treated specially
   here (ORIGINAL_REGNO match).  */
1918 if (reg1 != NULL && reg2 != NULL
1919 && ORIGINAL_REGNO (reg1) == ORIGINAL_REGNO (reg2))
/* Otherwise test every object (word) pair for live-range overlap.  */
1922 for (i = 0; i < n1; i++)
1924 ira_object_t c1 = ALLOCNO_OBJECT (a1, i);
1926 for (j = 0; j < n2; j++)
1928 ira_object_t c2 = ALLOCNO_OBJECT (a2, j);
1930 if (ira_live_ranges_intersect_p (OBJECT_LIVE_RANGES (c1),
1931 OBJECT_LIVE_RANGES (c2)))
1938 /* The function is used to sort copies according to their execution
/* qsort comparator; higher-frequency copies sort first (processed
   first by form_threads_from_copies).  */
1941 copy_freq_compare_func (const void *v1p, const void *v2p)
1943 ira_copy_t cp1 = *(const ira_copy_t *) v1p, cp2 = *(const ira_copy_t *) v2p;
1951 /* If frequencies are equal, sort by copies, so that the results of
1952 qsort leave nothing to chance. */
/* Copy number is a unique, deterministic tie-breaker.  */
1953 return cp1->num - cp2->num;
1958 /* Return true if any allocno from thread of A1 conflicts with any
1959 allocno from thread A2. */
/* Threads are circular lists linked by next_thread_allocno; both loops
   start after the head and use the head (a1/a2) as sentinel.  */
1961 allocno_thread_conflict_p (ira_allocno_t a1, ira_allocno_t a2)
1963 ira_allocno_t a, conflict_a;
1965 for (a = ALLOCNO_COLOR_DATA (a2)->next_thread_allocno;;
1966 a = ALLOCNO_COLOR_DATA (a)->next_thread_allocno)
1968 for (conflict_a = ALLOCNO_COLOR_DATA (a1)->next_thread_allocno;;
1969 conflict_a = ALLOCNO_COLOR_DATA (conflict_a)->next_thread_allocno)
1971 if (allocnos_conflict_by_live_ranges_p (a, conflict_a))
1973 if (conflict_a == a1)
1982 /* Merge two threads given correspondingly by their first allocnos T1
1983 and T2 (more accurately merging T2 into T1). */
1985 merge_threads (ira_allocno_t t1, ira_allocno_t t2)
1987 ira_allocno_t a, next, last;
/* Both arguments must be thread heads (first_thread_allocno == self).  */
1989 gcc_assert (t1 != t2
1990 && ALLOCNO_COLOR_DATA (t1)->first_thread_allocno == t1
1991 && ALLOCNO_COLOR_DATA (t2)->first_thread_allocno == t2);
/* Re-point every member of T2's circular list at the new head T1.  */
1992 for (last = t2, a = ALLOCNO_COLOR_DATA (t2)->next_thread_allocno;;
1993 a = ALLOCNO_COLOR_DATA (a)->next_thread_allocno)
1995 ALLOCNO_COLOR_DATA (a)->first_thread_allocno = t1;
/* Splice T2's list into T1's right after the head.  */
2000 next = ALLOCNO_COLOR_DATA (t1)->next_thread_allocno;
2001 ALLOCNO_COLOR_DATA (t1)->next_thread_allocno = t2;
2002 ALLOCNO_COLOR_DATA (last)->next_thread_allocno = next;
/* Thread frequency accumulates across merged members.  */
2003 ALLOCNO_COLOR_DATA (t1)->thread_freq += ALLOCNO_COLOR_DATA (t2)->thread_freq;
2006 /* Create threads by processing CP_NUM copies from sorted copies. We
2007 process the most expensive copies first. */
/* Repeatedly sweeps sorted_copies, merging thread pairs connected by a
   copy when their threads do not conflict, then compacts the remaining
   copies and retries until none are left.  */
2009 form_threads_from_copies (int cp_num)
2011 ira_allocno_t a, thread1, thread2;
2015 qsort (sorted_copies, cp_num, sizeof (ira_copy_t), copy_freq_compare_func);
2016 /* Form threads processing copies, most frequently executed
2018 for (; cp_num != 0;)
2020 for (i = 0; i < cp_num; i++)
2022 cp = sorted_copies[i];
2023 thread1 = ALLOCNO_COLOR_DATA (cp->first)->first_thread_allocno;
2024 thread2 = ALLOCNO_COLOR_DATA (cp->second)->first_thread_allocno;
/* Already in the same thread: nothing to merge for this copy.  */
2025 if (thread1 == thread2)
2027 if (! allocno_thread_conflict_p (thread1, thread2))
2029 if (internal_flag_ira_verbose > 3 && ira_dump_file != NULL)
2032 " Forming thread by copy %d:a%dr%d-a%dr%d (freq=%d):\n",
2033 cp->num, ALLOCNO_NUM (cp->first), ALLOCNO_REGNO (cp->first),
2034 ALLOCNO_NUM (cp->second), ALLOCNO_REGNO (cp->second),
2036 merge_threads (thread1, thread2);
2037 if (internal_flag_ira_verbose > 3 && ira_dump_file != NULL)
2039 thread1 = ALLOCNO_COLOR_DATA (thread1)->first_thread_allocno;
2040 fprintf (ira_dump_file, " Result (freq=%d): a%dr%d(%d)",
2041 ALLOCNO_COLOR_DATA (thread1)->thread_freq,
2042 ALLOCNO_NUM (thread1), ALLOCNO_REGNO (thread1),
2043 ALLOCNO_FREQ (thread1));
2044 for (a = ALLOCNO_COLOR_DATA (thread1)->next_thread_allocno;
2046 a = ALLOCNO_COLOR_DATA (a)->next_thread_allocno)
2047 fprintf (ira_dump_file, " a%dr%d(%d)",
2048 ALLOCNO_NUM (a), ALLOCNO_REGNO (a),
2050 fprintf (ira_dump_file, "\n");
2056 /* Collect the rest of copies. */
/* Keep only copies whose endpoints are still in different threads;
   these are retried on the next outer iteration.  */
2057 for (n = 0; i < cp_num; i++)
2059 cp = sorted_copies[i];
2060 if (ALLOCNO_COLOR_DATA (cp->first)->first_thread_allocno
2061 != ALLOCNO_COLOR_DATA (cp->second)->first_thread_allocno)
2062 sorted_copies[n++] = cp;
2068 /* Create threads by processing copies of all alocnos from BUCKET. We
2069 process the most expensive copies first. */
/* Gathers into sorted_copies every copy touching an allocno in BUCKET
   (each copy collected once, from its `first' endpoint) and delegates
   to form_threads_from_copies.  */
2071 form_threads_from_bucket (ira_allocno_t bucket)
2074 ira_copy_t cp, next_cp;
2077 for (a = bucket; a != NULL; a = ALLOCNO_COLOR_DATA (a)->next_bucket_allocno)
2079 for (cp = ALLOCNO_COPIES (a); cp != NULL; cp = next_cp)
2083 next_cp = cp->next_first_allocno_copy;
2084 sorted_copies[cp_num++] = cp;
2086 else if (cp->second == a)
2087 next_cp = cp->next_second_allocno_copy;
2092 form_threads_from_copies (cp_num);
2095 /* Create threads by processing copies of colorable allocno A. We
2096 process most expensive copies first. */
2098 form_threads_from_colorable_allocno (ira_allocno_t a)
2100 ira_allocno_t another_a;
2101 ira_copy_t cp, next_cp;
2104 for (cp = ALLOCNO_COPIES (a); cp != NULL; cp = next_cp)
2108 next_cp = cp->next_first_allocno_copy;
2109 another_a = cp->second;
2111 else if (cp->second == a)
2113 next_cp = cp->next_second_allocno_copy;
2114 another_a = cp->first;
/* Only copies to allocnos that are off the conflict graph and not
   spill candidates, or are themselves colorable, are considered.  */
2118 if ((! ALLOCNO_COLOR_DATA (another_a)->in_graph_p
2119 && !ALLOCNO_COLOR_DATA (another_a)->may_be_spilled_p)
2120 || ALLOCNO_COLOR_DATA (another_a)->colorable_p)
2121 sorted_copies[cp_num++] = cp;
2123 form_threads_from_copies (cp_num);
2126 /* Form initial threads which contain only one allocno. */
/* Each allocno becomes a singleton circular thread: head and next both
   point to itself, and the thread frequency equals its own.  */
2128 init_allocno_threads (void)
2134 EXECUTE_IF_SET_IN_BITMAP (consideration_allocno_bitmap, 0, j, bi)
2136 a = ira_allocnos[j];
2137 /* Set up initial thread data: */
2138 ALLOCNO_COLOR_DATA (a)->first_thread_allocno
2139 = ALLOCNO_COLOR_DATA (a)->next_thread_allocno = a;
2140 ALLOCNO_COLOR_DATA (a)->thread_freq = ALLOCNO_FREQ (a);
2146 /* This page contains the allocator based on the Chaitin-Briggs algorithm. */
2148 /* Bucket of allocnos that can colored currently without spilling. */
/* Buckets are doubly linked lists threaded through the per-allocno
   next/prev_bucket_allocno fields.  */
2149 static ira_allocno_t colorable_allocno_bucket;
2151 /* Bucket of allocnos that might be not colored currently without
2153 static ira_allocno_t uncolorable_allocno_bucket;
2155 /* The current number of allocnos in the uncolorable_bucket. */
/* Maintained by add/delete_allocno_to/from_bucket.  */
2156 static int uncolorable_allocnos_num;
2158 /* Return the current spill priority of allocno A. The less the
2159 number, the more preferable the allocno for spilling. */
2161 allocno_spill_priority (ira_allocno_t a)
2163 allocno_color_data_t data = ALLOCNO_COLOR_DATA (a);
/* Priority is inversely proportional to register pressure relief:
   cheap-to-spill allocnos that free many registers score lowest.  */
2166 / (ALLOCNO_EXCESS_PRESSURE_POINTS_NUM (a)
2167 * ira_reg_class_max_nregs[ALLOCNO_CLASS (a)][ALLOCNO_MODE (a)]
2171 /* Add allocno A to bucket *BUCKET_PTR. A should be not in a bucket
2174 add_allocno_to_bucket (ira_allocno_t a, ira_allocno_t *bucket_ptr)
2176 ira_allocno_t first_a;
2177 allocno_color_data_t data;
/* Only real (non-NO_REGS) allocnos count toward the uncolorable total.  */
2179 if (bucket_ptr == &uncolorable_allocno_bucket
2180 && ALLOCNO_CLASS (a) != NO_REGS)
2182 uncolorable_allocnos_num++;
2183 ira_assert (uncolorable_allocnos_num > 0);
/* Push A at the front of the doubly linked bucket list.  */
2185 first_a = *bucket_ptr;
2186 data = ALLOCNO_COLOR_DATA (a);
2187 data->next_bucket_allocno = first_a;
2188 data->prev_bucket_allocno = NULL;
2189 if (first_a != NULL)
2190 ALLOCNO_COLOR_DATA (first_a)->prev_bucket_allocno = a;
2194 /* Compare two allocnos to define which allocno should be pushed first
2195 into the coloring stack. If the return is a negative number, the
2196 allocno given by the first parameter will be pushed first. In this
2197 case such allocno has less priority than the second one and the
2198 hard register will be assigned to it after assignment to the second
2199 one. As the result of such assignment order, the second allocno
2200 has a better chance to get the best hard register. */
2202 bucket_allocno_compare_func (const void *v1p, const void *v2p)
2204 ira_allocno_t a1 = *(const ira_allocno_t *) v1p;
2205 ira_allocno_t a2 = *(const ira_allocno_t *) v2p;
2206 int diff, freq1, freq2, a1_num, a2_num;
2207 ira_allocno_t t1 = ALLOCNO_COLOR_DATA (a1)->first_thread_allocno;
2208 ira_allocno_t t2 = ALLOCNO_COLOR_DATA (a2)->first_thread_allocno;
2209 int cl1 = ALLOCNO_CLASS (a1), cl2 = ALLOCNO_CLASS (a2);
/* Primary key: thread frequency; then thread id for determinism.  */
2211 freq1 = ALLOCNO_COLOR_DATA (t1)->thread_freq;
2212 freq2 = ALLOCNO_COLOR_DATA (t2)->thread_freq;
2213 if ((diff = freq1 - freq2) != 0)
2216 if ((diff = ALLOCNO_NUM (t2) - ALLOCNO_NUM (t1)) != 0)
2219 /* Push pseudos requiring less hard registers first. It means that
2220 we will assign pseudos requiring more hard registers first
2221 avoiding creation small holes in free hard register file into
2222 which the pseudos requiring more hard registers can not fit. */
2223 if ((diff = (ira_reg_class_max_nregs[cl1][ALLOCNO_MODE (a1)]
2224 - ira_reg_class_max_nregs[cl2][ALLOCNO_MODE (a2)])) != 0)
/* Then individual allocno frequency, then available register count,
   and finally allocno number as the deterministic tie-breaker.  */
2227 freq1 = ALLOCNO_FREQ (a1);
2228 freq2 = ALLOCNO_FREQ (a2);
2229 if ((diff = freq1 - freq2) != 0)
2232 a1_num = ALLOCNO_COLOR_DATA (a1)->available_regs_num;
2233 a2_num = ALLOCNO_COLOR_DATA (a2)->available_regs_num;
2234 if ((diff = a2_num - a1_num) != 0)
2236 return ALLOCNO_NUM (a2) - ALLOCNO_NUM (a1);
2239 /* Sort bucket *BUCKET_PTR and return the result through
/* Copies the bucket list into the sorted_allocnos scratch array, qsorts
   it with COMPARE_FUNC, then relinks the doubly linked list in order.  */
2242 sort_bucket (ira_allocno_t *bucket_ptr,
2243 int (*compare_func) (const void *, const void *))
2245 ira_allocno_t a, head;
2248 for (n = 0, a = *bucket_ptr;
2250 a = ALLOCNO_COLOR_DATA (a)->next_bucket_allocno)
2251 sorted_allocnos[n++] = a;
2254 qsort (sorted_allocnos, n, sizeof (ira_allocno_t), compare_func);
/* Rebuild the list from the back so HEAD ends up as the first element.  */
2256 for (n--; n >= 0; n--)
2258 a = sorted_allocnos[n];
2259 ALLOCNO_COLOR_DATA (a)->next_bucket_allocno = head;
2260 ALLOCNO_COLOR_DATA (a)->prev_bucket_allocno = NULL;
2262 ALLOCNO_COLOR_DATA (head)->prev_bucket_allocno = a;
2268 /* Add ALLOCNO to colorable bucket maintaining the order according
2269 their priority. ALLOCNO should be not in a bucket before the
/* NOTE(review): extraction dropped lines here (function header, braces,
   loop condition, and the if/else structure around the list splice).  */
2272 add_allocno_to_ordered_colorable_bucket (ira_allocno_t allocno)
2274 ira_allocno_t before, after;
2276 form_threads_from_colorable_allocno (allocno);
/* Scan the ordered bucket for the insertion point: the first element
   that compares greater than ALLOCNO.  */
2277 for (before = colorable_allocno_bucket, after = NULL;
2280 before = ALLOCNO_COLOR_DATA (before)->next_bucket_allocno)
2281 if (bucket_allocno_compare_func (&allocno, &before) < 0)
/* Splice ALLOCNO into the doubly-linked list between AFTER and BEFORE;
   when AFTER is NULL the allocno becomes the new bucket head.  */
2283 ALLOCNO_COLOR_DATA (allocno)->next_bucket_allocno = before;
2284 ALLOCNO_COLOR_DATA (allocno)->prev_bucket_allocno = after;
2286 colorable_allocno_bucket = allocno;
2288 ALLOCNO_COLOR_DATA (after)->next_bucket_allocno = allocno;
2290 ALLOCNO_COLOR_DATA (before)->prev_bucket_allocno = allocno;
2293 /* Delete ALLOCNO from bucket *BUCKET_PTR. It should be there before
/* NOTE(review): extraction dropped the function header and braces, plus
   the `else` line between the prev-link update and the head update.  */
2296 delete_allocno_from_bucket (ira_allocno_t allocno, ira_allocno_t *bucket_ptr)
2298 ira_allocno_t prev_allocno, next_allocno;
/* Keep the global count of uncolorable allocnos in sync when removing
   from the uncolorable bucket.  */
2300 if (bucket_ptr == &uncolorable_allocno_bucket
2301 && ALLOCNO_CLASS (allocno) != NO_REGS)
2303 uncolorable_allocnos_num--;
2304 ira_assert (uncolorable_allocnos_num >= 0);
/* Standard doubly-linked-list unlink.  */
2306 prev_allocno = ALLOCNO_COLOR_DATA (allocno)->prev_bucket_allocno;
2307 next_allocno = ALLOCNO_COLOR_DATA (allocno)->next_bucket_allocno;
2308 if (prev_allocno != NULL)
2309 ALLOCNO_COLOR_DATA (prev_allocno)->next_bucket_allocno = next_allocno;
/* No predecessor: ALLOCNO must have been the bucket head.  */
2312 ira_assert (*bucket_ptr == allocno);
2313 *bucket_ptr = next_allocno;
2315 if (next_allocno != NULL)
2316 ALLOCNO_COLOR_DATA (next_allocno)->prev_bucket_allocno = prev_allocno;
2319 /* Put allocno A onto the coloring stack without removing it from its
2320 bucket. Pushing allocno to the coloring stack can result in moving
2321 conflicting allocnos from the uncolorable bucket to the colorable
/* NOTE(review): extraction dropped lines (function header, braces,
   early-return for NO_REGS, and the `continue;` after the conflict
   filter) — annotate surviving lines only.  */
2324 push_allocno_to_stack (ira_allocno_t a)
2326 enum reg_class aclass;
2327 allocno_color_data_t data, conflict_data;
2328 int size, i, n = ALLOCNO_NUM_OBJECTS (a);
/* Remove A from the conflict graph and record it on the stack.  */
2330 data = ALLOCNO_COLOR_DATA (a);
2331 data->in_graph_p = false;
2332 allocno_stack_vec.safe_push (a);
2333 aclass = ALLOCNO_CLASS (a);
2334 if (aclass == NO_REGS)
2336 size = ira_reg_class_max_nregs[aclass][ALLOCNO_MODE (a)];
2339 /* We will deal with the subwords individually. */
2340 gcc_assert (size == ALLOCNO_NUM_OBJECTS (a));
/* For each object (subword) of A, visit its conflicts and see whether
   removing A makes any conflicting allocno colorable.  */
2343 for (i = 0; i < n; i++)
2345 ira_object_t obj = ALLOCNO_OBJECT (a, i);
2346 ira_object_t conflict_obj;
2347 ira_object_conflict_iterator oci;
2349 FOR_EACH_OBJECT_CONFLICT (obj, conflict_obj, oci)
2351 ira_allocno_t conflict_a = OBJECT_ALLOCNO (conflict_obj);
2353 conflict_data = ALLOCNO_COLOR_DATA (conflict_a);
/* Skip conflicts that are already colorable, already off the graph,
   already assigned, or whose profitable regs don't overlap A's.  */
2354 if (conflict_data->colorable_p
2355 || ! conflict_data->in_graph_p
2356 || ALLOCNO_ASSIGNED_P (conflict_a)
2357 || !(hard_reg_set_intersect_p
2358 (ALLOCNO_COLOR_DATA (a)->profitable_hard_regs,
2359 conflict_data->profitable_hard_regs)))
2361 ira_assert (bitmap_bit_p (coloring_allocno_bitmap,
2362 ALLOCNO_NUM (conflict_a)));
/* If the conflict's left-conflict size now fits, promote it from the
   uncolorable bucket to the ordered colorable bucket.  */
2363 if (update_left_conflict_sizes_p (conflict_a, a, size))
2365 delete_allocno_from_bucket
2366 (conflict_a, &uncolorable_allocno_bucket);
2367 add_allocno_to_ordered_colorable_bucket (conflict_a);
2368 if (internal_flag_ira_verbose > 4 && ira_dump_file != NULL)
2370 fprintf (ira_dump_file, " Making");
2371 ira_print_expanded_allocno (conflict_a);
2372 fprintf (ira_dump_file, " colorable\n");
2380 /* Put ALLOCNO onto the coloring stack and remove it from its bucket.
2381 The allocno is in the colorable bucket if COLORABLE_P is TRUE. */
/* NOTE(review): extraction dropped the function header, braces and the
   if/else around the two delete calls (keyed on COLORABLE_P) and around
   the two fprintf variants.  */
2383 remove_allocno_from_bucket_and_push (ira_allocno_t allocno, bool colorable_p)
2386 delete_allocno_from_bucket (allocno, &colorable_allocno_bucket);
2388 delete_allocno_from_bucket (allocno, &uncolorable_allocno_bucket);
2389 if (internal_flag_ira_verbose > 3 && ira_dump_file != NULL)
2391 fprintf (ira_dump_file, " Pushing");
2392 ira_print_expanded_allocno (allocno);
/* Colorable allocnos dump their cost; uncolorable ones are potential
   spills and dump spill priority as well.  */
2394 fprintf (ira_dump_file, "(cost %d)\n",
2395 ALLOCNO_COLOR_DATA (allocno)->temp);
2397 fprintf (ira_dump_file, "(potential spill: %spri=%d, cost=%d)\n",
2398 ALLOCNO_BAD_SPILL_P (allocno) ? "bad spill, " : "",
2399 allocno_spill_priority (allocno),
2400 ALLOCNO_COLOR_DATA (allocno)->temp);
/* An uncolorable allocno pushed here may end up spilled.  */
2403 ALLOCNO_COLOR_DATA (allocno)->may_be_spilled_p = true;
2404 push_allocno_to_stack (allocno);
2407 /* Put all allocnos from colorable bucket onto the coloring stack. */
/* NOTE(review): extraction dropped the `static void` header and braces.  */
2409 push_only_colorable (void)
/* Sort the colorable bucket, then drain it head-first onto the stack.  */
2411 form_threads_from_bucket (colorable_allocno_bucket);
2412 sort_bucket (&colorable_allocno_bucket, bucket_allocno_compare_func);
2413 for (;colorable_allocno_bucket != NULL;)
2414 remove_allocno_from_bucket_and_push (colorable_allocno_bucket, true);
2417 /* Return the frequency of exit edges (if EXIT_P) or entry from/to the
2418 loop given by its LOOP_NODE. */
/* NOTE(review): extraction dropped the return-type line, locals
   (freq, edge/iterator declarations), the if/else on EXIT_P, and the
   edges vector release — annotate surviving lines only.  */
2420 ira_loop_edge_freq (ira_loop_tree_node_t loop_node, int regno, bool exit_p)
2427 ira_assert (current_loops != NULL && loop_node->loop != NULL
2428 && (regno < 0 || regno >= FIRST_PSEUDO_REGISTER));
/* Entry case: sum frequencies of non-latch preds of the loop header;
   when REGNO >= 0, count only edges where the pseudo is live across.  */
2432 FOR_EACH_EDGE (e, ei, loop_node->loop->header->preds)
2433 if (e->src != loop_node->loop->latch
2435 || (bitmap_bit_p (df_get_live_out (e->src), regno)
2436 && bitmap_bit_p (df_get_live_in (e->dest), regno))))
2437 freq += EDGE_FREQUENCY (e);
/* Exit case: same liveness filter over the loop's exit edges.  */
2441 edges = get_loop_exit_edges (loop_node->loop);
2442 FOR_EACH_VEC_ELT (edges, i, e)
2444 || (bitmap_bit_p (df_get_live_out (e->src), regno)
2445 && bitmap_bit_p (df_get_live_in (e->dest), regno)))
2446 freq += EDGE_FREQUENCY (e);
/* Scale raw edge frequency into the REG_FREQ domain.  */
2450 return REG_FREQ_FROM_EDGE_FREQ (freq);
2453 /* Calculate and return the cost of putting allocno A into memory. */
/* NOTE(review): extraction dropped the return-type line, braces, some
   locals (regno/cost/mode), early `return cost;` statements after the
   CAP/parent checks, the `else` branch header, and the final return.  */
2455 calculate_allocno_spill_cost (ira_allocno_t a)
2459 enum reg_class rclass;
2460 ira_allocno_t parent_allocno;
2461 ira_loop_tree_node_t parent_node, loop_node;
/* Base cost: memory cost minus best class (register) cost.  */
2463 regno = ALLOCNO_REGNO (a);
2464 cost = ALLOCNO_UPDATED_MEMORY_COST (a) - ALLOCNO_UPDATED_CLASS_COST (a);
2465 if (ALLOCNO_CAP (a) != NULL)
2467 loop_node = ALLOCNO_LOOP_TREE_NODE (a);
2468 if ((parent_node = loop_node->parent) == NULL)
2470 if ((parent_allocno = parent_node->regno_allocno_map[regno]) == NULL)
2472 mode = ALLOCNO_MODE (a);
2473 rclass = ALLOCNO_CLASS (a);
/* Parent in memory: spilling A removes the loads/stores on loop
   boundaries, so subtract those move costs.  */
2474 if (ALLOCNO_HARD_REGNO (parent_allocno) < 0)
2475 cost -= (ira_memory_move_cost[mode][rclass][0]
2476 * ira_loop_edge_freq (loop_node, regno, true)
2477 + ira_memory_move_cost[mode][rclass][1]
2478 * ira_loop_edge_freq (loop_node, regno, false));
/* Parent in a register: spilling A adds memory moves on the loop
   boundaries but removes the register-register moves.  */
2481 ira_init_register_move_cost_if_necessary (mode);
2482 cost += ((ira_memory_move_cost[mode][rclass][1]
2483 * ira_loop_edge_freq (loop_node, regno, true)
2484 + ira_memory_move_cost[mode][rclass][0]
2485 * ira_loop_edge_freq (loop_node, regno, false))
2486 - (ira_register_move_cost[mode][rclass][rclass]
2487 * (ira_loop_edge_freq (loop_node, regno, false)
2488 + ira_loop_edge_freq (loop_node, regno, true))));
2493 /* Used for sorting allocnos for spilling. */
2495 allocno_spill_priority_compare (ira_allocno_t a1, ira_allocno_t a2)
2497 int pri1, pri2, diff;
2499 if (ALLOCNO_BAD_SPILL_P (a1) && ! ALLOCNO_BAD_SPILL_P (a2))
2501 if (ALLOCNO_BAD_SPILL_P (a2) && ! ALLOCNO_BAD_SPILL_P (a1))
2503 pri1 = allocno_spill_priority (a1);
2504 pri2 = allocno_spill_priority (a2);
2505 if ((diff = pri1 - pri2) != 0)
2508 = ALLOCNO_COLOR_DATA (a1)->temp - ALLOCNO_COLOR_DATA (a2)->temp) != 0)
2510 return ALLOCNO_NUM (a1) - ALLOCNO_NUM (a2);
2513 /* Used for sorting allocnos for spilling. */
2515 allocno_spill_sort_compare (const void *v1p, const void *v2p)
2517 ira_allocno_t p1 = *(const ira_allocno_t *) v1p;
2518 ira_allocno_t p2 = *(const ira_allocno_t *) v2p;
2520 return allocno_spill_priority_compare (p1, p2);
2523 /* Push allocnos to the coloring stack. The order of allocnos in the
2524 stack defines the order for the subsequent coloring. */
/* NOTE(review): extraction dropped the `static void` header, braces,
   locals (a, cost), loop conditions, and the main for(;;) driver around
   push_only_colorable — annotate surviving lines only.  */
2526 push_allocnos_to_stack (void)
2531 /* Calculate uncolorable allocno spill costs. */
2532 for (a = uncolorable_allocno_bucket;
2534 a = ALLOCNO_COLOR_DATA (a)->next_bucket_allocno)
2535 if (ALLOCNO_CLASS (a) != NO_REGS)
2537 cost = calculate_allocno_spill_cost (a);
2538 /* ??? Remove cost of copies between the coalesced
/* Cache the spill cost in ->temp for allocno_spill_priority_compare.  */
2540 ALLOCNO_COLOR_DATA (a)->temp = cost;
2542 sort_bucket (&uncolorable_allocno_bucket, allocno_spill_sort_compare);
/* Main loop: drain the colorable bucket, then push one uncolorable
   allocno (cheapest to spill first) and repeat until both are empty.  */
2545 push_only_colorable ();
2546 a = uncolorable_allocno_bucket;
2549 remove_allocno_from_bucket_and_push (a, false);
2551 ira_assert (colorable_allocno_bucket == NULL
2552 && uncolorable_allocno_bucket == NULL);
2553 ira_assert (uncolorable_allocnos_num == 0);
2556 /* Pop the coloring stack and assign hard registers to the popped
/* NOTE(review): extraction dropped the `static void` header, braces,
   and the string literal argument of the "spill" fprintf — annotate
   surviving lines only.  */
2559 pop_allocnos_from_stack (void)
2561 ira_allocno_t allocno;
2562 enum reg_class aclass;
/* Pop in reverse push order; each popped allocno gets a register or
   memory.  */
2564 for (;allocno_stack_vec.length () != 0;)
2566 allocno = allocno_stack_vec.pop ();
2567 aclass = ALLOCNO_CLASS (allocno);
2568 if (internal_flag_ira_verbose > 3 && ira_dump_file != NULL)
2570 fprintf (ira_dump_file, " Popping");
2571 ira_print_expanded_allocno (allocno);
2572 fprintf (ira_dump_file, " -- ");
/* NO_REGS allocnos can only live in memory.  */
2574 if (aclass == NO_REGS)
2576 ALLOCNO_HARD_REGNO (allocno) = -1;
2577 ALLOCNO_ASSIGNED_P (allocno) = true;
2578 ira_assert (ALLOCNO_UPDATED_HARD_REG_COSTS (allocno) == NULL);
2580 (ALLOCNO_UPDATED_CONFLICT_HARD_REG_COSTS (allocno) == NULL);
2581 if (internal_flag_ira_verbose > 3 && ira_dump_file != NULL)
2582 fprintf (ira_dump_file, "assign memory\n");
2584 else if (assign_hard_reg (allocno, false))
2586 if (internal_flag_ira_verbose > 3 && ira_dump_file != NULL)
2587 fprintf (ira_dump_file, "assign reg %d\n",
2588 ALLOCNO_HARD_REGNO (allocno));
2590 else if (ALLOCNO_ASSIGNED_P (allocno))
2592 if (internal_flag_ira_verbose > 3 && ira_dump_file != NULL)
2593 fprintf (ira_dump_file, "spill%s\n",
2594 ALLOCNO_COLOR_DATA (allocno)->may_be_spilled_p
/* Mark the allocno as back in the conflict graph.  */
2597 ALLOCNO_COLOR_DATA (allocno)->in_graph_p = true;
2601 /* Set up number of available hard registers for allocno A. */
/* NOTE(review): extraction dropped the `static void` header, braces,
   early returns (including the `return;` after the verbosity check at
   2624 — the inverted condition is correct only with that return), and
   the loop body counting `n++`.  */
2603 setup_allocno_available_regs_num (ira_allocno_t a)
2605 int i, n, hard_regno, hard_regs_num, nwords;
2606 enum reg_class aclass;
2607 allocno_color_data_t data;
2609 aclass = ALLOCNO_CLASS (a);
2610 data = ALLOCNO_COLOR_DATA (a);
2611 data->available_regs_num = 0;
2612 if (aclass == NO_REGS)
2614 hard_regs_num = ira_class_hard_regs_num[aclass];
2615 nwords = ALLOCNO_NUM_OBJECTS (a);
/* Count the class's hard regs that are in A's profitable set.  */
2616 for (n = 0, i = hard_regs_num - 1; i >= 0; i--)
2618 hard_regno = ira_class_hard_regs[aclass][i];
2619 /* Checking only profitable hard regs. */
2620 if (TEST_HARD_REG_BIT (data->profitable_hard_regs, hard_regno))
2623 data->available_regs_num = n;
/* Everything below is dump-file diagnostics only.  */
2624 if (internal_flag_ira_verbose <= 2 || ira_dump_file == NULL)
2628 " Allocno a%dr%d of %s(%d) has %d avail. regs ",
2629 ALLOCNO_NUM (a), ALLOCNO_REGNO (a),
2630 reg_class_names[aclass], ira_class_hard_regs_num[aclass], n);
2631 print_hard_reg_set (ira_dump_file, data->profitable_hard_regs, false);
2632 fprintf (ira_dump_file, ", %snode: ",
2633 hard_reg_set_equal_p (data->profitable_hard_regs,
2634 data->hard_regs_node->hard_regs->set)
2636 print_hard_reg_set (ira_dump_file,
2637 data->hard_regs_node->hard_regs->set, false);
/* Dump each object's total conflict hard regs.  */
2638 for (i = 0; i < nwords; i++)
2640 ira_object_t obj = ALLOCNO_OBJECT (a, i);
2645 fprintf (ira_dump_file, ", ");
2646 fprintf (ira_dump_file, " obj %d", i);
2648 fprintf (ira_dump_file, " (confl regs = ");
2649 print_hard_reg_set (ira_dump_file, OBJECT_TOTAL_CONFLICT_HARD_REGS (obj),
2651 fprintf (ira_dump_file, ")");
2653 fprintf (ira_dump_file, "\n");
2656 /* Put ALLOCNO in a bucket corresponding to its number and size of its
2657 conflicting allocnos and hard registers. */
/* NOTE(review): extraction dropped the `static void` header, braces,
   and the `else` line before the uncolorable-bucket call.  */
2659 put_allocno_into_bucket (ira_allocno_t allocno)
2661 ALLOCNO_COLOR_DATA (allocno)->in_graph_p = true;
2662 setup_allocno_available_regs_num (allocno);
/* setup_left_conflict_sizes_p decides colorability: true -> colorable
   bucket, otherwise the uncolorable bucket.  */
2663 if (setup_left_conflict_sizes_p (allocno))
2664 add_allocno_to_bucket (allocno, &colorable_allocno_bucket);
2666 add_allocno_to_bucket (allocno, &uncolorable_allocno_bucket);
2669 /* Map: allocno number -> allocno priority. */
/* NOTE(review): written by setup_allocno_priorities and read by
   allocno_priority_compare_func; allocation/free are outside this
   excerpt.  */
2670 static int *allocno_priorities;
2672 /* Set up priorities for N allocnos in array
2673 CONSIDERATION_ALLOCNOS. */
/* NOTE(review): extraction dropped the `static void` header, braces,
   the `ira_allocno_t a;` local, the max_priority initialization, and
   parts of the priority formula (e.g. the `(1 << mult)` factor and the
   absolute-value handling) — annotate surviving lines only.  */
2675 setup_allocno_priorities (ira_allocno_t *consideration_allocnos, int n)
2677 int i, length, nrefs, priority, max_priority, mult;
/* First pass: compute a raw priority per allocno and track the
   maximum magnitude.  */
2681 for (i = 0; i < n; i++)
2683 a = consideration_allocnos[i];
2684 nrefs = ALLOCNO_NREFS (a);
2685 ira_assert (nrefs >= 0);
/* log2 of the reference count scales the cost-based priority.  */
2686 mult = floor_log2 (ALLOCNO_NREFS (a)) + 1;
2687 ira_assert (mult >= 0);
2688 allocno_priorities[ALLOCNO_NUM (a)]
2691 * (ALLOCNO_MEMORY_COST (a) - ALLOCNO_CLASS_COST (a))
2692 * ira_reg_class_max_nregs[ALLOCNO_CLASS (a)][ALLOCNO_MODE (a)]);
2694 priority = -priority;
2695 if (max_priority < priority)
2696 max_priority = priority;
/* Second pass: rescale priorities into the int range and divide by
   excess-pressure length (per object for multi-object allocnos).  */
2698 mult = max_priority == 0 ? 1 : INT_MAX / max_priority;
2699 for (i = 0; i < n; i++)
2701 a = consideration_allocnos[i];
2702 length = ALLOCNO_EXCESS_PRESSURE_POINTS_NUM (a);
2703 if (ALLOCNO_NUM_OBJECTS (a) > 1)
2704 length /= ALLOCNO_NUM_OBJECTS (a);
2707 allocno_priorities[ALLOCNO_NUM (a)]
2708 = allocno_priorities[ALLOCNO_NUM (a)] * mult / length;
2712 /* Sort allocnos according to the profit of usage of a hard register
2713 instead of memory for them. */
2715 allocno_cost_compare_func (const void *v1p, const void *v2p)
2717 ira_allocno_t p1 = *(const ira_allocno_t *) v1p;
2718 ira_allocno_t p2 = *(const ira_allocno_t *) v2p;
2721 c1 = ALLOCNO_UPDATED_MEMORY_COST (p1) - ALLOCNO_UPDATED_CLASS_COST (p1);
2722 c2 = ALLOCNO_UPDATED_MEMORY_COST (p2) - ALLOCNO_UPDATED_CLASS_COST (p2);
2726 /* If regs are equally good, sort by allocno numbers, so that the
2727 results of qsort leave nothing to chance. */
2728 return ALLOCNO_NUM (p1) - ALLOCNO_NUM (p2);
2731 /* We used Chaitin-Briggs coloring to assign as many pseudos as
2732 possible to hard registers. Let us try to improve allocation with
2733 cost point of view. This function improves the allocation by
2734 spilling some allocnos and assigning the freed hard registers to
2735 other allocnos if it decreases the overall allocation cost. */
/* NOTE(review): extraction dropped many lines here (function header,
   braces, several locals and `continue`/`goto`-style control lines,
   initializations of `check`, `n`, `min_cost`, `best`, etc.) — the
   comments below annotate only the surviving lines; verify against the
   complete file before editing.  */
2737 improve_allocation (void)
2740 int j, k, n, hregno, conflict_hregno, base_cost, class_size, word, nwords;
2741 int check, spill_cost, min_cost, nregs, conflict_nregs, r, best;
2743 enum reg_class aclass;
2746 int costs[FIRST_PSEUDO_REGISTER];
2747 HARD_REG_SET conflicting_regs[2], profitable_hard_regs;
2751 /* Clear counts used to process conflicting allocnos only once for
2753 EXECUTE_IF_SET_IN_BITMAP (coloring_allocno_bitmap, 0, i, bi)
2754 ALLOCNO_COLOR_DATA (ira_allocnos[i])->temp = 0;
2756 /* Process each allocno and try to assign a hard register to it by
2757 spilling some its conflicting allocnos. */
2758 EXECUTE_IF_SET_IN_BITMAP (coloring_allocno_bitmap, 0, i, bi)
2760 a = ira_allocnos[i];
2761 ALLOCNO_COLOR_DATA (a)->temp = 0;
2762 if (empty_profitable_hard_regs (a))
/* base_cost is the cost of A's current assignment (memory if it has no
   hard register, otherwise its cost for the current register).  */
2765 aclass = ALLOCNO_CLASS (a);
2766 allocno_costs = ALLOCNO_UPDATED_HARD_REG_COSTS (a);
2767 if (allocno_costs == NULL)
2768 allocno_costs = ALLOCNO_HARD_REG_COSTS (a);
2769 if ((hregno = ALLOCNO_HARD_REGNO (a)) < 0)
2770 base_cost = ALLOCNO_UPDATED_MEMORY_COST (a);
2771 else if (allocno_costs == NULL)
2772 /* It means that assigning a hard register is not profitable
2773 (we don't waste memory for hard register costs in this
2777 base_cost = allocno_costs[ira_class_hard_reg_index[aclass][hregno]];
2779 get_conflict_and_start_profitable_regs (a, false,
2781 &profitable_hard_regs);
2782 class_size = ira_class_hard_regs_num[aclass];
2783 /* Set up cost improvement for usage of each profitable hard
2784 register for allocno A. */
2785 for (j = 0; j < class_size; j++)
2787 hregno = ira_class_hard_regs[aclass][j];
2788 if (! check_hard_reg_p (a, hregno,
2789 conflicting_regs, profitable_hard_regs))
2791 ira_assert (ira_class_hard_reg_index[aclass][hregno] == j);
2792 k = allocno_costs == NULL ? 0 : j;
2793 costs[hregno] = (allocno_costs == NULL
2794 ? ALLOCNO_UPDATED_CLASS_COST (a) : allocno_costs[k]);
/* costs[hregno] < 0 means using HREGNO is cheaper than A's current
   assignment.  */
2795 costs[hregno] -= base_cost;
2796 if (costs[hregno] < 0)
2800 /* There is no chance to improve the allocation cost by
2801 assigning hard register to allocno A even without spilling
2802 conflicting allocnos. */
2804 mode = ALLOCNO_MODE (a);
2805 nwords = ALLOCNO_NUM_OBJECTS (a);
2806 /* Process each allocno conflicting with A and update the cost
2807 improvement for profitable hard registers of A. To use a
2808 hard register for A we need to spill some conflicting
2809 allocnos and that creates penalty for the cost
2811 for (word = 0; word < nwords; word++)
2813 ira_object_t conflict_obj;
2814 ira_object_t obj = ALLOCNO_OBJECT (a, word);
2815 ira_object_conflict_iterator oci;
2817 FOR_EACH_OBJECT_CONFLICT (obj, conflict_obj, oci)
2819 ira_allocno_t conflict_a = OBJECT_ALLOCNO (conflict_obj);
/* ->temp == check marks allocnos already visited this round.  */
2821 if (ALLOCNO_COLOR_DATA (conflict_a)->temp == check)
2822 /* We already processed this conflicting allocno
2823 because we processed earlier another object of the
2824 conflicting allocno. */
2826 ALLOCNO_COLOR_DATA (conflict_a)->temp = check;
2827 if ((conflict_hregno = ALLOCNO_HARD_REGNO (conflict_a)) < 0)
/* spill_cost: what we lose by evicting CONFLICT_A from its current
   hard register (memory cost minus its register cost).  */
2829 spill_cost = ALLOCNO_UPDATED_MEMORY_COST (conflict_a);
2830 k = (ira_class_hard_reg_index
2831 [ALLOCNO_CLASS (conflict_a)][conflict_hregno]);
2832 ira_assert (k >= 0);
2833 if ((allocno_costs = ALLOCNO_UPDATED_HARD_REG_COSTS (conflict_a))
2835 spill_cost -= allocno_costs[k];
2836 else if ((allocno_costs = ALLOCNO_HARD_REG_COSTS (conflict_a))
2838 spill_cost -= allocno_costs[k];
2840 spill_cost -= ALLOCNO_UPDATED_CLASS_COST (conflict_a);
2842 = hard_regno_nregs[conflict_hregno][ALLOCNO_MODE (conflict_a)];
/* Add the spill penalty to every candidate register of A that would
   overlap CONFLICT_A's current register range.  */
2843 for (r = conflict_hregno;
2844 r >= 0 && r + hard_regno_nregs[r][mode] > conflict_hregno;
2846 if (check_hard_reg_p (a, r,
2847 conflicting_regs, profitable_hard_regs))
2848 costs[r] += spill_cost;
2849 for (r = conflict_hregno + 1;
2850 r < conflict_hregno + conflict_nregs;
2852 if (check_hard_reg_p (a, r,
2853 conflicting_regs, profitable_hard_regs))
2854 costs[r] += spill_cost;
2859 /* Now we choose hard register for A which results in highest
2860 allocation cost improvement. */
2861 for (j = 0; j < class_size; j++)
2863 hregno = ira_class_hard_regs[aclass][j];
2864 if (check_hard_reg_p (a, hregno,
2865 conflicting_regs, profitable_hard_regs)
2866 && min_cost > costs[hregno])
2869 min_cost = costs[hregno];
2873 /* We are in a situation when assigning any hard register to A
2874 by spilling some conflicting allocnos does not improve the
2877 nregs = hard_regno_nregs[best][mode];
2878 /* Now spill conflicting allocnos which contain a hard register
2879 of A when we assign the best chosen hard register to it. */
2880 for (word = 0; word < nwords; word++)
2882 ira_object_t conflict_obj;
2883 ira_object_t obj = ALLOCNO_OBJECT (a, word);
2884 ira_object_conflict_iterator oci;
2886 FOR_EACH_OBJECT_CONFLICT (obj, conflict_obj, oci)
2888 ira_allocno_t conflict_a = OBJECT_ALLOCNO (conflict_obj);
2890 if ((conflict_hregno = ALLOCNO_HARD_REGNO (conflict_a)) < 0)
2893 = hard_regno_nregs[conflict_hregno][ALLOCNO_MODE (conflict_a)];
2894 if (best + nregs <= conflict_hregno
2895 || conflict_hregno + conflict_nregs <= best)
2896 /* No intersection. */
/* Evict CONFLICT_A and queue it for re-coloring below.  */
2898 ALLOCNO_HARD_REGNO (conflict_a) = -1;
2899 sorted_allocnos[n++] = conflict_a;
2900 if (internal_flag_ira_verbose > 2 && ira_dump_file != NULL)
2901 fprintf (ira_dump_file, "Spilling a%dr%d for a%dr%d\n",
2902 ALLOCNO_NUM (conflict_a), ALLOCNO_REGNO (conflict_a),
2903 ALLOCNO_NUM (a), ALLOCNO_REGNO (a));
2906 /* Assign the best chosen hard register to A. */
2907 ALLOCNO_HARD_REGNO (a) = best;
2908 if (internal_flag_ira_verbose > 2 && ira_dump_file != NULL)
2909 fprintf (ira_dump_file, "Assigning %d to a%dr%d\n",
2910 best, ALLOCNO_NUM (a), ALLOCNO_REGNO (a));
2914 /* We spilled some allocnos to assign their hard registers to other
2915 allocnos. The spilled allocnos are now in array
2916 'sorted_allocnos'. There is still a possibility that some of the
2917 spilled allocnos can get hard registers. So let us try assign
2918 them hard registers again (just a reminder -- function
2919 'assign_hard_reg' assigns hard registers only if it is possible
2920 and profitable). We process the spilled allocnos with biggest
2921 benefit to get hard register first -- see function
2922 'allocno_cost_compare_func'. */
2923 qsort (sorted_allocnos, n, sizeof (ira_allocno_t),
2924 allocno_cost_compare_func);
2925 for (j = 0; j < n; j++)
2927 a = sorted_allocnos[j];
2928 ALLOCNO_ASSIGNED_P (a) = false;
2929 if (internal_flag_ira_verbose > 3 && ira_dump_file != NULL)
2931 fprintf (ira_dump_file, " ");
2932 ira_print_expanded_allocno (a);
2933 fprintf (ira_dump_file, " -- ");
2935 if (assign_hard_reg (a, false))
2937 if (internal_flag_ira_verbose > 3 && ira_dump_file != NULL)
2938 fprintf (ira_dump_file, "assign hard reg %d\n",
2939 ALLOCNO_HARD_REGNO (a));
2943 if (internal_flag_ira_verbose > 3 && ira_dump_file != NULL)
2944 fprintf (ira_dump_file, "assign memory\n")
2949 /* Sort allocnos according to their priorities. */
2951 allocno_priority_compare_func (const void *v1p, const void *v2p)
2953 ira_allocno_t a1 = *(const ira_allocno_t *) v1p;
2954 ira_allocno_t a2 = *(const ira_allocno_t *) v2p;
2957 pri1 = allocno_priorities[ALLOCNO_NUM (a1)];
2958 pri2 = allocno_priorities[ALLOCNO_NUM (a2)];
2960 return SORTGT (pri2, pri1);
2962 /* If regs are equally good, sort by allocnos, so that the results of
2963 qsort leave nothing to chance. */
2964 return ALLOCNO_NUM (a1) - ALLOCNO_NUM (a2);
2967 /* Chaitin-Briggs coloring for allocnos in COLORING_ALLOCNO_BITMAP
2968 taking into account allocnos in CONSIDERATION_ALLOCNO_BITMAP. */
/* NOTE(review): extraction dropped the `static void` header, braces,
   locals (a, i, n, l, nr, bi), the priority-algorithm `n = 0;` setup,
   and several `else` lines — annotate surviving lines only.  */
2970 color_allocnos (void)
/* First, prune preferences for hard regs outside each allocno's
   profitable set.  */
2976 setup_profitable_hard_regs ();
2977 EXECUTE_IF_SET_IN_BITMAP (coloring_allocno_bitmap, 0, i, bi)
2980 HARD_REG_SET conflict_hard_regs;
2981 allocno_color_data_t data;
2982 ira_pref_t pref, next_pref;
2984 a = ira_allocnos[i];
2985 nr = ALLOCNO_NUM_OBJECTS (a);
2986 CLEAR_HARD_REG_SET (conflict_hard_regs);
2987 for (l = 0; l < nr; l++)
2989 ira_object_t obj = ALLOCNO_OBJECT (a, l);
2990 IOR_HARD_REG_SET (conflict_hard_regs,
2991 OBJECT_CONFLICT_HARD_REGS (obj));
2993 data = ALLOCNO_COLOR_DATA (a);
2994 for (pref = ALLOCNO_PREFS (a); pref != NULL; pref = next_pref)
2996 next_pref = pref->next_pref;
2997 if (! ira_hard_reg_in_set_p (pref->hard_regno,
2999 data->profitable_hard_regs))
3000 ira_remove_pref (pref);
/* Priority-based algorithm: spill NO_REGS allocnos immediately, sort
   the rest by priority, and assign in that order.  */
3003 if (flag_ira_algorithm == IRA_ALGORITHM_PRIORITY)
3006 EXECUTE_IF_SET_IN_BITMAP (coloring_allocno_bitmap, 0, i, bi)
3008 a = ira_allocnos[i];
3009 if (ALLOCNO_CLASS (a) == NO_REGS)
3011 ALLOCNO_HARD_REGNO (a) = -1;
3012 ALLOCNO_ASSIGNED_P (a) = true;
3013 ira_assert (ALLOCNO_UPDATED_HARD_REG_COSTS (a) == NULL);
3014 ira_assert (ALLOCNO_UPDATED_CONFLICT_HARD_REG_COSTS (a) == NULL);
3015 if (internal_flag_ira_verbose > 3 && ira_dump_file != NULL)
3017 fprintf (ira_dump_file, " Spill");
3018 ira_print_expanded_allocno (a);
3019 fprintf (ira_dump_file, "\n");
3023 sorted_allocnos[n++] = a;
3027 setup_allocno_priorities (sorted_allocnos, n);
3028 qsort (sorted_allocnos, n, sizeof (ira_allocno_t),
3029 allocno_priority_compare_func);
3030 for (i = 0; i < n; i++)
3032 a = sorted_allocnos[i];
3033 if (internal_flag_ira_verbose > 3 && ira_dump_file != NULL)
3035 fprintf (ira_dump_file, " ");
3036 ira_print_expanded_allocno (a);
3037 fprintf (ira_dump_file, " -- ");
3039 if (assign_hard_reg (a, false))
3041 if (internal_flag_ira_verbose > 3 && ira_dump_file != NULL)
3042 fprintf (ira_dump_file, "assign hard reg %d\n",
3043 ALLOCNO_HARD_REGNO (a));
3047 if (internal_flag_ira_verbose > 3 && ira_dump_file != NULL)
3048 fprintf (ira_dump_file, "assign memory\n");
/* Chaitin-Briggs branch: build the hard-regs forest, split allocnos
   into in-graph vs. immediately-spilled, then run the push/pop
   coloring and a final cost-improvement pass.  */
3055 form_allocno_hard_regs_nodes_forest ();
3056 if (internal_flag_ira_verbose > 2 && ira_dump_file != NULL)
3057 print_hard_regs_forest (ira_dump_file);
3058 EXECUTE_IF_SET_IN_BITMAP (coloring_allocno_bitmap, 0, i, bi)
3060 a = ira_allocnos[i];
3061 if (ALLOCNO_CLASS (a) != NO_REGS && ! empty_profitable_hard_regs (a))
3063 ALLOCNO_COLOR_DATA (a)->in_graph_p = true;
3064 update_costs_from_prefs (a);
/* No profitable regs at all: spill right away.  */
3068 ALLOCNO_HARD_REGNO (a) = -1;
3069 ALLOCNO_ASSIGNED_P (a) = true;
3070 /* We don't need updated costs anymore. */
3071 ira_free_allocno_updated_costs (a);
3072 if (internal_flag_ira_verbose > 3 && ira_dump_file != NULL)
3074 fprintf (ira_dump_file, " Spill");
3075 ira_print_expanded_allocno (a);
3076 fprintf (ira_dump_file, "\n");
3080 /* Put the allocnos into the corresponding buckets. */
3081 colorable_allocno_bucket = NULL;
3082 uncolorable_allocno_bucket = NULL;
3083 EXECUTE_IF_SET_IN_BITMAP (coloring_allocno_bitmap, 0, i, bi)
3085 a = ira_allocnos[i];
3086 if (ALLOCNO_COLOR_DATA (a)->in_graph_p)
3087 put_allocno_into_bucket (a);
3089 push_allocnos_to_stack ();
3090 pop_allocnos_from_stack ();
3091 finish_allocno_hard_regs_nodes_forest ();
3093 improve_allocation ();
3098 /* Output information about the loop given by its LOOP_TREE_NODE. */
/* NOTE(review): dump-only helper; extraction dropped the `static void`
   header, braces, locals (j, bi, e, ei), and `else`/condition lines —
   annotate surviving lines only.  */
3100 print_loop_title (ira_loop_tree_node_t loop_tree_node)
3104 ira_loop_tree_node_t subloop_node, dest_loop_node;
/* The loop-tree root (whole function) prints as "Loop 0".  */
3108 if (loop_tree_node->parent == NULL)
3109 fprintf (ira_dump_file,
3110 "\n Loop 0 (parent -1, header bb%d, depth 0)\n bbs:",
3114 ira_assert (current_loops != NULL && loop_tree_node->loop != NULL);
3115 fprintf (ira_dump_file,
3116 "\n Loop %d (parent %d, header bb%d, depth %d)\n bbs:",
3117 loop_tree_node->loop_num, loop_tree_node->parent->loop_num,
3118 loop_tree_node->loop->header->index,
3119 loop_depth (loop_tree_node->loop));
/* List the member basic blocks and edges leaving into other loops.  */
3121 for (subloop_node = loop_tree_node->children;
3122 subloop_node != NULL;
3123 subloop_node = subloop_node->next)
3124 if (subloop_node->bb != NULL)
3126 fprintf (ira_dump_file, " %d", subloop_node->bb->index);
3127 FOR_EACH_EDGE (e, ei, subloop_node->bb->succs)
3128 if (e->dest != EXIT_BLOCK_PTR_FOR_FN (cfun)
3129 && ((dest_loop_node = IRA_BB_NODE (e->dest)->parent)
3131 fprintf (ira_dump_file, "(->%d:l%d)",
3132 e->dest->index, dest_loop_node->loop_num);
3134 fprintf (ira_dump_file, "\n all:");
3135 EXECUTE_IF_SET_IN_BITMAP (loop_tree_node->all_allocnos, 0, j, bi)
3136 fprintf (ira_dump_file, " %dr%d", j, ALLOCNO_REGNO (ira_allocnos[j]));
3137 fprintf (ira_dump_file, "\n modified regnos:");
3138 EXECUTE_IF_SET_IN_BITMAP (loop_tree_node->modified_regnos, 0, j, bi)
3139 fprintf (ira_dump_file, " %d", j);
3140 fprintf (ira_dump_file, "\n border:");
3141 EXECUTE_IF_SET_IN_BITMAP (loop_tree_node->border_allocnos, 0, j, bi)
3142 fprintf (ira_dump_file, " %dr%d", j, ALLOCNO_REGNO (ira_allocnos[j]));
/* Register pressure per pressure class (nonzero entries only).  */
3143 fprintf (ira_dump_file, "\n Pressure:");
3144 for (j = 0; (int) j < ira_pressure_classes_num; j++)
3146 enum reg_class pclass;
3148 pclass = ira_pressure_classes[j];
3149 if (loop_tree_node->reg_pressure[pclass] == 0)
3151 fprintf (ira_dump_file, " %s=%d", reg_class_names[pclass],
3152 loop_tree_node->reg_pressure[pclass]);
3154 fprintf (ira_dump_file, "\n");
3157 /* Color the allocnos inside loop (in the extreme case it can be all
3158 of the function) given the corresponding LOOP_TREE_NODE. The
3159 function is called for each loop during top-down traverse of the
/* NOTE(review): extraction dropped many lines in this large function
   (the `static void` header, braces, several locals, the n counter
   initializations, the color_allocnos () call after
   init_allocno_threads, and various `continue`/`else` lines) —
   comments below annotate only the surviving lines.  */
3162 color_pass (ira_loop_tree_node_t loop_tree_node)
3164 int regno, hard_regno, index = -1, n;
3165 int cost, exit_freq, enter_freq;
3169 enum reg_class rclass, aclass, pclass;
3170 ira_allocno_t a, subloop_allocno;
3171 ira_loop_tree_node_t subloop_node;
3173 ira_assert (loop_tree_node->bb == NULL);
3174 if (internal_flag_ira_verbose > 1 && ira_dump_file != NULL)
3175 print_loop_title (loop_tree_node);
/* Allocnos already assigned (e.g. from the parent region) are only
   "considered", not re-colored.  */
3177 bitmap_copy (coloring_allocno_bitmap, loop_tree_node->all_allocnos);
3178 bitmap_copy (consideration_allocno_bitmap, coloring_allocno_bitmap);
3180 EXECUTE_IF_SET_IN_BITMAP (consideration_allocno_bitmap, 0, j, bi)
3182 a = ira_allocnos[j];
3184 if (! ALLOCNO_ASSIGNED_P (a))
3186 bitmap_clear_bit (coloring_allocno_bitmap, ALLOCNO_NUM (a));
/* Attach per-allocno coloring data for this pass.  */
3189 = (allocno_color_data_t) ira_allocate (sizeof (struct allocno_color_data)
3191 memset (allocno_color_data, 0, sizeof (struct allocno_color_data) * n);
3192 curr_allocno_process = 0;
3194 EXECUTE_IF_SET_IN_BITMAP (consideration_allocno_bitmap, 0, j, bi)
3196 a = ira_allocnos[j];
3197 ALLOCNO_ADD_DATA (a) = allocno_color_data + n;
3200 init_allocno_threads ();
3201 /* Color all mentioned allocnos including transparent ones. */
3203 /* Process caps. They are processed just once. */
/* Propagate cap assignments down to the capped subloop allocnos when
   regional allocation is enabled.  */
3204 if (flag_ira_region == IRA_REGION_MIXED
3205 || flag_ira_region == IRA_REGION_ALL)
3206 EXECUTE_IF_SET_IN_BITMAP (loop_tree_node->all_allocnos, 0, j, bi)
3208 a = ira_allocnos[j];
3209 if (ALLOCNO_CAP_MEMBER (a) == NULL)
3211 /* Remove from processing in the next loop. */
3212 bitmap_clear_bit (consideration_allocno_bitmap, j);
3213 rclass = ALLOCNO_CLASS (a);
3214 pclass = ira_pressure_class_translate[rclass];
3215 if (flag_ira_region == IRA_REGION_MIXED
3216 && (loop_tree_node->reg_pressure[pclass]
3217 <= ira_class_hard_regs_num[pclass]))
3219 mode = ALLOCNO_MODE (a);
3220 hard_regno = ALLOCNO_HARD_REGNO (a);
3221 if (hard_regno >= 0)
3223 index = ira_class_hard_reg_index[rclass][hard_regno];
3224 ira_assert (index >= 0);
3226 regno = ALLOCNO_REGNO (a);
3227 subloop_allocno = ALLOCNO_CAP_MEMBER (a);
3228 subloop_node = ALLOCNO_LOOP_TREE_NODE (subloop_allocno);
3229 ira_assert (!ALLOCNO_ASSIGNED_P (subloop_allocno));
3230 ALLOCNO_HARD_REGNO (subloop_allocno) = hard_regno;
3231 ALLOCNO_ASSIGNED_P (subloop_allocno) = true;
3232 if (hard_regno >= 0)
3233 update_costs_from_copies (subloop_allocno, true, true);
3234 /* We don't need updated costs anymore. */
3235 ira_free_allocno_updated_costs (subloop_allocno);
3238 /* Update costs of the corresponding allocnos (not caps) in the
/* For each subloop, either propagate this region's assignment into the
   subloop allocno or adjust the subloop allocno's costs to reflect the
   boundary moves.  */
3240 for (subloop_node = loop_tree_node->subloops;
3241 subloop_node != NULL;
3242 subloop_node = subloop_node->subloop_next)
3244 ira_assert (subloop_node->bb == NULL);
3245 EXECUTE_IF_SET_IN_BITMAP (consideration_allocno_bitmap, 0, j, bi)
3247 a = ira_allocnos[j];
3248 ira_assert (ALLOCNO_CAP_MEMBER (a) == NULL);
3249 mode = ALLOCNO_MODE (a);
3250 rclass = ALLOCNO_CLASS (a);
3251 pclass = ira_pressure_class_translate[rclass];
3252 hard_regno = ALLOCNO_HARD_REGNO (a);
3253 /* Use hard register class here. ??? */
3254 if (hard_regno >= 0)
3256 index = ira_class_hard_reg_index[rclass][hard_regno];
3257 ira_assert (index >= 0);
3259 regno = ALLOCNO_REGNO (a);
3260 /* ??? conflict costs */
3261 subloop_allocno = subloop_node->regno_allocno_map[regno];
3262 if (subloop_allocno == NULL
3263 || ALLOCNO_CAP (subloop_allocno) != NULL)
3265 ira_assert (ALLOCNO_CLASS (subloop_allocno) == rclass);
3266 ira_assert (bitmap_bit_p (subloop_node->all_allocnos,
3267 ALLOCNO_NUM (subloop_allocno)));
/* Propagate the assignment when pressure is low in the subloop, for
   the PIC register, or for overlapping multi-register pseudos.  */
3268 if ((flag_ira_region == IRA_REGION_MIXED
3269 && (loop_tree_node->reg_pressure[pclass]
3270 <= ira_class_hard_regs_num[pclass]))
3271 || (pic_offset_table_rtx != NULL
3272 && regno == (int) REGNO (pic_offset_table_rtx))
3273 /* Avoid overlapped multi-registers. Moves between them
3274 might result in wrong code generation. */
3276 && ira_reg_class_max_nregs[pclass][mode] > 1))
3278 if (! ALLOCNO_ASSIGNED_P (subloop_allocno))
3280 ALLOCNO_HARD_REGNO (subloop_allocno) = hard_regno;
3281 ALLOCNO_ASSIGNED_P (subloop_allocno) = true;
3282 if (hard_regno >= 0)
3283 update_costs_from_copies (subloop_allocno, true, true);
3284 /* We don't need updated costs anymore. */
3285 ira_free_allocno_updated_costs (subloop_allocno);
3289 exit_freq = ira_loop_edge_freq (subloop_node, regno, true);
3290 enter_freq = ira_loop_edge_freq (subloop_node, regno, false);
3291 ira_assert (regno < ira_reg_equiv_len);
/* Pseudos with an equivalence can be rematerialized: just propagate
   the assignment.  */
3292 if (ira_equiv_no_lvalue_p (regno))
3294 if (! ALLOCNO_ASSIGNED_P (subloop_allocno))
3296 ALLOCNO_HARD_REGNO (subloop_allocno) = hard_regno;
3297 ALLOCNO_ASSIGNED_P (subloop_allocno) = true;
3298 if (hard_regno >= 0)
3299 update_costs_from_copies (subloop_allocno, true, true);
3300 /* We don't need updated costs anymore. */
3301 ira_free_allocno_updated_costs (subloop_allocno);
/* Parent allocno in memory: make memory cheaper in the subloop by the
   saved boundary loads/stores.  */
3304 else if (hard_regno < 0)
3306 ALLOCNO_UPDATED_MEMORY_COST (subloop_allocno)
3307 -= ((ira_memory_move_cost[mode][rclass][1] * enter_freq)
3308 + (ira_memory_move_cost[mode][rclass][0] * exit_freq));
/* Parent allocno in HARD_REGNO: bias the subloop allocno's costs
   toward the same register and account for boundary moves.  */
3312 aclass = ALLOCNO_CLASS (subloop_allocno);
3313 ira_init_register_move_cost_if_necessary (mode);
3314 cost = (ira_register_move_cost[mode][rclass][rclass]
3315 * (exit_freq + enter_freq));
3316 ira_allocate_and_set_or_copy_costs
3317 (&ALLOCNO_UPDATED_HARD_REG_COSTS (subloop_allocno), aclass,
3318 ALLOCNO_UPDATED_CLASS_COST (subloop_allocno),
3319 ALLOCNO_HARD_REG_COSTS (subloop_allocno));
3320 ira_allocate_and_set_or_copy_costs
3321 (&ALLOCNO_UPDATED_CONFLICT_HARD_REG_COSTS (subloop_allocno),
3322 aclass, 0, ALLOCNO_CONFLICT_HARD_REG_COSTS (subloop_allocno));
3323 ALLOCNO_UPDATED_HARD_REG_COSTS (subloop_allocno)[index] -= cost;
3324 ALLOCNO_UPDATED_CONFLICT_HARD_REG_COSTS (subloop_allocno)[index]
3326 if (ALLOCNO_UPDATED_CLASS_COST (subloop_allocno)
3327 > ALLOCNO_UPDATED_HARD_REG_COSTS (subloop_allocno)[index])
3328 ALLOCNO_UPDATED_CLASS_COST (subloop_allocno)
3329 = ALLOCNO_UPDATED_HARD_REG_COSTS (subloop_allocno)[index]
3330 ALLOCNO_UPDATED_MEMORY_COST (subloop_allocno)
3331 += (ira_memory_move_cost[mode][rclass][0] * enter_freq
3332 + ira_memory_move_cost[mode][rclass][1] * exit_freq);
/* Detach the per-pass coloring data.  */
3336 ira_free (allocno_color_data);
3337 EXECUTE_IF_SET_IN_BITMAP (consideration_allocno_bitmap, 0, j, bi)
3339 a = ira_allocnos[j];
3340 ALLOCNO_ADD_DATA (a) = NULL;
3344 /* Initialize the common data for coloring and calls functions to do
3345 Chaitin-Briggs and regional coloring. */
/* Entry point for the coloring phase: allocate the bitmap of allocnos
   being colored, walk the whole loop tree applying color_pass to each
   region, optionally dump the resulting disposition, and release the
   bitmap.  */
3349 coloring_allocno_bitmap = ira_allocate_bitmap ();
3350 if (internal_flag_ira_verbose > 0 && ira_dump_file != NULL)
3351 fprintf (ira_dump_file, "\n**** Allocnos coloring:\n\n");
3353 ira_traverse_loop_tree (false, ira_loop_tree_root, color_pass, NULL);
3355 if (internal_flag_ira_verbose > 1 && ira_dump_file != NULL)
3356 ira_print_disposition (ira_dump_file);
3358 ira_free_bitmap (coloring_allocno_bitmap);
3363 /* Move spill/restore code, which are to be generated in ira-emit.c,
3364 to less frequent points (if it is profitable) by reassigning some
3365 allocnos (in loop with subloops containing in another loop) to
3366 memory which results in longer live-range where the corresponding
3367 pseudo-registers will be in memory. */
3369 move_spill_restore (void)
3371 int cost, regno, hard_regno, hard_regno2, index;
3373 int enter_freq, exit_freq;
3375 enum reg_class rclass;
3376 ira_allocno_t a, parent_allocno, subloop_allocno;
3377 ira_loop_tree_node_t parent, loop_node, subloop_node;
3378 ira_allocno_iterator ai;
3383 if (internal_flag_ira_verbose > 0 && ira_dump_file != NULL)
3384 fprintf (ira_dump_file, "New iteration of spill/restore move\n");
/* Only consider non-cap allocnos that received a hard register and
   live on the border of a region that has subloops; equiv-constrained
   regnos are excluded (see the comment inside the condition).  */
3385 FOR_EACH_ALLOCNO (a, ai)
3387 regno = ALLOCNO_REGNO (a);
3388 loop_node = ALLOCNO_LOOP_TREE_NODE (a);
3389 if (ALLOCNO_CAP_MEMBER (a) != NULL
3390 || ALLOCNO_CAP (a) != NULL
3391 || (hard_regno = ALLOCNO_HARD_REGNO (a)) < 0
3392 || loop_node->children == NULL
3393 /* don't do the optimization because it can create
3394 copies and the reload pass can spill the allocno set
3395 by copy although the allocno will not get memory
3397 || ira_equiv_no_lvalue_p (regno)
3398 || !bitmap_bit_p (loop_node->border_allocnos, ALLOCNO_NUM (a)))
3400 mode = ALLOCNO_MODE (a);
3401 rclass = ALLOCNO_CLASS (a);
3402 index = ira_class_hard_reg_index[rclass][hard_regno];
3403 ira_assert (index >= 0);
/* COST starts as the net cost of keeping A in its hard register in
   this region (accumulated memory cost minus register-use cost); the
   subloop and parent adjustments below refine it to this region only.  */
3404 cost = (ALLOCNO_MEMORY_COST (a)
3405 - (ALLOCNO_HARD_REG_COSTS (a) == NULL
3406 ? ALLOCNO_CLASS_COST (a)
3407 : ALLOCNO_HARD_REG_COSTS (a)[index]));
3408 ira_init_register_move_cost_if_necessary (mode);
3409 for (subloop_node = loop_node->subloops;
3410 subloop_node != NULL;
3411 subloop_node = subloop_node->subloop_next)
3413 ira_assert (subloop_node->bb == NULL);
3414 subloop_allocno = subloop_node->regno_allocno_map[regno];
3415 if (subloop_allocno == NULL)
3417 ira_assert (rclass == ALLOCNO_CLASS (subloop_allocno));
3418 /* We have accumulated cost. To get the real cost of
3419 allocno usage in the loop we should subtract costs of
3420 the subloop allocnos. */
3421 cost -= (ALLOCNO_MEMORY_COST (subloop_allocno)
3422 - (ALLOCNO_HARD_REG_COSTS (subloop_allocno) == NULL
3423 ? ALLOCNO_CLASS_COST (subloop_allocno)
3424 : ALLOCNO_HARD_REG_COSTS (subloop_allocno)[index]));
3425 exit_freq = ira_loop_edge_freq (subloop_node, regno, true);
3426 enter_freq = ira_loop_edge_freq (subloop_node, regno, false);
/* Spill/restore code on the subloop boundary: memory moves if the
   subloop allocno is in memory, a register move if it got a
   different hard register.  */
3427 if ((hard_regno2 = ALLOCNO_HARD_REGNO (subloop_allocno)) < 0)
3428 cost -= (ira_memory_move_cost[mode][rclass][0] * exit_freq
3429 + ira_memory_move_cost[mode][rclass][1] * enter_freq);
3433 += (ira_memory_move_cost[mode][rclass][0] * exit_freq
3434 + ira_memory_move_cost[mode][rclass][1] * enter_freq);
3435 if (hard_regno2 != hard_regno)
3436 cost -= (ira_register_move_cost[mode][rclass][rclass]
3437 * (exit_freq + enter_freq));
/* Same boundary accounting against the parent region's allocno.  */
3440 if ((parent = loop_node->parent) != NULL
3441 && (parent_allocno = parent->regno_allocno_map[regno]) != NULL)
3443 ira_assert (rclass == ALLOCNO_CLASS (parent_allocno));
3444 exit_freq = ira_loop_edge_freq (loop_node, regno, true);
3445 enter_freq = ira_loop_edge_freq (loop_node, regno, false);
3446 if ((hard_regno2 = ALLOCNO_HARD_REGNO (parent_allocno)) < 0)
3447 cost -= (ira_memory_move_cost[mode][rclass][0] * exit_freq
3448 + ira_memory_move_cost[mode][rclass][1] * enter_freq);
3452 += (ira_memory_move_cost[mode][rclass][1] * exit_freq
3453 + ira_memory_move_cost[mode][rclass][0] * enter_freq);
3454 if (hard_regno2 != hard_regno)
3455 cost -= (ira_register_move_cost[mode][rclass][rclass]
3456 * (exit_freq + enter_freq));
/* Spill A in this region: putting it in memory is cheaper (the dump
   reports the gain as -COST; presumably guarded by a COST < 0 test —
   TODO confirm against the full source).  */
3461 ALLOCNO_HARD_REGNO (a) = -1;
3462 if (internal_flag_ira_verbose > 3 && ira_dump_file != NULL)
3466 " Moving spill/restore for a%dr%d up from loop %d",
3467 ALLOCNO_NUM (a), regno, loop_node->loop_num);
3468 fprintf (ira_dump_file, " - profit %d\n", -cost);
3480 /* Update current hard reg costs and current conflict hard reg costs
3481 for allocno A. It is done by processing its copies containing
3482 other allocnos already assigned. */
3484 update_curr_costs (ira_allocno_t a)
3486 int i, hard_regno, cost;
3488 enum reg_class aclass, rclass;
3489 ira_allocno_t another_a;
3490 ira_copy_t cp, next_cp;
3492 ira_free_allocno_updated_costs (a);
3493 ira_assert (! ALLOCNO_ASSIGNED_P (a));
3494 aclass = ALLOCNO_CLASS (a);
3495 if (aclass == NO_REGS)
3497 mode = ALLOCNO_MODE (a);
3498 ira_init_register_move_cost_if_necessary (mode);
/* Walk A's copy list (A may be either end of each copy).  For every
   copy whose other end is already assigned a hard register of an
   intersecting class, make that hard register cheaper for A by the
   copy frequency times the register-move cost.  */
3499 for (cp = ALLOCNO_COPIES (a); cp != NULL; cp = next_cp)
3503 next_cp = cp->next_first_allocno_copy;
3504 another_a = cp->second;
3506 else if (cp->second == a)
3508 next_cp = cp->next_second_allocno_copy;
3509 another_a = cp->first;
3513 if (! ira_reg_classes_intersect_p[aclass][ALLOCNO_CLASS (another_a)]
3514 || ! ALLOCNO_ASSIGNED_P (another_a)
3515 || (hard_regno = ALLOCNO_HARD_REGNO (another_a)) < 0)
3517 rclass = REGNO_REG_CLASS (hard_regno);
3518 i = ira_class_hard_reg_index[aclass][hard_regno];
/* Move-cost direction depends on which side of the copy A is.  */
3521 cost = (cp->first == a
3522 ? ira_register_move_cost[mode][rclass][aclass]
3523 : ira_register_move_cost[mode][aclass][rclass]);
3524 ira_allocate_and_set_or_copy_costs
3525 (&ALLOCNO_UPDATED_HARD_REG_COSTS (a), aclass, ALLOCNO_CLASS_COST (a),
3526 ALLOCNO_HARD_REG_COSTS (a));
3527 ira_allocate_and_set_or_copy_costs
3528 (&ALLOCNO_UPDATED_CONFLICT_HARD_REG_COSTS (a),
3529 aclass, 0, ALLOCNO_CONFLICT_HARD_REG_COSTS (a));
3530 ALLOCNO_UPDATED_HARD_REG_COSTS (a)[i] -= cp->freq * cost;
3531 ALLOCNO_UPDATED_CONFLICT_HARD_REG_COSTS (a)[i] -= cp->freq * cost;
3535 /* Try to assign hard registers to the unassigned allocnos and
3536 allocnos conflicting with them or conflicting with allocnos whose
3537 regno >= START_REGNO. The function is called after ira_flattening,
3538 so more allocnos (including ones created in ira-emit.c) will have a
3539 chance to get a hard register. We use simple assignment algorithm
3540 based on priorities. */
3542 ira_reassign_conflict_allocnos (int start_regno)
3544 int i, allocnos_to_color_num;
3546 enum reg_class aclass;
3547 bitmap allocnos_to_color;
3548 ira_allocno_iterator ai;
3550 allocnos_to_color = ira_allocate_bitmap ();
3551 allocnos_to_color_num = 0;
/* Pass 1: queue every unassigned allocno (marking the NO_REGS ones
   as spilled), plus spilled allocnos conflicting with allocnos whose
   regno >= START_REGNO.  The bitmap prevents duplicates.  */
3552 FOR_EACH_ALLOCNO (a, ai)
3554 int n = ALLOCNO_NUM_OBJECTS (a);
3556 if (! ALLOCNO_ASSIGNED_P (a)
3557 && ! bitmap_bit_p (allocnos_to_color, ALLOCNO_NUM (a)))
3559 if (ALLOCNO_CLASS (a) != NO_REGS)
3560 sorted_allocnos[allocnos_to_color_num++] = a;
3563 ALLOCNO_ASSIGNED_P (a) = true;
3564 ALLOCNO_HARD_REGNO (a) = -1;
3565 ira_assert (ALLOCNO_UPDATED_HARD_REG_COSTS (a) == NULL);
3566 ira_assert (ALLOCNO_UPDATED_CONFLICT_HARD_REG_COSTS (a) == NULL);
3568 bitmap_set_bit (allocnos_to_color, ALLOCNO_NUM (a));
3570 if (ALLOCNO_REGNO (a) < start_regno
3571 || (aclass = ALLOCNO_CLASS (a)) == NO_REGS)
3573 for (i = 0; i < n; i++)
3575 ira_object_t obj = ALLOCNO_OBJECT (a, i);
3576 ira_object_t conflict_obj;
3577 ira_object_conflict_iterator oci;
3579 FOR_EACH_OBJECT_CONFLICT (obj, conflict_obj, oci)
3581 ira_allocno_t conflict_a = OBJECT_ALLOCNO (conflict_obj);
3583 ira_assert (ira_reg_classes_intersect_p
3584 [aclass][ALLOCNO_CLASS (conflict_a)]);
3585 if (!bitmap_set_bit (allocnos_to_color, ALLOCNO_NUM (conflict_a)))
3587 sorted_allocnos[allocnos_to_color_num++] = conflict_a;
3591 ira_free_bitmap (allocnos_to_color);
/* Pass 2: order by priority, reset assignment state and recompute
   copy-based costs, then try the simple priority assignment.  */
3592 if (allocnos_to_color_num > 1)
3594 setup_allocno_priorities (sorted_allocnos, allocnos_to_color_num);
3595 qsort (sorted_allocnos, allocnos_to_color_num, sizeof (ira_allocno_t),
3596 allocno_priority_compare_func);
3598 for (i = 0; i < allocnos_to_color_num; i++)
3600 a = sorted_allocnos[i];
3601 ALLOCNO_ASSIGNED_P (a) = false;
3602 update_curr_costs (a);
3604 for (i = 0; i < allocnos_to_color_num; i++)
3606 a = sorted_allocnos[i];
3607 if (assign_hard_reg (a, true))
3609 if (internal_flag_ira_verbose > 3 && ira_dump_file != NULL)
3612 " Secondary allocation: assign hard reg %d to reg %d\n",
3613 ALLOCNO_HARD_REGNO (a), ALLOCNO_REGNO (a));
3620 /* This page contains functions used to find conflicts using allocno
3623 #ifdef ENABLE_IRA_CHECKING
3625 /* Return TRUE if live ranges of pseudo-registers REGNO1 and REGNO2
3626 intersect. This should be used when there is only one region.
3627 Currently this is used during reload. */
3629 conflict_by_live_ranges_p (int regno1, int regno2)
3631 ira_allocno_t a1, a2;
3633 ira_assert (regno1 >= FIRST_PSEUDO_REGISTER
3634 && regno2 >= FIRST_PSEUDO_REGISTER);
3635 /* Reg info calculated by dataflow infrastructure can be different
3636 from one calculated by regclass. */
/* A pseudo without an allocno has no recorded live-range information;
   presumably such pairs are reported as non-conflicting — TODO confirm
   the early return against the full source.  */
3637 if ((a1 = ira_loop_tree_root->regno_allocno_map[regno1]) == NULL
3638 || (a2 = ira_loop_tree_root->regno_allocno_map[regno2]) == NULL)
3640 return allocnos_conflict_by_live_ranges_p (a1, a2);
3647 /* This page contains code to coalesce memory stack slots used by
3648 spilled allocnos. This results in smaller stack frame, better data
3649 locality, and in smaller code for some architectures like
3650 x86/x86_64 where insn size depends on address displacement value.
3651 On the other hand, it can worsen insn scheduling after the RA but
3652 in practice it is less important than smaller stack frames. */
3654 /* TRUE if we coalesced some allocnos. In other words, if we got
3655 loops formed by members first_coalesced_allocno and
3656 next_coalesced_allocno containing more one allocno. */
3657 static bool allocno_coalesced_p;
3659 /* Bitmap used to prevent a repeated allocno processing because of
3661 static bitmap processed_coalesced_allocno_bitmap;
/* Pointer to per-allocno coalescing data (see struct below).  */
3664 typedef struct coalesce_data *coalesce_data_t;
3666 /* To decrease footprint of ira_allocno structure we store all data
3667 needed only for coalescing in the following structure. */
3668 struct coalesce_data
3670 /* Coalesced allocnos form a cyclic list. One allocno given by
3671 FIRST represents all coalesced allocnos. The
3672 list is chained by NEXT. */
3673 ira_allocno_t first;
3678 /* Container for storing allocno data concerning coalescing. */
3679 static coalesce_data_t allocno_coalesce_data;
3681 /* Macro to access the data concerning coalescing. */
3682 #define ALLOCNO_COALESCE_DATA(a) ((coalesce_data_t) ALLOCNO_ADD_DATA (a))
3684 /* Merge two sets of coalesced allocnos given correspondingly by
3685 allocnos A1 and A2 (more accurately merging A2 set into A1
3688 merge_allocnos (ira_allocno_t a1, ira_allocno_t a2)
3690 ira_allocno_t a, first, last, next;
3692 first = ALLOCNO_COALESCE_DATA (a1)->first;
3693 a = ALLOCNO_COALESCE_DATA (a2)->first;
/* Redirect the FIRST pointer of every member of A2's cyclic list to
   A1's representative.  */
3696 for (last = a2, a = ALLOCNO_COALESCE_DATA (a2)->next;;
3697 a = ALLOCNO_COALESCE_DATA (a)->next)
3699 ALLOCNO_COALESCE_DATA (a)->first = first;
/* Splice A2's cycle into A1's cycle right after the representative.  */
3704 next = allocno_coalesce_data[ALLOCNO_NUM (first)].next;
3705 allocno_coalesce_data[ALLOCNO_NUM (first)].next = a2;
3706 allocno_coalesce_data[ALLOCNO_NUM (last)].next = next;
3709 /* Return TRUE if there are conflicting allocnos from two sets of
3710 coalesced allocnos given correspondingly by allocnos A1 and A2. We
3711 use live ranges to find conflicts because conflicts are represented
3712 only for allocnos of the same allocno class and during the reload
3713 pass we coalesce allocnos for sharing stack memory slots. */
3715 coalesced_allocno_conflict_p (ira_allocno_t a1, ira_allocno_t a2)
3717 ira_allocno_t a, conflict_a;
/* When sets may have more than one member, pre-mark A1's members in
   the shared bitmap (guards against repeated processing).  */
3719 if (allocno_coalesced_p)
3721 bitmap_clear (processed_coalesced_allocno_bitmap);
3722 for (a = ALLOCNO_COALESCE_DATA (a1)->next;;
3723 a = ALLOCNO_COALESCE_DATA (a)->next)
3725 bitmap_set_bit (processed_coalesced_allocno_bitmap, ALLOCNO_NUM (a));
/* Pairwise live-range intersection test between every member of A2's
   cyclic list and every member of A1's.  */
3730 for (a = ALLOCNO_COALESCE_DATA (a2)->next;;
3731 a = ALLOCNO_COALESCE_DATA (a)->next)
3733 for (conflict_a = ALLOCNO_COALESCE_DATA (a1)->next;;
3734 conflict_a = ALLOCNO_COALESCE_DATA (conflict_a)->next)
3736 if (allocnos_conflict_by_live_ranges_p (a, conflict_a))
3738 if (conflict_a == a1)
3747 /* The major function for aggressive allocno coalescing. We coalesce
3748 only spilled allocnos. If some allocnos have been coalesced, we
3749 set up flag allocno_coalesced_p. */
3751 coalesce_allocnos (void)
3754 ira_copy_t cp, next_cp;
3756 int i, n, cp_num, regno;
/* Phase 1: collect candidate copies — both endpoints must be spilled
   (assigned, no hard register) and not equiv-constrained.  */
3760 /* Collect copies. */
3761 EXECUTE_IF_SET_IN_BITMAP (coloring_allocno_bitmap, 0, j, bi)
3763 a = ira_allocnos[j];
3764 regno = ALLOCNO_REGNO (a);
3765 if (! ALLOCNO_ASSIGNED_P (a) || ALLOCNO_HARD_REGNO (a) >= 0
3766 || ira_equiv_no_lvalue_p (regno))
3768 for (cp = ALLOCNO_COPIES (a); cp != NULL; cp = next_cp)
3772 next_cp = cp->next_first_allocno_copy;
3773 regno = ALLOCNO_REGNO (cp->second);
3774 /* For priority coloring we coalesce allocnos only with
3775 the same allocno class not with intersected allocno
3776 classes as it were possible. It is done for
3778 if ((cp->insn != NULL || cp->constraint_p)
3779 && ALLOCNO_ASSIGNED_P (cp->second)
3780 && ALLOCNO_HARD_REGNO (cp->second) < 0
3781 && ! ira_equiv_no_lvalue_p (regno))
3782 sorted_copies[cp_num++] = cp;
3784 else if (cp->second == a)
3785 next_cp = cp->next_second_allocno_copy;
/* Phase 2: greedily merge the endpoints of non-conflicting copies,
   most frequently executed copies first; copies whose endpoint sets
   remain distinct are kept for the next round.  */
3790 qsort (sorted_copies, cp_num, sizeof (ira_copy_t), copy_freq_compare_func);
3791 /* Coalesced copies, most frequently executed first. */
3792 for (; cp_num != 0;)
3794 for (i = 0; i < cp_num; i++)
3796 cp = sorted_copies[i];
3797 if (! coalesced_allocno_conflict_p (cp->first, cp->second))
3799 allocno_coalesced_p = true;
3800 if (internal_flag_ira_verbose > 3 && ira_dump_file != NULL)
3803 " Coalescing copy %d:a%dr%d-a%dr%d (freq=%d)\n",
3804 cp->num, ALLOCNO_NUM (cp->first), ALLOCNO_REGNO (cp->first),
3805 ALLOCNO_NUM (cp->second), ALLOCNO_REGNO (cp->second),
3807 merge_allocnos (cp->first, cp->second);
3812 /* Collect the rest of copies. */
3813 for (n = 0; i < cp_num; i++)
3815 cp = sorted_copies[i];
3816 if (allocno_coalesce_data[ALLOCNO_NUM (cp->first)].first
3817 != allocno_coalesce_data[ALLOCNO_NUM (cp->second)].first)
3818 sorted_copies[n++] = cp;
3824 /* Usage cost and order number of coalesced allocno set to which
3825 given pseudo register belongs to. */
3826 static int *regno_coalesced_allocno_cost;
3827 static int *regno_coalesced_allocno_num;
3829 /* Sort pseudos according frequencies of coalesced allocno sets they
3830 belong to (putting most frequently ones first), and according to
3831 coalesced allocno set order numbers. */
3833 coalesced_pseudo_reg_freq_compare (const void *v1p, const void *v2p)
3835 const int regno1 = *(const int *) v1p;
3836 const int regno2 = *(const int *) v2p;
/* Higher set cost sorts first; ties broken by set order number, then
   by regno so the order is deterministic.  */
3839 if ((diff = (regno_coalesced_allocno_cost[regno2]
3840 - regno_coalesced_allocno_cost[regno1])) != 0)
3842 if ((diff = (regno_coalesced_allocno_num[regno1]
3843 - regno_coalesced_allocno_num[regno2])) != 0)
3845 return regno1 - regno2;
3848 /* Widest width in which each pseudo reg is referred to (via subreg).
3849 It is used for sorting pseudo registers. */
3850 static unsigned int *regno_max_ref_width;
3852 /* Sort pseudos according their slot numbers (putting ones with
3853 smaller numbers first, or last when the frame pointer is not
3856 coalesced_pseudo_reg_slot_compare (const void *v1p, const void *v2p)
3858 const int regno1 = *(const int *) v1p;
3859 const int regno2 = *(const int *) v2p;
3860 ira_allocno_t a1 = ira_regno_allocno_map[regno1];
3861 ira_allocno_t a2 = ira_regno_allocno_map[regno2];
3862 int diff, slot_num1, slot_num2;
3863 int total_size1, total_size2;
/* Non-spilled pseudos (no allocno or a real hard register) sort by
   regno among themselves.  */
3865 if (a1 == NULL || ALLOCNO_HARD_REGNO (a1) >= 0)
3867 if (a2 == NULL || ALLOCNO_HARD_REGNO (a2) >= 0)
3868 return regno1 - regno2;
3871 else if (a2 == NULL || ALLOCNO_HARD_REGNO (a2) >= 0)
/* Both spilled: ALLOCNO_HARD_REGNO stores the negated slot number.
   Direction of the slot ordering depends on stack growth vs frame
   growth.  Ties: larger total slot size first, then regno.  */
3873 slot_num1 = -ALLOCNO_HARD_REGNO (a1);
3874 slot_num2 = -ALLOCNO_HARD_REGNO (a2);
3875 if ((diff = slot_num1 - slot_num2) != 0)
3876 return (frame_pointer_needed
3877 || (!FRAME_GROWS_DOWNWARD) == STACK_GROWS_DOWNWARD ? diff : -diff);
3878 total_size1 = MAX (PSEUDO_REGNO_BYTES (regno1),
3879 regno_max_ref_width[regno1]);
3880 total_size2 = MAX (PSEUDO_REGNO_BYTES (regno2),
3881 regno_max_ref_width[regno2]);
3882 if ((diff = total_size2 - total_size1) != 0)
3884 return regno1 - regno2;
3887 /* Setup REGNO_COALESCED_ALLOCNO_COST and REGNO_COALESCED_ALLOCNO_NUM
3888 for coalesced allocno sets containing allocnos with their regnos
3889 given in array PSEUDO_REGNOS of length N. */
3891 setup_coalesced_allocno_costs_and_nums (int *pseudo_regnos, int n)
3893 int i, num, regno, cost;
3894 ira_allocno_t allocno, a;
3896 for (num = i = 0; i < n; i++)
3898 regno = pseudo_regnos[i];
3899 allocno = ira_regno_allocno_map[regno];
/* Regnos without an allocno get zero cost and their own ordinal.  */
3900 if (allocno == NULL)
3902 regno_coalesced_allocno_cost[regno] = 0;
3903 regno_coalesced_allocno_num[regno] = ++num;
/* Process each coalesced set once, via its representative.  */
3906 if (ALLOCNO_COALESCE_DATA (allocno)->first != allocno)
/* Sum frequencies over the whole cyclic member list ...  */
3909 for (cost = 0, a = ALLOCNO_COALESCE_DATA (allocno)->next;;
3910 a = ALLOCNO_COALESCE_DATA (a)->next)
3912 cost += ALLOCNO_FREQ (a);
/* ... then stamp every member's regno with the set cost/ordinal.  */
3916 for (a = ALLOCNO_COALESCE_DATA (allocno)->next;;
3917 a = ALLOCNO_COALESCE_DATA (a)->next)
3919 regno_coalesced_allocno_num[ALLOCNO_REGNO (a)] = num;
3920 regno_coalesced_allocno_cost[ALLOCNO_REGNO (a)] = cost;
3927 /* Collect spilled allocnos representing coalesced allocno sets (the
3928 first coalesced allocno). The collected allocnos are returned
3929 through array SPILLED_COALESCED_ALLOCNOS. The function returns the
3930 number of the collected allocnos. The allocnos are given by their
3931 regnos in array PSEUDO_REGNOS of length N. */
3933 collect_spilled_coalesced_allocnos (int *pseudo_regnos, int n,
3934 ira_allocno_t *spilled_coalesced_allocnos)
3937 ira_allocno_t allocno;
3939 for (num = i = 0; i < n; i++)
3941 regno = pseudo_regnos[i];
3942 allocno = ira_regno_allocno_map[regno];
/* Keep only spilled set representatives (FIRST == allocno).  */
3943 if (allocno == NULL || ALLOCNO_HARD_REGNO (allocno) >= 0
3944 || ALLOCNO_COALESCE_DATA (allocno)->first != allocno)
3946 spilled_coalesced_allocnos[num++] = allocno;
3951 /* Array of live ranges of size IRA_ALLOCNOS_NUM. Live range for
3952 given slot contains live ranges of coalesced allocnos assigned to
3954 static live_range_t *slot_coalesced_allocnos_live_ranges;
3956 /* Return TRUE if coalesced allocnos represented by ALLOCNO has live
3957 ranges intersected with live ranges of coalesced allocnos assigned
3958 to slot with number N. */
3960 slot_coalesced_allocno_live_ranges_intersect_p (ira_allocno_t allocno, int n)
/* Check every object of every member of ALLOCNO's coalesced set
   against the accumulated live ranges of slot N.  */
3964 for (a = ALLOCNO_COALESCE_DATA (allocno)->next;;
3965 a = ALLOCNO_COALESCE_DATA (a)->next)
3968 int nr = ALLOCNO_NUM_OBJECTS (a);
3970 for (i = 0; i < nr; i++)
3972 ira_object_t obj = ALLOCNO_OBJECT (a, i);
3974 if (ira_live_ranges_intersect_p
3975 (slot_coalesced_allocnos_live_ranges[n],
3976 OBJECT_LIVE_RANGES (obj)))
3985 /* Update live ranges of slot to which coalesced allocnos represented
3986 by ALLOCNO were assigned. */
3988 setup_slot_coalesced_allocno_live_ranges (ira_allocno_t allocno)
/* N is the slot number stashed in the set's TEMP field; merge a copy
   of each member object's live ranges into the slot's range list.  */
3994 n = ALLOCNO_COALESCE_DATA (allocno)->temp;
3995 for (a = ALLOCNO_COALESCE_DATA (allocno)->next;;
3996 a = ALLOCNO_COALESCE_DATA (a)->next)
3998 int nr = ALLOCNO_NUM_OBJECTS (a);
3999 for (i = 0; i < nr; i++)
4001 ira_object_t obj = ALLOCNO_OBJECT (a, i);
4003 r = ira_copy_live_range_list (OBJECT_LIVE_RANGES (obj));
4004 slot_coalesced_allocnos_live_ranges[n]
4005 = ira_merge_live_ranges
4006 (slot_coalesced_allocnos_live_ranges[n], r);
4013 /* We have coalesced allocnos involving in copies. Coalesce allocnos
4014 further in order to share the same memory stack slot. Allocnos
4015 representing sets of allocnos coalesced before the call are given
4016 in array SPILLED_COALESCED_ALLOCNOS of length NUM. Return TRUE if
4017 some allocnos were coalesced in the function. */
4019 coalesce_spill_slots (ira_allocno_t *spilled_coalesced_allocnos, int num)
4021 int i, j, n, last_coalesced_allocno_num;
4022 ira_allocno_t allocno, a;
4023 bool merged_p = false;
4024 bitmap set_jump_crosses = regstat_get_setjmp_crosses ();
4026 slot_coalesced_allocnos_live_ranges
4027 = (live_range_t *) ira_allocate (sizeof (live_range_t) * ira_allocnos_num);
4028 memset (slot_coalesced_allocnos_live_ranges, 0,
4029 sizeof (live_range_t) * ira_allocnos_num);
4030 last_coalesced_allocno_num = 0;
4031 /* Coalesce non-conflicting spilled allocnos preferring most
/* For each representative (in cost order), either merge it into an
   earlier compatible slot or open a new slot number.  Setjmp-crossing
   and equiv-constrained regnos are never shared.  */
4033 for (i = 0; i < num; i++)
4035 allocno = spilled_coalesced_allocnos[i];
4036 if (ALLOCNO_COALESCE_DATA (allocno)->first != allocno
4037 || bitmap_bit_p (set_jump_crosses, ALLOCNO_REGNO (allocno))
4038 || ira_equiv_no_lvalue_p (ALLOCNO_REGNO (allocno)))
4040 for (j = 0; j < i; j++)
4042 a = spilled_coalesced_allocnos[j];
4043 n = ALLOCNO_COALESCE_DATA (a)->temp;
4044 if (ALLOCNO_COALESCE_DATA (a)->first == a
4045 && ! bitmap_bit_p (set_jump_crosses, ALLOCNO_REGNO (a))
4046 && ! ira_equiv_no_lvalue_p (ALLOCNO_REGNO (a))
4047 && ! slot_coalesced_allocno_live_ranges_intersect_p (allocno, n))
4052 /* No coalescing: set up number for coalesced allocnos
4053 represented by ALLOCNO. */
4054 ALLOCNO_COALESCE_DATA (allocno)->temp = last_coalesced_allocno_num++;
4055 setup_slot_coalesced_allocno_live_ranges (allocno);
4059 allocno_coalesced_p = true;
4061 if (internal_flag_ira_verbose > 3 && ira_dump_file != NULL)
4062 fprintf (ira_dump_file,
4063 " Coalescing spilled allocnos a%dr%d->a%dr%d\n",
4064 ALLOCNO_NUM (allocno), ALLOCNO_REGNO (allocno),
4065 ALLOCNO_NUM (a), ALLOCNO_REGNO (a));
4066 ALLOCNO_COALESCE_DATA (allocno)->temp
4067 = ALLOCNO_COALESCE_DATA (a)->temp;
4068 setup_slot_coalesced_allocno_live_ranges (allocno)
4069 merge_allocnos (a, allocno);
4070 ira_assert (ALLOCNO_COALESCE_DATA (a)->first == a);
4073 for (i = 0; i < ira_allocnos_num; i++)
4074 ira_finish_live_range_list (slot_coalesced_allocnos_live_ranges[i]);
4075 ira_free (slot_coalesced_allocnos_live_ranges);
4079 /* Sort pseudo-register numbers in array PSEUDO_REGNOS of length N for
4080 subsequent assigning stack slots to them in the reload pass. To do
4081 this we coalesce spilled allocnos first to decrease the number of
4082 memory-memory move insns. This function is called by the
4085 ira_sort_regnos_for_alter_reg (int *pseudo_regnos, int n,
4086 unsigned int *reg_max_ref_width)
4088 int max_regno = max_reg_num ();
4089 int i, regno, num, slot_num;
4090 ira_allocno_t allocno, a;
4091 ira_allocno_iterator ai;
4092 ira_allocno_t *spilled_coalesced_allocnos;
4094 ira_assert (! ira_use_lra_p);
4096 /* Set up allocnos can be coalesced. */
4097 coloring_allocno_bitmap = ira_allocate_bitmap ();
4098 for (i = 0; i < n; i++)
4100 regno = pseudo_regnos[i];
4101 allocno = ira_regno_allocno_map[regno];
4102 if (allocno != NULL)
4103 bitmap_set_bit (coloring_allocno_bitmap, ALLOCNO_NUM (allocno));
4105 allocno_coalesced_p = false;
4106 processed_coalesced_allocno_bitmap = ira_allocate_bitmap ();
4107 allocno_coalesce_data
4108 = (coalesce_data_t) ira_allocate (sizeof (struct coalesce_data)
4109 * ira_allocnos_num);
4110 /* Initialize coalesce data for allocnos. */
/* Every allocno starts as a singleton set: it is its own
   representative and its own cyclic-list successor.  */
4111 FOR_EACH_ALLOCNO (a, ai)
4113 ALLOCNO_ADD_DATA (a) = allocno_coalesce_data + ALLOCNO_NUM (a);
4114 ALLOCNO_COALESCE_DATA (a)->first = a;
4115 ALLOCNO_COALESCE_DATA (a)->next = a;
4117 coalesce_allocnos ();
4118 ira_free_bitmap (coloring_allocno_bitmap);
4119 regno_coalesced_allocno_cost
4120 = (int *) ira_allocate (max_regno * sizeof (int));
4121 regno_coalesced_allocno_num
4122 = (int *) ira_allocate (max_regno * sizeof (int));
4123 memset (regno_coalesced_allocno_num, 0, max_regno * sizeof (int));
4124 setup_coalesced_allocno_costs_and_nums (pseudo_regnos, n);
4125 /* Sort regnos according frequencies of the corresponding coalesced
4127 qsort (pseudo_regnos, n, sizeof (int), coalesced_pseudo_reg_freq_compare);
4128 spilled_coalesced_allocnos
4129 = (ira_allocno_t *) ira_allocate (ira_allocnos_num
4130 * sizeof (ira_allocno_t));
4131 /* Collect allocnos representing the spilled coalesced allocno
4133 num = collect_spilled_coalesced_allocnos (pseudo_regnos, n,
4134 spilled_coalesced_allocnos);
/* If slot sharing merged more sets, the costs/order are stale:
   recompute, re-sort, and re-collect the representatives.  */
4135 if (flag_ira_share_spill_slots
4136 && coalesce_spill_slots (spilled_coalesced_allocnos, num))
4138 setup_coalesced_allocno_costs_and_nums (pseudo_regnos, n);
4139 qsort (pseudo_regnos, n, sizeof (int),
4140 coalesced_pseudo_reg_freq_compare);
4141 num = collect_spilled_coalesced_allocnos (pseudo_regnos, n,
4142 spilled_coalesced_allocnos);
4144 ira_free_bitmap (processed_coalesced_allocno_bitmap);
4145 allocno_coalesced_p = false;
4146 /* Assign stack slot numbers to spilled allocno sets, use smaller
4147 numbers for most frequently used coalesced allocnos. -1 is
4148 reserved for dynamic search of stack slots for pseudos spilled by
/* Every member of a set records its slot as a negated slot number in
   ALLOCNO_HARD_REGNO.  */
4151 for (i = 0; i < num; i++)
4153 allocno = spilled_coalesced_allocnos[i];
4154 if (ALLOCNO_COALESCE_DATA (allocno)->first != allocno
4155 || ALLOCNO_HARD_REGNO (allocno) >= 0
4156 || ira_equiv_no_lvalue_p (ALLOCNO_REGNO (allocno)))
4158 if (internal_flag_ira_verbose > 3 && ira_dump_file != NULL)
4159 fprintf (ira_dump_file, " Slot %d (freq,size):", slot_num);
4161 for (a = ALLOCNO_COALESCE_DATA (allocno)->next;;
4162 a = ALLOCNO_COALESCE_DATA (a)->next)
4164 ira_assert (ALLOCNO_HARD_REGNO (a) < 0);
4165 ALLOCNO_HARD_REGNO (a) = -slot_num;
4166 if (internal_flag_ira_verbose > 3 && ira_dump_file != NULL)
4167 fprintf (ira_dump_file, " a%dr%d(%d,%d)",
4168 ALLOCNO_NUM (a), ALLOCNO_REGNO (a), ALLOCNO_FREQ (a),
4169 MAX (PSEUDO_REGNO_BYTES (ALLOCNO_REGNO (a)),
4170 reg_max_ref_width[ALLOCNO_REGNO (a)]));
4175 if (internal_flag_ira_verbose > 3 && ira_dump_file != NULL)
4176 fprintf (ira_dump_file, "\n");
4178 ira_spilled_reg_stack_slots_num = slot_num - 1;
4179 ira_free (spilled_coalesced_allocnos);
4180 /* Sort regnos according the slot numbers. */
4181 regno_max_ref_width = reg_max_ref_width;
4182 qsort (pseudo_regnos, n, sizeof (int), coalesced_pseudo_reg_slot_compare);
4183 FOR_EACH_ALLOCNO (a, ai)
4184 ALLOCNO_ADD_DATA (a) = NULL;
4185 ira_free (allocno_coalesce_data);
4186 ira_free (regno_coalesced_allocno_num);
4187 ira_free (regno_coalesced_allocno_cost);
4192 /* This page contains code used by the reload pass to improve the
4195 /* The function is called from reload to mark changes in the
4196 allocation of REGNO made by the reload. Remember that reg_renumber
4197 reflects the change result. */
4199 ira_mark_allocation_change (int regno)
4201 ira_allocno_t a = ira_regno_allocno_map[regno];
4202 int old_hard_regno, hard_regno, cost;
4203 enum reg_class aclass = ALLOCNO_CLASS (a);
4205 ira_assert (a != NULL);
4206 hard_regno = reg_renumber[regno];
/* Nothing to do when reload kept the same location.  */
4207 if ((old_hard_regno = ALLOCNO_HARD_REGNO (a)) == hard_regno)
/* COST accumulates the overall-cost delta: first subtract the old
   location's cost ...  */
4209 if (old_hard_regno < 0)
4210 cost = -ALLOCNO_MEMORY_COST (a);
4213 ira_assert (ira_class_hard_reg_index[aclass][old_hard_regno] >= 0);
4214 cost = -(ALLOCNO_HARD_REG_COSTS (a) == NULL
4215 ? ALLOCNO_CLASS_COST (a)
4216 : ALLOCNO_HARD_REG_COSTS (a)
4217 [ira_class_hard_reg_index[aclass][old_hard_regno]]);
4218 update_costs_from_copies (a, false, false);
4220 ira_overall_cost -= cost;
4221 ALLOCNO_HARD_REGNO (a) = hard_regno;
/* ... then add the cost of the new location (memory or the new hard
   register) and record the new assignment.  */
4224 ALLOCNO_HARD_REGNO (a) = -1;
4225 cost += ALLOCNO_MEMORY_COST (a);
4227 else if (ira_class_hard_reg_index[aclass][hard_regno] >= 0)
4229 cost += (ALLOCNO_HARD_REG_COSTS (a) == NULL
4230 ? ALLOCNO_CLASS_COST (a)
4231 : ALLOCNO_HARD_REG_COSTS (a)
4232 [ira_class_hard_reg_index[aclass][hard_regno]]);
4233 update_costs_from_copies (a, true, false);
4236 /* Reload changed class of the allocno. */
4238 ira_overall_cost += cost;
4241 /* This function is called when reload deletes memory-memory move. In
4242 this case we marks that the allocation of the corresponding
4243 allocnos should be not changed in future. Otherwise we risk to get
4246 ira_mark_memory_move_deletion (int dst_regno, int src_regno)
4248 ira_allocno_t dst = ira_regno_allocno_map[dst_regno];
4249 ira_allocno_t src = ira_regno_allocno_map[src_regno];
4251 ira_assert (dst != NULL && src != NULL
4252 && ALLOCNO_HARD_REGNO (dst) < 0
4253 && ALLOCNO_HARD_REGNO (src) < 0);
/* Pin both allocnos in memory so a later reassignment cannot
   invalidate the deleted move.  */
4254 ALLOCNO_DONT_REASSIGN_P (dst) = true;
4255 ALLOCNO_DONT_REASSIGN_P (src) = true;
4258 /* Try to assign a hard register (except for FORBIDDEN_REGS) to
4259 allocno A and return TRUE in the case of success. */
4261 allocno_reload_assign (ira_allocno_t a, HARD_REG_SET forbidden_regs)
4264 enum reg_class aclass;
4265 int regno = ALLOCNO_REGNO (a);
4266 HARD_REG_SET saved[2];
/* Temporarily widen each object's conflict set with FORBIDDEN_REGS
   (and, when caller-saves is disabled and A crosses calls, the
   call-clobbered set); the originals are saved for restoration.  */
4269 n = ALLOCNO_NUM_OBJECTS (a);
4270 for (i = 0; i < n; i++)
4272 ira_object_t obj = ALLOCNO_OBJECT (a, i);
4273 COPY_HARD_REG_SET (saved[i], OBJECT_TOTAL_CONFLICT_HARD_REGS (obj));
4274 IOR_HARD_REG_SET (OBJECT_TOTAL_CONFLICT_HARD_REGS (obj), forbidden_regs);
4275 if (! flag_caller_saves && ALLOCNO_CALLS_CROSSED_NUM (a) != 0)
4276 IOR_HARD_REG_SET (OBJECT_TOTAL_CONFLICT_HARD_REGS (obj),
4279 ALLOCNO_ASSIGNED_P (a) = false;
4280 aclass = ALLOCNO_CLASS (a);
4281 update_curr_costs (a);
4282 assign_hard_reg (a, true);
4283 hard_regno = ALLOCNO_HARD_REGNO (a);
4284 reg_renumber[regno] = hard_regno;
4286 ALLOCNO_HARD_REGNO (a) = -1;
/* Success: credit the overall cost with the spill cost we avoid.  */
4289 ira_assert (ira_class_hard_reg_index[aclass][hard_regno] >= 0);
4291 -= (ALLOCNO_MEMORY_COST (a)
4292 - (ALLOCNO_HARD_REG_COSTS (a) == NULL
4293 ? ALLOCNO_CLASS_COST (a)
4294 : ALLOCNO_HARD_REG_COSTS (a)[ira_class_hard_reg_index
4295 [aclass][hard_regno]]));
4296 if (ALLOCNO_CALLS_CROSSED_NUM (a) != 0
4297 && ira_hard_reg_set_intersection_p (hard_regno, ALLOCNO_MODE (a),
4300 ira_assert (flag_caller_saves);
4301 caller_save_needed = 1;
4305 /* If we found a hard register, modify the RTL for the pseudo
4306 register to show the hard register, and mark the pseudo register
4308 if (reg_renumber[regno] >= 0)
4310 if (internal_flag_ira_verbose > 3 && ira_dump_file != NULL)
4311 fprintf (ira_dump_file, ": reassign to %d\n", reg_renumber[regno]);
4312 SET_REGNO (regno_reg_rtx[regno], reg_renumber[regno]);
4313 mark_home_live (regno);
4315 else if (internal_flag_ira_verbose > 3 && ira_dump_file != NULL)
4316 fprintf (ira_dump_file, "\n");
/* Restore the saved conflict sets.  */
4317 for (i = 0; i < n; i++)
4319 ira_object_t obj = ALLOCNO_OBJECT (a, i);
4320 COPY_HARD_REG_SET (OBJECT_TOTAL_CONFLICT_HARD_REGS (obj), saved[i]);
4322 return reg_renumber[regno] >= 0;
4325 /* Sort pseudos according their usage frequencies (putting most
4326 frequently ones first). */
/* qsort comparator over pseudo regno arrays.  NOTE(review): the
   return type line, the declaration of `diff', and the `return diff;'
   consequent of the `if' are missing from this extract.  */
4328 pseudo_reg_compare (const void *v1p, const void *v2p)
4330 int regno1 = *(const int *) v1p;
4331 int regno2 = *(const int *) v2p;
/* Higher frequency sorts first; ties broken by regno for a stable,
   deterministic order.  */
4334 if ((diff = REG_FREQ (regno2) - REG_FREQ (regno1)) != 0)
4336 return regno1 - regno2;
4339 /* Try to allocate hard registers to SPILLED_PSEUDO_REGS (there are
4340 NUM of them) or spilled pseudos conflicting with pseudos in
4341 SPILLED_PSEUDO_REGS. Return TRUE and update SPILLED, if the
4342 allocation has been changed. The function doesn't use
4343 BAD_SPILL_REGS and hard registers in PSEUDO_FORBIDDEN_REGS and
4344 PSEUDO_PREVIOUS_REGS for the corresponding pseudos. The function
4345 is called by the reload pass at the end of each reload
/* NOTE(review): the return type and the trailing parameter (the
   SPILLED bitmap, per the comment above) plus the function tail
   (changed_p computation, BITMAP_FREE, return) are missing from
   this extract.  */
4348 ira_reassign_pseudos (int *spilled_pseudo_regs, int num,
4349 HARD_REG_SET bad_spill_regs,
4350 HARD_REG_SET *pseudo_forbidden_regs,
4351 HARD_REG_SET *pseudo_previous_regs,
4357 HARD_REG_SET forbidden_regs;
4358 bitmap temp = BITMAP_ALLOC (NULL);
4360 /* Add pseudos which conflict with pseudos already in
4361 SPILLED_PSEUDO_REGS to SPILLED_PSEUDO_REGS. This is preferable
4362 to allocating in two steps as some of the conflicts might have
4363 a higher priority than the pseudos passed in SPILLED_PSEUDO_REGS. */
4364 for (i = 0; i < num; i++)
4365 bitmap_set_bit (temp, spilled_pseudo_regs[i]);
/* Scan only the original NUM entries (n == num on entry) while
   appending newly found conflicting spilled pseudos past them.  */
4367 for (i = 0, n = num; i < n; i++)
4370 int regno = spilled_pseudo_regs[i];
4371 bitmap_set_bit (temp, regno);
4373 a = ira_regno_allocno_map[regno];
4374 nr = ALLOCNO_NUM_OBJECTS (a);
4375 for (j = 0; j < nr; j++)
4377 ira_object_t conflict_obj;
4378 ira_object_t obj = ALLOCNO_OBJECT (a, j);
4379 ira_object_conflict_iterator oci;
4381 FOR_EACH_OBJECT_CONFLICT (obj, conflict_obj, oci)
4383 ira_allocno_t conflict_a = OBJECT_ALLOCNO (conflict_obj);
/* bitmap_set_bit returns true only when the bit was newly set, so
   each conflicting spilled pseudo is appended at most once.  */
4384 if (ALLOCNO_HARD_REGNO (conflict_a) < 0
4385 && ! ALLOCNO_DONT_REASSIGN_P (conflict_a)
4386 && bitmap_set_bit (temp, ALLOCNO_REGNO (conflict_a)))
4388 spilled_pseudo_regs[num++] = ALLOCNO_REGNO (conflict_a);
4389 /* ?!? This seems wrong. */
4390 bitmap_set_bit (consideration_allocno_bitmap,
4391 ALLOCNO_NUM (conflict_a));
/* Most frequently used pseudos get the first chance at a register.  */
4398 qsort (spilled_pseudo_regs, num, sizeof (int), pseudo_reg_compare);
4400 /* Try to assign hard registers to pseudos from
4401 SPILLED_PSEUDO_REGS. */
4402 for (i = 0; i < num; i++)
4404 regno = spilled_pseudo_regs[i];
/* Registers forbidden for this pseudo: globally bad spill regs plus
   this pseudo's own forbidden and previously tried sets.  */
4405 COPY_HARD_REG_SET (forbidden_regs, bad_spill_regs);
4406 IOR_HARD_REG_SET (forbidden_regs, pseudo_forbidden_regs[regno]);
4407 IOR_HARD_REG_SET (forbidden_regs, pseudo_previous_regs[regno]);
4408 gcc_assert (reg_renumber[regno] < 0);
4409 a = ira_regno_allocno_map[regno];
4410 ira_mark_allocation_change (regno);
4411 ira_assert (reg_renumber[regno] < 0);
4412 if (internal_flag_ira_verbose > 3 && ira_dump_file != NULL)
4413 fprintf (ira_dump_file,
4414 " Try Assign %d(a%d), cost=%d", regno, ALLOCNO_NUM (a),
4415 ALLOCNO_MEMORY_COST (a)
4416 - ALLOCNO_CLASS_COST (a));
4417 allocno_reload_assign (a, forbidden_regs);
/* On success, drop the pseudo from the caller's spilled set.  */
4418 if (reg_renumber[regno] >= 0)
4420 CLEAR_REGNO_REG_SET (spilled, regno);
4428 /* The function is called by reload and returns already allocated
4429 stack slot (if any) for REGNO with given INHERENT_SIZE and
4430 TOTAL_SIZE. In the case of failure to find a slot which can be
4431 used for REGNO, the function returns NULL. */
/* NOTE(review): the return type (an rtx, per the comment) and a
   number of braces / statements are missing from this extract.  */
4433 ira_reuse_stack_slot (int regno, unsigned int inherent_size,
4434 unsigned int total_size)
4437 int slot_num, best_slot_num;
4438 int cost, best_cost;
4439 ira_copy_t cp, next_cp;
4440 ira_allocno_t another_allocno, allocno = ira_regno_allocno_map[regno];
4443 struct ira_spilled_reg_stack_slot *slot = NULL;
/* This path is used only by classic reload, never by LRA.  */
4445 ira_assert (! ira_use_lra_p);
4447 ira_assert (inherent_size == PSEUDO_REGNO_BYTES (regno)
4448 && inherent_size <= total_size
4449 && ALLOCNO_HARD_REGNO (allocno) < 0);
4450 if (! flag_ira_share_spill_slots)
/* A negative ALLOCNO_HARD_REGNO <= -2 encodes a previously chosen
   slot as -(slot_num + 2); decode it here.  */
4452 slot_num = -ALLOCNO_HARD_REGNO (allocno) - 2;
4455 slot = &ira_spilled_reg_stack_slots[slot_num];
4460 best_cost = best_slot_num = -1;
4462 /* It means that the pseudo was spilled in the reload pass, try
/* Search all existing slots for the cheapest compatible one.  */
4465 slot_num < ira_spilled_reg_stack_slots_num;
4468 slot = &ira_spilled_reg_stack_slots[slot_num];
4469 if (slot->mem == NULL_RTX)
/* Reject slots too small for the requested total or inherent size.  */
4471 if (slot->width < total_size
4472 || GET_MODE_SIZE (GET_MODE (slot->mem)) < inherent_size)
/* Reject the slot if any pseudo already in it is live at the same
   time as ALLOCNO.  */
4475 EXECUTE_IF_SET_IN_BITMAP (&slot->spilled_regs,
4476 FIRST_PSEUDO_REGISTER, i, bi)
4478 another_allocno = ira_regno_allocno_map[i];
4479 if (allocnos_conflict_by_live_ranges_p (allocno,
/* Score the slot: sum copy frequencies connecting ALLOCNO to pseudos
   already sharing the slot (sharing with copy-related pseudos may
   remove memory-memory moves).  */
4483 for (cost = 0, cp = ALLOCNO_COPIES (allocno);
4487 if (cp->first == allocno)
4489 next_cp = cp->next_first_allocno_copy;
4490 another_allocno = cp->second;
4492 else if (cp->second == allocno)
4494 next_cp = cp->next_second_allocno_copy;
4495 another_allocno = cp->first;
4499 if (cp->insn == NULL_RTX)
4501 if (bitmap_bit_p (&slot->spilled_regs,
4502 ALLOCNO_REGNO (another_allocno)))
4505 if (cost > best_cost)
4508 best_slot_num = slot_num;
4515 slot_num = best_slot_num;
4516 slot = &ira_spilled_reg_stack_slots[slot_num];
4517 SET_REGNO_REG_SET (&slot->spilled_regs, regno);
/* Remember the chosen slot in the same -(slot_num + 2) encoding.  */
4519 ALLOCNO_HARD_REGNO (allocno) = -slot_num - 2;
4524 ira_assert (slot->width >= total_size);
4525 #ifdef ENABLE_IRA_CHECKING
/* Checking-only: no co-resident pseudo may conflict with REGNO.  */
4526 EXECUTE_IF_SET_IN_BITMAP (&slot->spilled_regs,
4527 FIRST_PSEUDO_REGISTER, i, bi)
4529 ira_assert (! conflict_by_live_ranges_p (regno, i));
4532 SET_REGNO_REG_SET (&slot->spilled_regs, regno);
4533 if (internal_flag_ira_verbose > 3 && ira_dump_file)
4535 fprintf (ira_dump_file, " Assigning %d(freq=%d) slot %d of",
4536 regno, REG_FREQ (regno), slot_num);
4537 EXECUTE_IF_SET_IN_BITMAP (&slot->spilled_regs,
4538 FIRST_PSEUDO_REGISTER, i, bi)
4540 if ((unsigned) regno != i)
4541 fprintf (ira_dump_file, " %d", i);
4543 fprintf (ira_dump_file, "\n");
4549 /* This is called by reload every time a new stack slot X with
4550 TOTAL_SIZE was allocated for REGNO. We store this info for
4551 subsequent ira_reuse_stack_slot calls. */
/* NOTE(review): the return type line and several braces are missing
   from this extract; X itself is recorded on a missing line
   (presumably slot->mem = x; verify).  */
4553 ira_mark_new_stack_slot (rtx x, int regno, unsigned int total_size)
4555 struct ira_spilled_reg_stack_slot *slot;
4557 ira_allocno_t allocno;
/* This path is used only by classic reload, never by LRA.  */
4559 ira_assert (! ira_use_lra_p);
4561 ira_assert (PSEUDO_REGNO_BYTES (regno) <= total_size);
4562 allocno = ira_regno_allocno_map[regno];
/* Decode any previously recorded slot (-(slot_num + 2) encoding);
   otherwise allocate a fresh slot number.  */
4563 slot_num = -ALLOCNO_HARD_REGNO (allocno) - 2;
4566 slot_num = ira_spilled_reg_stack_slots_num++;
4567 ALLOCNO_HARD_REGNO (allocno) = -slot_num - 2;
/* Initialize the slot record with REGNO as its sole occupant.  */
4569 slot = &ira_spilled_reg_stack_slots[slot_num];
4570 INIT_REG_SET (&slot->spilled_regs);
4571 SET_REGNO_REG_SET (&slot->spilled_regs, regno);
4573 slot->width = total_size;
4574 if (internal_flag_ira_verbose > 3 && ira_dump_file)
4575 fprintf (ira_dump_file, " Assigning %d(freq=%d) a new slot %d\n",
4576 regno, REG_FREQ (regno), slot_num);
4580 /* Return spill cost for pseudo-registers whose numbers are in array
4581 REGNOS (with a negative number as an end marker) for reload with
4582 given IN and OUT for INSN. Return also number points (through
4583 EXCESS_PRESSURE_LIVE_LENGTH) where the pseudo-register lives and
4584 the register pressure is high, number of references of the
4585 pseudo-registers (through NREFS), number of callee-clobbered
4586 hard-registers occupied by the pseudo-registers (through
4587 CALL_USED_COUNT), and the first hard regno occupied by the
4588 pseudo-registers (through FIRST_HARD_REGNO). */
/* NOTE(review): the return type, several local declarations, and the
   loop's end-marker test (regno = regnos[i]; if (regno < 0) break; --
   per the comment above) are missing from this extract.  */
4590 calculate_spill_cost (int *regnos, rtx in, rtx out, rtx_insn *insn,
4591 int *excess_pressure_live_length,
4592 int *nrefs, int *call_used_count, int *first_hard_regno)
4594 int i, cost, regno, hard_regno, j, count, saved_cost, nregs;
/* Accumulate over every pseudo in REGNOS until the negative
   end marker (test on a missing line).  */
4600 for (length = count = cost = i = 0;; i++)
4605 *nrefs += REG_N_REFS (regno);
4606 hard_regno = reg_renumber[regno];
4607 ira_assert (hard_regno >= 0);
4608 a = ira_regno_allocno_map[regno];
/* Pressure-excess length is per allocno, so divide by the number of
   objects representing it.  */
4609 length += ALLOCNO_EXCESS_PRESSURE_POINTS_NUM (a) / ALLOCNO_NUM_OBJECTS (a);
4610 cost += ALLOCNO_MEMORY_COST (a) - ALLOCNO_CLASS_COST (a);
/* Count the callee-clobbered hard registers the pseudo occupies.  */
4611 nregs = hard_regno_nregs[hard_regno][ALLOCNO_MODE (a)];
4612 for (j = 0; j < nregs; j++)
4613 if (! TEST_HARD_REG_BIT (call_used_reg_set, hard_regno + j))
/* If the insn reloads into/out of this very hard register, spilling
   the pseudo saves the corresponding memory moves -- credit them.  */
4617 in_p = in && REG_P (in) && (int) REGNO (in) == hard_regno;
4618 out_p = out && REG_P (out) && (int) REGNO (out) == hard_regno;
4620 && find_regno_note (insn, REG_DEAD, hard_regno) != NULL_RTX)
4624 saved_cost += ira_memory_move_cost
4625 [ALLOCNO_MODE (a)][ALLOCNO_CLASS (a)][1];
4628 += ira_memory_move_cost
4629 [ALLOCNO_MODE (a)][ALLOCNO_CLASS (a)][0];
4630 cost -= REG_FREQ_FROM_BB (BLOCK_FOR_INSN (insn)) * saved_cost;
/* Publish the secondary results through the out parameters.  */
4633 *excess_pressure_live_length = length;
4634 *call_used_count = count;
4638 hard_regno = reg_renumber[regnos[0]];
4640 *first_hard_regno = hard_regno;
4644 /* Return TRUE if spilling pseudo-registers whose numbers are in array
4645 REGNOS is better than spilling pseudo-registers with numbers in
4646 OTHER_REGNOS for reload with given IN and OUT for INSN. The
4647 function used by the reload pass to make better register spilling
/* NOTE(review): the return type line and the return statements of
   several comparison branches are missing from this extract.  */
4650 ira_better_spill_reload_regno_p (int *regnos, int *other_regnos,
4651 rtx in, rtx out, rtx_insn *insn)
4653 int cost, other_cost;
4654 int length, other_length;
4655 int nrefs, other_nrefs;
4656 int call_used_count, other_call_used_count;
4657 int hard_regno, other_hard_regno;
/* Compute all comparison metrics for both candidate sets.  */
4659 cost = calculate_spill_cost (regnos, in, out, insn,
4660 &length, &nrefs, &call_used_count, &hard_regno);
4661 other_cost = calculate_spill_cost (other_regnos, in, out, insn,
4662 &other_length, &other_nrefs,
4663 &other_call_used_count,
/* Tie-break cascade: an unreferenced set is always preferable to
   spill; then lower cost; then longer high-pressure live length;
   then (with REG_ALLOC_ORDER) earlier allocation order; then more
   call-used registers freed.  */
4665 if (nrefs == 0 && other_nrefs != 0)
4667 if (nrefs != 0 && other_nrefs == 0)
4669 if (cost != other_cost)
4670 return cost < other_cost;
4671 if (length != other_length)
4672 return length > other_length;
4673 #ifdef REG_ALLOC_ORDER
4674 if (hard_regno >= 0 && other_hard_regno >= 0)
4675 return (inv_reg_alloc_order[hard_regno]
4676 < inv_reg_alloc_order[other_hard_regno]);
4678 if (call_used_count != other_call_used_count)
4679 return call_used_count > other_call_used_count;
4686 /* Allocate and initialize data necessary for assign_hard_reg. */
/* NOTE(review): the return type line (void) and braces are missing
   from this extract; the assignment target on line 4691 is on a
   missing line (presumably sorted_allocnos; verify).  Everything
   allocated here is released by ira_finish_assign below.  */
4688 ira_initiate_assign (void)
4691 = (ira_allocno_t *) ira_allocate (sizeof (ira_allocno_t)
4692 * ira_allocnos_num);
4693 consideration_allocno_bitmap = ira_allocate_bitmap ();
4694 initiate_cost_update ();
4695 allocno_priorities = (int *) ira_allocate (sizeof (int) * ira_allocnos_num);
4696 sorted_copies = (ira_copy_t *) ira_allocate (ira_copies_num
4697 * sizeof (ira_copy_t));
4700 /* Deallocate data used by assign_hard_reg. */
/* Releases exactly the structures set up by ira_initiate_assign.
   NOTE(review): the return type line and braces are missing from
   this extract.  */
4702 ira_finish_assign (void)
4704 ira_free (sorted_allocnos);
4705 ira_free_bitmap (consideration_allocno_bitmap);
4706 finish_cost_update ();
4707 ira_free (allocno_priorities);
4708 ira_free (sorted_copies);
4713 /* Entry function doing color-based register allocation. */
/* NOTE(review): the function name/header lines are missing from this
   extract; the call that performs the actual coloring (between
   ira_initiate_assign and ira_finish_assign) is also missing.  */
4717 allocno_stack_vec.create (ira_allocnos_num);
4718 memset (allocated_hardreg_p, 0, sizeof (allocated_hardreg_p));
4719 ira_initiate_assign ();
4721 ira_finish_assign ();
4722 allocno_stack_vec.release ();
4723 move_spill_restore ();
4728 /* This page contains a simple register allocator without usage of
4729 allocno conflicts. This is used for fast allocation for -O0. */
4731 /* Do register allocation by not using allocno conflicts. It uses
4732 only allocno live ranges. The algorithm is close to Chow's
4733 priority coloring. */
/* NOTE(review): the return type line, several declarations, some
   braces, and the STACK_REGS preprocessor guards around the
   no_stack_reg_p code are missing from this extract.  */
4735 fast_allocation (void)
4737 int i, j, k, num, class_size, hard_regno;
4739 bool no_stack_reg_p;
4741 enum reg_class aclass;
4744 ira_allocno_iterator ai;
4746 HARD_REG_SET conflict_hard_regs, *used_hard_regs;
/* Collect all allocnos into a working array.  */
4748 sorted_allocnos = (ira_allocno_t *) ira_allocate (sizeof (ira_allocno_t)
4749 * ira_allocnos_num);
4751 FOR_EACH_ALLOCNO (a, ai)
4752 sorted_allocnos[num++] = a;
4753 allocno_priorities = (int *) ira_allocate (sizeof (int) * ira_allocnos_num);
4754 setup_allocno_priorities (sorted_allocnos, num);
/* One hard register set per program point; registers used by already
   colored allocnos whose live ranges cover that point.  */
4755 used_hard_regs = (HARD_REG_SET *) ira_allocate (sizeof (HARD_REG_SET)
4757 for (i = 0; i < ira_max_point; i++)
4758 CLEAR_HARD_REG_SET (used_hard_regs[i]);
/* Process allocnos in priority order, highest priority first.  */
4759 qsort (sorted_allocnos, num, sizeof (ira_allocno_t),
4760 allocno_priority_compare_func);
4761 for (i = 0; i < num; i++)
4765 a = sorted_allocnos[i];
4766 nr = ALLOCNO_NUM_OBJECTS (a);
/* Conflicting registers for A: its own conflict sets plus every
   register used at any point of its live ranges.  */
4767 CLEAR_HARD_REG_SET (conflict_hard_regs);
4768 for (l = 0; l < nr; l++)
4770 ira_object_t obj = ALLOCNO_OBJECT (a, l);
4771 IOR_HARD_REG_SET (conflict_hard_regs,
4772 OBJECT_CONFLICT_HARD_REGS (obj));
4773 for (r = OBJECT_LIVE_RANGES (obj); r != NULL; r = r->next)
4774 for (j = r->start; j <= r->finish; j++)
4775 IOR_HARD_REG_SET (conflict_hard_regs, used_hard_regs[j]);
4777 aclass = ALLOCNO_CLASS (a);
4778 ALLOCNO_ASSIGNED_P (a) = true;
/* Default to "spilled" unless a free register is found below.  */
4779 ALLOCNO_HARD_REGNO (a) = -1;
/* If every register of the class conflicts, leave A spilled.  */
4780 if (hard_reg_set_subset_p (reg_class_contents[aclass],
4781 conflict_hard_regs))
4783 mode = ALLOCNO_MODE (a);
4785 no_stack_reg_p = ALLOCNO_NO_STACK_REG_P (a);
/* Take the first class register that is not a forbidden stack reg,
   does not conflict, and is not prohibited for this mode.  */
4787 class_size = ira_class_hard_regs_num[aclass];
4788 for (j = 0; j < class_size; j++)
4790 hard_regno = ira_class_hard_regs[aclass][j];
4792 if (no_stack_reg_p && FIRST_STACK_REG <= hard_regno
4793 && hard_regno <= LAST_STACK_REG)
4796 if (ira_hard_reg_set_intersection_p (hard_regno, mode, conflict_hard_regs)
4797 || (TEST_HARD_REG_BIT
4798 (ira_prohibited_class_mode_regs[aclass][mode], hard_regno)))
4800 ALLOCNO_HARD_REGNO (a) = hard_regno;
/* Mark the chosen register (all words of it for MODE) as used at
   every point of A's live ranges.  */
4801 for (l = 0; l < nr; l++)
4803 ira_object_t obj = ALLOCNO_OBJECT (a, l);
4804 for (r = OBJECT_LIVE_RANGES (obj); r != NULL; r = r->next)
4805 for (k = r->start; k <= r->finish; k++)
4806 IOR_HARD_REG_SET (used_hard_regs[k],
4807 ira_reg_mode_hard_regset[hard_regno][mode]);
4812 ira_free (sorted_allocnos);
4813 ira_free (used_hard_regs);
4814 ira_free (allocno_priorities);
4815 if (internal_flag_ira_verbose > 1 && ira_dump_file != NULL)
4816 ira_print_disposition (ira_dump_file);
4821 /* Entry function doing coloring. */
4826 ira_allocno_iterator ai;
4828 /* Setup updated costs. */
4829 FOR_EACH_ALLOCNO (a, ai)
4831 ALLOCNO_UPDATED_MEMORY_COST (a) = ALLOCNO_MEMORY_COST (a);
4832 ALLOCNO_UPDATED_CLASS_COST (a) = ALLOCNO_CLASS_COST (a);
4834 if (ira_conflicts_p)