1 /* IRA allocation based on graph coloring.
2 Copyright (C) 2006, 2007, 2008
3 Free Software Foundation, Inc.
4 Contributed by Vladimir Makarov <vmakarov@redhat.com>.
6 This file is part of GCC.
8 GCC is free software; you can redistribute it and/or modify it under
9 the terms of the GNU General Public License as published by the Free
10 Software Foundation; either version 3, or (at your option) any later
13 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
14 WARRANTY; without even the implied warranty of MERCHANTABILITY or
15 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
18 You should have received a copy of the GNU General Public License
19 along with GCC; see the file COPYING3. If not see
20 <http://www.gnu.org/licenses/>. */
24 #include "coretypes.h"
33 #include "hard-reg-set.h"
34 #include "basic-block.h"
40 #include "splay-tree.h"
43 /* This file contains code for regional graph coloring, spill/restore
44 code placement optimization, and code helping the reload pass to do
   a better allocation.  */
/* NOTE(review): this copy of the file appears to have many source lines
   elided (unbalanced braces and truncated comments throughout); restore
   the full text from the upstream GCC ira-color.c before building.  */
47 /* Bitmap of allocnos which should be colored. */
48 static bitmap coloring_allocno_bitmap;
50 /* Bitmap of allocnos which should be taken into account during
51 coloring. In general case it contains allocnos from
52 coloring_allocno_bitmap plus other already colored conflicting
   allocnos.  */
54 static bitmap consideration_allocno_bitmap;
56 /* TRUE if we coalesced some allocnos. In other words, if we got
57 loops formed by members first_coalesced_allocno and
58 next_coalesced_allocno containing more one allocno. */
59 static bool allocno_coalesced_p;
61 /* Bitmap used to prevent a repeated allocno processing because of
   coalescing.  */
63 static bitmap processed_coalesced_allocno_bitmap;
65 /* All allocnos sorted according their priorities. */
66 static ira_allocno_t *sorted_allocnos;
68 /* Vec representing the stack of allocnos used during coloring. */
69 static VEC(ira_allocno_t,heap) *allocno_stack_vec;
71 /* Array used to choose an allocno for spilling. */
72 static ira_allocno_t *allocnos_for_spilling;
74 /* Pool for splay tree nodes. */
75 static alloc_pool splay_tree_node_pool;
77 /* When an allocno is removed from the splay tree, it is put in the
78 following vector for subsequent inserting it into the splay tree
79 after putting all colorable allocnos onto the stack. The allocno
80 could be removed from and inserted to the splay tree every time
81 when its spilling priority is changed but such solution would be
82 more costly although simpler. */
83 static VEC(ira_allocno_t,heap) *removed_splay_allocno_vec;
87 /* This page contains functions used to choose hard registers for
   allocnos.  */
90 /* Array whose element value is TRUE if the corresponding hard
91 register was already allocated for an allocno. */
92 static bool allocated_hardreg_p[FIRST_PSEUDO_REGISTER];
94 /* Describes one element in a queue of allocnos whose costs need to be
95 updated. Each allocno in the queue is known to have a cover class. */
96 struct update_cost_queue_elem
/* NOTE(review): the struct's field declarations (CHECK, DIVISOR, NEXT)
   and its closing brace appear to be elided in this copy -- the member
   comments below describe them.  */
98 /* This element is in the queue iff CHECK == update_cost_check. */
101 /* COST_HOP_DIVISOR**N, where N is the length of the shortest path
102 connecting this allocno to the one being allocated. */
105 /* The next allocno in the queue, or null if this is the last element. */
109 /* The first element in a queue of allocnos whose copy costs need to be
110 updated. Null if the queue is empty. */
111 static ira_allocno_t update_cost_queue;
113 /* The last element in the queue described by update_cost_queue.
114 Not valid if update_cost_queue is null. */
115 static struct update_cost_queue_elem *update_cost_queue_tail;
117 /* A pool of elements in the queue described by update_cost_queue.
118 Elements are indexed by ALLOCNO_NUM. */
119 static struct update_cost_queue_elem *update_cost_queue_elems;
121 /* The current value of update_copy_cost call count. */
122 static int update_cost_check;
124 /* Allocate and initialize data necessary for function
125 update_copy_costs. */
127 initiate_cost_update (void)
/* One queue element is preallocated per allocno; they are addressed by
   ALLOCNO_NUM.  */
131 size = ira_allocnos_num * sizeof (struct update_cost_queue_elem);
132 update_cost_queue_elems
133 = (struct update_cost_queue_elem *) ira_allocate (size);
134 memset (update_cost_queue_elems, 0, size);
135 update_cost_check = 0;
138 /* Deallocate data used by function update_copy_costs. */
140 finish_cost_update (void)
/* Releases the per-allocno queue elements set up by initiate_cost_update.  */
142 ira_free (update_cost_queue_elems);
145 /* When we traverse allocnos to update hard register costs, the cost
146 divisor will be multiplied by the following macro value for each
147 hop from given allocno to directly connected allocnos. */
148 #define COST_HOP_DIVISOR 4
150 /* Start a new cost-updating pass. */
152 start_update_cost (void)
/* NOTE(review): presumably update_cost_check is also advanced here so
   that elements queued in a previous pass become stale (queue_update_cost
   tests elem->check against it); that line appears elided in this copy.  */
155 update_cost_queue = NULL;
158 /* Add (ALLOCNO, DIVISOR) to the end of update_cost_queue,
159 unless ALLOCNO is already in the queue, or has no cover class. */
161 queue_update_cost (ira_allocno_t allocno, int divisor)
163 struct update_cost_queue_elem *elem;
165 elem = &update_cost_queue_elems[ALLOCNO_NUM (allocno)];
166 if (elem->check != update_cost_check
167 && ALLOCNO_COVER_CLASS (allocno) != NO_REGS)
/* Marking CHECK makes the element count as "in the queue" for the
   current pass.  */
169 elem->check = update_cost_check;
170 elem->divisor = divisor;
/* Append: either start the queue or link after the current tail.  */
172 if (update_cost_queue == NULL)
173 update_cost_queue = allocno;
175 update_cost_queue_tail->next = allocno;
176 update_cost_queue_tail = elem;
180 /* Try to remove the first element from update_cost_queue. Return false
181 if the queue was empty, otherwise make (*ALLOCNO, *DIVISOR) describe
182 the removed element. */
184 get_next_update_cost (ira_allocno_t *allocno, int *divisor)
186 struct update_cost_queue_elem *elem;
188 if (update_cost_queue == NULL)
/* Dequeue from the head; ELEM holds the out-of-band data (divisor,
   next link) for the head allocno.  */
191 *allocno = update_cost_queue;
192 elem = &update_cost_queue_elems[ALLOCNO_NUM (*allocno)];
193 *divisor = elem->divisor;
194 update_cost_queue = elem->next;
198 /* Update the cost of allocnos to increase chances to remove some
199 copies as the result of subsequent assignment. */
/* NOTE(review): the do/while braces and several statements of this
   function appear elided in this copy; the loop drains the queue built
   by start_update_cost/queue_update_cost.  */
201 update_copy_costs (ira_allocno_t allocno, bool decr_p)
203 int i, cost, update_cost, hard_regno, divisor;
204 enum machine_mode mode;
205 enum reg_class rclass, cover_class;
206 ira_allocno_t another_allocno;
207 ira_copy_t cp, next_cp;
209 hard_regno = ALLOCNO_HARD_REGNO (allocno);
210 ira_assert (hard_regno >= 0);
212 cover_class = ALLOCNO_COVER_CLASS (allocno);
213 if (cover_class == NO_REGS)
215 i = ira_class_hard_reg_index[cover_class][hard_regno];
217 rclass = REGNO_REG_CLASS (hard_regno);
219 start_update_cost ();
223 mode = ALLOCNO_MODE (allocno);
/* Walk all copies involving ALLOCNO; a copy stores its chain link
   differently depending on whether ALLOCNO is its first or second end.  */
224 for (cp = ALLOCNO_COPIES (allocno); cp != NULL; cp = next_cp)
226 if (cp->first == allocno)
228 next_cp = cp->next_first_allocno_copy;
229 another_allocno = cp->second;
231 else if (cp->second == allocno)
233 next_cp = cp->next_second_allocno_copy;
234 another_allocno = cp->first;
239 if (cover_class != ALLOCNO_COVER_CLASS (another_allocno)
240 || ALLOCNO_ASSIGNED_P (another_allocno))
/* The move-cost table is direction sensitive, so pick the order
   according to which end of the copy ALLOCNO is.  */
243 cost = (cp->second == allocno
244 ? ira_register_move_cost[mode][rclass][cover_class]
245 : ira_register_move_cost[mode][cover_class][rclass]);
249 update_cost = cp->freq * cost / divisor;
250 if (update_cost == 0)
253 ira_allocate_and_set_or_copy_costs
254 (&ALLOCNO_UPDATED_HARD_REG_COSTS (another_allocno), cover_class,
255 ALLOCNO_UPDATED_COVER_CLASS_COST (another_allocno),
256 ALLOCNO_HARD_REG_COSTS (another_allocno));
257 ira_allocate_and_set_or_copy_costs
258 (&ALLOCNO_UPDATED_CONFLICT_HARD_REG_COSTS (another_allocno),
260 ALLOCNO_CONFLICT_HARD_REG_COSTS (another_allocno));
261 ALLOCNO_UPDATED_HARD_REG_COSTS (another_allocno)[i] += update_cost;
262 ALLOCNO_UPDATED_CONFLICT_HARD_REG_COSTS (another_allocno)[i]
/* Propagate further with a larger divisor so remote hops matter less.  */
265 queue_update_cost (another_allocno, divisor * COST_HOP_DIVISOR);
268 while (get_next_update_cost (&allocno, &divisor));
271 /* This function updates COSTS (decrease if DECR_P) by conflict costs
272 of the unassigned allocnos connected by copies with allocnos in
273 update_cost_queue. This update increases chances to remove some
   copies.  */
276 update_conflict_hard_regno_costs (int *costs, bool decr_p)
278 int i, cost, class_size, freq, mult, div, divisor;
281 enum reg_class cover_class;
282 ira_allocno_t allocno, another_allocno;
283 ira_copy_t cp, next_cp;
/* Drain the queue prepared by the caller via start_update_cost.  */
285 while (get_next_update_cost (&allocno, &divisor))
286 for (cp = ALLOCNO_COPIES (allocno); cp != NULL; cp = next_cp)
288 if (cp->first == allocno)
290 next_cp = cp->next_first_allocno_copy;
291 another_allocno = cp->second;
293 else if (cp->second == allocno)
295 next_cp = cp->next_second_allocno_copy;
296 another_allocno = cp->first;
/* Skip copies to allocnos of a different cover class, already
   assigned ones, or ones already marked as spill candidates.  */
300 cover_class = ALLOCNO_COVER_CLASS (allocno);
301 if (cover_class != ALLOCNO_COVER_CLASS (another_allocno)
302 || ALLOCNO_ASSIGNED_P (another_allocno)
303 || ALLOCNO_MAY_BE_SPILLED_P (ALLOCNO_FIRST_COALESCED_ALLOCNO
306 class_size = ira_class_hard_regs_num[cover_class];
307 ira_allocate_and_copy_costs
308 (&ALLOCNO_UPDATED_CONFLICT_HARD_REG_COSTS (another_allocno),
309 cover_class, ALLOCNO_CONFLICT_HARD_REG_COSTS (another_allocno));
311 = ALLOCNO_UPDATED_CONFLICT_HARD_REG_COSTS (another_allocno);
312 if (conflict_costs == NULL)
317 freq = ALLOCNO_FREQ (another_allocno);
320 div = freq * divisor;
322 for (i = class_size - 1; i >= 0; i--)
324 cost = conflict_costs [i] * mult / div;
333 /* Probably 5 hops will be enough. */
335 && divisor <= (COST_HOP_DIVISOR
339 queue_update_cost (another_allocno, divisor * COST_HOP_DIVISOR);
343 /* Sort allocnos according to the profit of usage of a hard register
344 instead of memory for them. */
/* qsort comparator; C1/C2 are the memory-minus-register profits.  */
346 allocno_cost_compare_func (const void *v1p, const void *v2p)
348 ira_allocno_t p1 = *(const ira_allocno_t *) v1p;
349 ira_allocno_t p2 = *(const ira_allocno_t *) v2p;
352 c1 = ALLOCNO_UPDATED_MEMORY_COST (p1) - ALLOCNO_UPDATED_COVER_CLASS_COST (p1);
353 c2 = ALLOCNO_UPDATED_MEMORY_COST (p2) - ALLOCNO_UPDATED_COVER_CLASS_COST (p2);
/* NOTE(review): the comparison of C1 against C2 appears elided between
   these lines in this copy of the file.  */
357 /* If regs are equally good, sort by allocno numbers, so that the
358 results of qsort leave nothing to chance. */
359 return ALLOCNO_NUM (p1) - ALLOCNO_NUM (p2);
362 /* Print all allocnos coalesced with ALLOCNO. */
364 print_coalesced_allocno (ira_allocno_t allocno)
/* Walk the circular next_coalesced_allocno ring, dumping each member
   to the IRA dump file separated by '+'.  */
368 for (a = ALLOCNO_NEXT_COALESCED_ALLOCNO (allocno);;
369 a = ALLOCNO_NEXT_COALESCED_ALLOCNO (a))
371 ira_print_expanded_allocno (a);
374 fprintf (ira_dump_file, "+");
378 /* Choose a hard register for ALLOCNO (or for all coalesced allocnos
379 represented by ALLOCNO). If RETRY_P is TRUE, it means that the
380 function called from function `ira_reassign_conflict_allocnos' and
381 `allocno_reload_assign'. This function implements the optimistic
382 coalescing too: if we failed to assign a hard register to set of
383 the coalesced allocnos, we put them onto the coloring stack for
384 subsequent separate assigning. */
/* NOTE(review): a significant number of braces and statements of this
   function appear elided in this copy; the comments below document the
   visible structure only.  */
386 assign_hard_reg (ira_allocno_t allocno, bool retry_p)
388 HARD_REG_SET conflicting_regs;
389 int i, j, hard_regno, best_hard_regno, class_size;
390 int cost, mem_cost, min_cost, full_cost, min_full_cost, add_cost;
393 enum reg_class cover_class, rclass;
394 enum machine_mode mode;
395 ira_allocno_t a, conflict_allocno;
396 ira_allocno_conflict_iterator aci;
397 static int costs[FIRST_PSEUDO_REGISTER], full_costs[FIRST_PSEUDO_REGISTER];
402 ira_assert (! ALLOCNO_ASSIGNED_P (allocno));
403 cover_class = ALLOCNO_COVER_CLASS (allocno);
404 class_size = ira_class_hard_regs_num[cover_class];
405 mode = ALLOCNO_MODE (allocno);
406 CLEAR_HARD_REG_SET (conflicting_regs);
407 best_hard_regno = -1;
408 memset (full_costs, 0, sizeof (int) * class_size);
410 if (allocno_coalesced_p)
411 bitmap_clear (processed_coalesced_allocno_bitmap);
412 memset (costs, 0, sizeof (int) * class_size);
413 memset (full_costs, 0, sizeof (int) * class_size);
415 no_stack_reg_p = false;
417 start_update_cost ();
/* Accumulate costs and conflict sets over the whole coalesce ring.  */
418 for (a = ALLOCNO_NEXT_COALESCED_ALLOCNO (allocno);;
419 a = ALLOCNO_NEXT_COALESCED_ALLOCNO (a))
421 mem_cost += ALLOCNO_UPDATED_MEMORY_COST (a);
422 IOR_HARD_REG_SET (conflicting_regs,
423 ALLOCNO_TOTAL_CONFLICT_HARD_REGS (a));
424 ira_allocate_and_copy_costs (&ALLOCNO_UPDATED_HARD_REG_COSTS (a),
425 cover_class, ALLOCNO_HARD_REG_COSTS (a));
426 a_costs = ALLOCNO_UPDATED_HARD_REG_COSTS (a);
428 no_stack_reg_p = no_stack_reg_p || ALLOCNO_TOTAL_NO_STACK_REG_P (a);
430 for (cost = ALLOCNO_UPDATED_COVER_CLASS_COST (a), i = 0;
435 costs[i] += a_costs[i];
436 full_costs[i] += a_costs[i];
441 full_costs[i] += cost;
443 /* Take preferences of conflicting allocnos into account. */
444 FOR_EACH_ALLOCNO_CONFLICT (a, conflict_allocno, aci)
445 /* Reload can give another class so we need to check all
   allocnos.  */
447 if (retry_p || bitmap_bit_p (consideration_allocno_bitmap,
448 ALLOCNO_NUM (conflict_allocno)))
450 ira_assert (cover_class == ALLOCNO_COVER_CLASS (conflict_allocno));
451 if (allocno_coalesced_p)
/* Skip conflict allocnos already processed for this ring.  */
453 if (bitmap_bit_p (processed_coalesced_allocno_bitmap,
454 ALLOCNO_NUM (conflict_allocno)))
456 bitmap_set_bit (processed_coalesced_allocno_bitmap,
457 ALLOCNO_NUM (conflict_allocno))
459 if (ALLOCNO_ASSIGNED_P (conflict_allocno))
461 if ((hard_regno = ALLOCNO_HARD_REGNO (conflict_allocno)) >= 0)
465 ira_reg_mode_hard_regset
466 [hard_regno][ALLOCNO_MODE (conflict_allocno)]);
467 if (hard_reg_set_subset_p (reg_class_contents[cover_class],
473 else if (! ALLOCNO_MAY_BE_SPILLED_P (ALLOCNO_FIRST_COALESCED_ALLOCNO
476 ira_allocate_and_copy_costs
477 (&ALLOCNO_UPDATED_CONFLICT_HARD_REG_COSTS (conflict_allocno),
479 ALLOCNO_CONFLICT_HARD_REG_COSTS (conflict_allocno));
481 = ALLOCNO_UPDATED_CONFLICT_HARD_REG_COSTS (conflict_allocno);
482 if (conflict_costs != NULL)
483 for (j = class_size - 1; j >= 0; j--)
484 full_costs[j] -= conflict_costs[j];
485 queue_update_cost (conflict_allocno, COST_HOP_DIVISOR);
491 /* Take into account preferences of allocnos connected by copies to
492 the conflict allocnos. */
493 update_conflict_hard_regno_costs (full_costs, true);
495 /* Take preferences of allocnos connected by copies into
   account.  */
497 start_update_cost ();
498 for (a = ALLOCNO_NEXT_COALESCED_ALLOCNO (allocno);;
499 a = ALLOCNO_NEXT_COALESCED_ALLOCNO (a))
501 queue_update_cost (a, COST_HOP_DIVISOR);
505 update_conflict_hard_regno_costs (full_costs, false);
506 min_cost = min_full_cost = INT_MAX;
507 /* We don't care about giving callee saved registers to allocnos no
508 living through calls because call clobbered registers are
509 allocated first (it is usual practice to put them first in
   REG_ALLOC_ORDER).  */
511 for (i = 0; i < class_size; i++)
513 hard_regno = ira_class_hard_regs[cover_class][i];
516 && FIRST_STACK_REG <= hard_regno && hard_regno <= LAST_STACK_REG)
/* Skip registers that conflict or are prohibited for this mode.  */
519 if (! ira_hard_reg_not_in_set_p (hard_regno, mode, conflicting_regs)
520 || TEST_HARD_REG_BIT (prohibited_class_mode_regs[cover_class][mode],
524 full_cost = full_costs[i];
525 if (! allocated_hardreg_p[hard_regno]
526 && ira_hard_reg_not_in_set_p (hard_regno, mode, call_used_reg_set))
527 /* We need to save/restore the hard register in
528 epilogue/prologue. Therefore we increase the cost. */
530 /* ??? If only part is call clobbered. */
531 rclass = REGNO_REG_CLASS (hard_regno);
532 add_cost = (ira_memory_move_cost[mode][rclass][0]
533 + ira_memory_move_cost[mode][rclass][1] - 1);
535 full_cost += add_cost;
539 if (min_full_cost > full_cost)
541 min_full_cost = full_cost;
542 best_hard_regno = hard_regno;
543 ira_assert (hard_regno >= 0);
546 if (min_full_cost > mem_cost)
548 if (! retry_p && internal_flag_ira_verbose > 3 && ira_dump_file != NULL)
549 fprintf (ira_dump_file, "(memory is more profitable %d vs %d) ",
550 mem_cost, min_full_cost);
551 best_hard_regno = -1;
/* Optimistic coalescing fallback: no register fit the whole ring, so
   break the ring apart and push the members for separate coloring.  */
554 if (best_hard_regno < 0
555 && ALLOCNO_NEXT_COALESCED_ALLOCNO (allocno) != allocno)
557 for (j = 0, a = ALLOCNO_NEXT_COALESCED_ALLOCNO (allocno);;
558 a = ALLOCNO_NEXT_COALESCED_ALLOCNO (a))
560 ira_assert (! ALLOCNO_IN_GRAPH_P (a));
561 sorted_allocnos[j++] = a;
565 qsort (sorted_allocnos, j, sizeof (ira_allocno_t),
566 allocno_cost_compare_func);
567 for (i = 0; i < j; i++)
569 a = sorted_allocnos[i];
570 ALLOCNO_FIRST_COALESCED_ALLOCNO (a) = a;
571 ALLOCNO_NEXT_COALESCED_ALLOCNO (a) = a;
572 VEC_safe_push (ira_allocno_t, heap, allocno_stack_vec, a);
573 if (internal_flag_ira_verbose > 3 && ira_dump_file != NULL)
575 fprintf (ira_dump_file, " Pushing");
576 print_coalesced_allocno (a);
577 fprintf (ira_dump_file, "\n");
582 if (best_hard_regno >= 0)
583 allocated_hardreg_p[best_hard_regno] = true;
/* Record the decision (possibly -1, i.e. memory) on every ring member.  */
584 for (a = ALLOCNO_NEXT_COALESCED_ALLOCNO (allocno);;
585 a = ALLOCNO_NEXT_COALESCED_ALLOCNO (a))
587 ALLOCNO_HARD_REGNO (a) = best_hard_regno;
588 ALLOCNO_ASSIGNED_P (a) = true;
589 if (best_hard_regno >= 0)
590 update_copy_costs (a, true);
591 ira_assert (ALLOCNO_COVER_CLASS (a) == cover_class);
592 /* We don't need updated costs anymore: */
593 ira_free_allocno_updated_costs (a);
597 return best_hard_regno >= 0;
602 /* This page contains the allocator based on the Chaitin-Briggs algorithm. */
604 /* Bucket of allocnos that can colored currently without spilling. */
605 static ira_allocno_t colorable_allocno_bucket;
607 /* Bucket of allocnos that might be not colored currently without
   spilling.  */
609 static ira_allocno_t uncolorable_allocno_bucket;
611 /* Each element of the array contains the current number of allocnos
612 of given *cover* class in the uncolorable_bucket. */
613 static int uncolorable_allocnos_num[N_REG_CLASSES];
615 /* Add ALLOCNO to bucket *BUCKET_PTR. ALLOCNO should be not in a bucket
   before the call.  */
618 add_allocno_to_bucket (ira_allocno_t allocno, ira_allocno_t *bucket_ptr)
620 ira_allocno_t first_allocno;
621 enum reg_class cover_class;
/* Keep the per-class population counter of the uncolorable bucket.  */
623 if (bucket_ptr == &uncolorable_allocno_bucket
624 && (cover_class = ALLOCNO_COVER_CLASS (allocno)) != NO_REGS)
626 uncolorable_allocnos_num[cover_class]++;
627 ira_assert (uncolorable_allocnos_num[cover_class] > 0);
/* Push ALLOCNO at the head of the doubly-linked bucket list.  */
629 first_allocno = *bucket_ptr;
630 ALLOCNO_NEXT_BUCKET_ALLOCNO (allocno) = first_allocno;
631 ALLOCNO_PREV_BUCKET_ALLOCNO (allocno) = NULL;
632 if (first_allocno != NULL)
633 ALLOCNO_PREV_BUCKET_ALLOCNO (first_allocno) = allocno;
634 *bucket_ptr = allocno;
637 /* The function returns frequency and number of available hard
638 registers for allocnos coalesced with ALLOCNO. */
640 get_coalesced_allocnos_attributes (ira_allocno_t allocno, int *freq, int *num)
/* NOTE(review): *FREQ and *NUM are accumulated over the coalesce ring
   below; their zero-initialization appears elided in this copy.  */
646 for (a = ALLOCNO_NEXT_COALESCED_ALLOCNO (allocno);;
647 a = ALLOCNO_NEXT_COALESCED_ALLOCNO (a))
649 *freq += ALLOCNO_FREQ (a);
650 *num += ALLOCNO_AVAILABLE_REGS_NUM (a);
656 /* Compare two allocnos to define which allocno should be pushed first
657 into the coloring stack. If the return is a negative number, the
658 allocno given by the first parameter will be pushed first. In this
659 case such allocno has less priority than the second one and the
660 hard register will be assigned to it after assignment to the second
661 one. As the result of such assignment order, the second allocno
662 has a better chance to get the best hard register. */
664 bucket_allocno_compare_func (const void *v1p, const void *v2p)
666 ira_allocno_t a1 = *(const ira_allocno_t *) v1p;
667 ira_allocno_t a2 = *(const ira_allocno_t *) v2p;
668 int diff, a1_freq, a2_freq, a1_num, a2_num;
/* Primary key: cover class; then available regs, then frequency.  */
670 if ((diff = (int) ALLOCNO_COVER_CLASS (a2) - ALLOCNO_COVER_CLASS (a1)) != 0)
672 get_coalesced_allocnos_attributes (a1, &a1_freq, &a1_num);
673 get_coalesced_allocnos_attributes (a2, &a2_freq, &a2_num);
674 if ((diff = a2_num - a1_num) != 0)
676 else if ((diff = a1_freq - a2_freq) != 0)
/* Ties are broken by allocno number for a deterministic qsort order.  */
678 return ALLOCNO_NUM (a2) - ALLOCNO_NUM (a1);
681 /* Sort bucket *BUCKET_PTR and return the result through
   BUCKET_PTR.  */
684 sort_bucket (ira_allocno_t *bucket_ptr)
686 ira_allocno_t a, head;
/* Copy the list into SORTED_ALLOCNOS, sort it, then relink.  */
689 for (n = 0, a = *bucket_ptr; a != NULL; a = ALLOCNO_NEXT_BUCKET_ALLOCNO (a))
690 sorted_allocnos[n++] = a;
693 qsort (sorted_allocnos, n, sizeof (ira_allocno_t),
694 bucket_allocno_compare_func);
/* Rebuild the doubly-linked bucket list from the back of the sorted
   array so HEAD ends up at the highest-priority element.  */
696 for (n--; n >= 0; n--)
698 a = sorted_allocnos[n];
699 ALLOCNO_NEXT_BUCKET_ALLOCNO (a) = head;
700 ALLOCNO_PREV_BUCKET_ALLOCNO (a) = NULL;
702 ALLOCNO_PREV_BUCKET_ALLOCNO (head) = a;
708 /* Add ALLOCNO to bucket *BUCKET_PTR maintaining the order according
709 their priority. ALLOCNO should be not in a bucket before the
   call.  */
712 add_allocno_to_ordered_bucket (ira_allocno_t allocno,
713 ira_allocno_t *bucket_ptr)
715 ira_allocno_t before, after;
716 enum reg_class cover_class;
/* Keep the per-class population counter of the uncolorable bucket.  */
718 if (bucket_ptr == &uncolorable_allocno_bucket
719 && (cover_class = ALLOCNO_COVER_CLASS (allocno)) != NO_REGS)
721 uncolorable_allocnos_num[cover_class]++;
722 ira_assert (uncolorable_allocnos_num[cover_class] > 0);
/* Find the insertion point that keeps the list sorted by
   bucket_allocno_compare_func.  */
724 for (before = *bucket_ptr, after = NULL;
726 after = before, before = ALLOCNO_NEXT_BUCKET_ALLOCNO (before))
727 if (bucket_allocno_compare_func (&allocno, &before) < 0)
/* Splice ALLOCNO in between AFTER and BEFORE.  */
729 ALLOCNO_NEXT_BUCKET_ALLOCNO (allocno) = before;
730 ALLOCNO_PREV_BUCKET_ALLOCNO (allocno) = after;
732 *bucket_ptr = allocno;
734 ALLOCNO_NEXT_BUCKET_ALLOCNO (after) = allocno;
736 ALLOCNO_PREV_BUCKET_ALLOCNO (before) = allocno;
739 /* Delete ALLOCNO from bucket *BUCKET_PTR. It should be there before
   the call.  */
742 delete_allocno_from_bucket (ira_allocno_t allocno, ira_allocno_t *bucket_ptr)
744 ira_allocno_t prev_allocno, next_allocno;
745 enum reg_class cover_class;
/* Keep the per-class population counter of the uncolorable bucket.  */
747 if (bucket_ptr == &uncolorable_allocno_bucket
748 && (cover_class = ALLOCNO_COVER_CLASS (allocno)) != NO_REGS)
750 uncolorable_allocnos_num[cover_class]--;
751 ira_assert (uncolorable_allocnos_num[cover_class] >= 0);
/* Standard doubly-linked-list unlink; the head case updates *BUCKET_PTR.  */
753 prev_allocno = ALLOCNO_PREV_BUCKET_ALLOCNO (allocno);
754 next_allocno = ALLOCNO_NEXT_BUCKET_ALLOCNO (allocno);
755 if (prev_allocno != NULL)
756 ALLOCNO_NEXT_BUCKET_ALLOCNO (prev_allocno) = next_allocno;
759 ira_assert (*bucket_ptr == allocno);
760 *bucket_ptr = next_allocno;
762 if (next_allocno != NULL)
763 ALLOCNO_PREV_BUCKET_ALLOCNO (next_allocno) = prev_allocno;
766 /* Splay tree for each cover class. The trees are indexed by the
767 corresponding cover classes. Splay trees contain uncolorable
   allocnos.  */
769 static splay_tree uncolorable_allocnos_splay_tree[N_REG_CLASSES];
771 /* If the following macro is TRUE, splay tree is used to choose an
772 allocno of the corresponding cover class for spilling. When the
773 number uncolorable allocnos of given cover class decreases to some
774 threshold, linear array search is used to find the best allocno for
775 spilling. This threshold is actually pretty big because, although
776 splay trees asymptotically is much faster, each splay tree
777 operation is sufficiently costly especially taking cache locality
   into account.  */
779 #define USE_SPLAY_P(CLASS) (uncolorable_allocnos_num[CLASS] > 4000)
781 /* Put ALLOCNO onto the coloring stack without removing it from its
782 bucket. Pushing allocno to the coloring stack can result in moving
783 conflicting allocnos from the uncolorable bucket to the colorable
   one.  */
/* NOTE(review): several braces and statements of this function appear
   elided in this copy; comments below describe the visible structure.  */
786 push_allocno_to_stack (ira_allocno_t allocno)
788 int conflicts_num, conflict_size, size;
789 ira_allocno_t a, conflict_allocno;
790 enum reg_class cover_class;
791 ira_allocno_conflict_iterator aci;
793 ALLOCNO_IN_GRAPH_P (allocno) = false;
794 VEC_safe_push (ira_allocno_t, heap, allocno_stack_vec, allocno);
795 cover_class = ALLOCNO_COVER_CLASS (allocno);
796 if (cover_class == NO_REGS)
798 size = ira_reg_class_nregs[cover_class][ALLOCNO_MODE (allocno)];
799 if (allocno_coalesced_p)
800 bitmap_clear (processed_coalesced_allocno_bitmap);
/* Update the left-conflict counts of everything conflicting with any
   member of ALLOCNO's coalesce ring.  */
801 for (a = ALLOCNO_NEXT_COALESCED_ALLOCNO (allocno);;
802 a = ALLOCNO_NEXT_COALESCED_ALLOCNO (a))
804 FOR_EACH_ALLOCNO_CONFLICT (a, conflict_allocno, aci)
806 conflict_allocno = ALLOCNO_FIRST_COALESCED_ALLOCNO (conflict_allocno);
807 if (bitmap_bit_p (coloring_allocno_bitmap,
808 ALLOCNO_NUM (conflict_allocno)))
810 ira_assert (cover_class
811 == ALLOCNO_COVER_CLASS (conflict_allocno));
812 if (allocno_coalesced_p)
/* Process each conflicting ring representative only once.  */
814 if (bitmap_bit_p (processed_coalesced_allocno_bitmap,
815 ALLOCNO_NUM (conflict_allocno)))
817 bitmap_set_bit (processed_coalesced_allocno_bitmap,
818 ALLOCNO_NUM (conflict_allocno));
820 if (ALLOCNO_IN_GRAPH_P (conflict_allocno)
821 && ! ALLOCNO_ASSIGNED_P (conflict_allocno))
823 conflicts_num = ALLOCNO_LEFT_CONFLICTS_NUM (conflict_allocno);
825 = (ira_reg_class_nregs
826 [cover_class][ALLOCNO_MODE (conflict_allocno)]);
828 (ALLOCNO_LEFT_CONFLICTS_NUM (conflict_allocno) >= size);
829 if (conflicts_num + conflict_size
830 <= ALLOCNO_AVAILABLE_REGS_NUM (conflict_allocno))
832 ALLOCNO_LEFT_CONFLICTS_NUM (conflict_allocno) -= size;
836 = ALLOCNO_LEFT_CONFLICTS_NUM (conflict_allocno) - size;
/* An allocno tracked in the spill splay tree must be removed before
   its key (the conflict count) changes; it is re-inserted later from
   removed_splay_allocno_vec.  */
837 if (uncolorable_allocnos_splay_tree[cover_class] != NULL
838 && !ALLOCNO_SPLAY_REMOVED_P (conflict_allocno)
839 && USE_SPLAY_P (cover_class))
843 (uncolorable_allocnos_splay_tree[cover_class],
844 (splay_tree_key) conflict_allocno) != NULL);
846 (uncolorable_allocnos_splay_tree[cover_class],
847 (splay_tree_key) conflict_allocno);
848 ALLOCNO_SPLAY_REMOVED_P (conflict_allocno) = true;
849 VEC_safe_push (ira_allocno_t, heap,
850 removed_splay_allocno_vec,
853 ALLOCNO_LEFT_CONFLICTS_NUM (conflict_allocno) = conflicts_num;
/* The conflict allocno became colorable: move it between buckets.  */
854 if (conflicts_num + conflict_size
855 <= ALLOCNO_AVAILABLE_REGS_NUM (conflict_allocno))
857 delete_allocno_from_bucket
858 (conflict_allocno, &uncolorable_allocno_bucket);
859 add_allocno_to_ordered_bucket
860 (conflict_allocno, &colorable_allocno_bucket);
870 /* Put ALLOCNO onto the coloring stack and remove it from its bucket.
871 The allocno is in the colorable bucket if COLORABLE_P is TRUE. */
873 remove_allocno_from_bucket_and_push (ira_allocno_t allocno, bool colorable_p)
875 enum reg_class cover_class;
/* Remove from whichever bucket COLORABLE_P says the allocno is in.  */
878 delete_allocno_from_bucket (allocno, &colorable_allocno_bucket);
880 delete_allocno_from_bucket (allocno, &uncolorable_allocno_bucket);
881 if (internal_flag_ira_verbose > 3 && ira_dump_file != NULL)
883 fprintf (ira_dump_file, "  Pushing");
884 print_coalesced_allocno (allocno);
885 fprintf (ira_dump_file, "%s\n", colorable_p ? "" : "(potential spill)");
887 cover_class = ALLOCNO_COVER_CLASS (allocno);
/* Sanity check: the allocno's conflict count must agree with the
   bucket it came from.  */
888 ira_assert ((colorable_p
889 && (ALLOCNO_LEFT_CONFLICTS_NUM (allocno)
890 + ira_reg_class_nregs[cover_class][ALLOCNO_MODE (allocno)]
891 <= ALLOCNO_AVAILABLE_REGS_NUM (allocno)))
893 && (ALLOCNO_LEFT_CONFLICTS_NUM (allocno)
894 + ira_reg_class_nregs[cover_class][ALLOCNO_MODE
896 > ALLOCNO_AVAILABLE_REGS_NUM (allocno))));
898 ALLOCNO_MAY_BE_SPILLED_P (allocno) = true;
899 push_allocno_to_stack (allocno);
902 /* Put all allocnos from colorable bucket onto the coloring stack. */
904 push_only_colorable (void)
/* Sort once, then repeatedly push the head until the bucket drains.  */
906 sort_bucket (&colorable_allocno_bucket);
907 for (;colorable_allocno_bucket != NULL;)
908 remove_allocno_from_bucket_and_push (colorable_allocno_bucket, true);
911 /* Puts ALLOCNO chosen for potential spilling onto the coloring
   stack.  */
914 push_allocno_to_spill (ira_allocno_t allocno)
916 delete_allocno_from_bucket (allocno, &uncolorable_allocno_bucket);
917 ALLOCNO_MAY_BE_SPILLED_P (allocno) = true;
918 if (internal_flag_ira_verbose > 3 && ira_dump_file != NULL)
919 fprintf (ira_dump_file, "  Pushing p%d(%d) (potential spill)\n",
920 ALLOCNO_NUM (allocno), ALLOCNO_REGNO (allocno));
921 push_allocno_to_stack (allocno);
924 /* Return the frequency of exit edges (if EXIT_P) or entry from/to the
925 loop given by its LOOP_NODE. */
927 ira_loop_edge_freq (ira_loop_tree_node_t loop_node, int regno, bool exit_p)
932 VEC (edge, heap) *edges;
934 ira_assert (loop_node->loop != NULL
935 && (regno < 0 || regno >= FIRST_PSEUDO_REGISTER));
/* Entry case: sum frequencies of header predecessors other than the
   latch; with a REGNO, only edges where the pseudo is live across.  */
939 FOR_EACH_EDGE (e, ei, loop_node->loop->header->preds)
940 if (e->src != loop_node->loop->latch
942 || (bitmap_bit_p (DF_LR_OUT (e->src), regno)
943 && bitmap_bit_p (DF_LR_IN (e->dest), regno))))
944 freq += EDGE_FREQUENCY (e);
/* Exit case: sum over the loop's exit edges, same liveness filter.  */
948 edges = get_loop_exit_edges (loop_node->loop);
949 for (i = 0; VEC_iterate (edge, edges, i, e); i++)
951 || (bitmap_bit_p (DF_LR_OUT (e->src), regno)
952 && bitmap_bit_p (DF_LR_IN (e->dest), regno)))
953 freq += EDGE_FREQUENCY (e);
954 VEC_free (edge, heap, edges);
957 return REG_FREQ_FROM_EDGE_FREQ (freq);
960 /* Calculate and return the cost of putting allocno A into memory. */
962 calculate_allocno_spill_cost (ira_allocno_t a)
965 enum machine_mode mode;
966 enum reg_class rclass;
967 ira_allocno_t parent_allocno;
968 ira_loop_tree_node_t parent_node, loop_node;
970 regno = ALLOCNO_REGNO (a);
/* Base cost: how much keeping A in memory loses versus a register.  */
971 cost = ALLOCNO_UPDATED_MEMORY_COST (a) - ALLOCNO_UPDATED_COVER_CLASS_COST (a);
972 if (ALLOCNO_CAP (a) != NULL)
974 loop_node = ALLOCNO_LOOP_TREE_NODE (a);
975 if ((parent_node = loop_node->parent) == NULL)
977 if ((parent_allocno = parent_node->regno_allocno_map[regno]) == NULL)
979 mode = ALLOCNO_MODE (a);
980 rclass = ALLOCNO_COVER_CLASS (a);
/* Adjust by spill/restore traffic on the loop border, depending on
   whether the parent allocno sits in memory or in a register.  */
981 if (ALLOCNO_HARD_REGNO (parent_allocno) < 0)
982 cost -= (ira_memory_move_cost[mode][rclass][0]
983 * ira_loop_edge_freq (loop_node, regno, true)
984 + ira_memory_move_cost[mode][rclass][1]
985 * ira_loop_edge_freq (loop_node, regno, false));
987 cost += ((ira_memory_move_cost[mode][rclass][1]
988 * ira_loop_edge_freq (loop_node, regno, true)
989 + ira_memory_move_cost[mode][rclass][0]
990 * ira_loop_edge_freq (loop_node, regno, false))
991 - (ira_register_move_cost[mode][rclass][rclass]
992 * (ira_loop_edge_freq (loop_node, regno, false)
993 + ira_loop_edge_freq (loop_node, regno, true))));
997 /* Compare keys in the splay tree used to choose best allocno for
998 spilling. The best allocno has the minimal key. */
1000 allocno_spill_priority_compare (splay_tree_key k1, splay_tree_key k2)
1002 int pri1, pri2, diff;
1003 ira_allocno_t a1 = (ira_allocno_t) k1, a2 = (ira_allocno_t) k2;
/* Priority: spill cost (ALLOCNO_TEMP) scaled down by how many
   conflicts and registers the allocno ties up.  */
1005 pri1 = (ALLOCNO_TEMP (a1)
1006 / (ALLOCNO_LEFT_CONFLICTS_NUM (a1)
1007 * ira_reg_class_nregs[ALLOCNO_COVER_CLASS (a1)][ALLOCNO_MODE (a1)]
1009 pri2 = (ALLOCNO_TEMP (a2)
1010 / (ALLOCNO_LEFT_CONFLICTS_NUM (a2)
1011 * ira_reg_class_nregs[ALLOCNO_COVER_CLASS (a2)][ALLOCNO_MODE (a2)]
1013 if ((diff = pri1 - pri2) != 0)
1015 if ((diff = ALLOCNO_TEMP (a1) - ALLOCNO_TEMP (a2)) != 0)
/* Final tie-break keeps the ordering total and deterministic.  */
1017 return ALLOCNO_NUM (a1) - ALLOCNO_NUM (a2);
1020 /* Allocate data of SIZE for the splay trees. We allocate only splay
1021 tree roots or splay tree nodes. If you change this, please rewrite
   the function.  */
1024 splay_tree_allocate (int size, void *data ATTRIBUTE_UNUSED)
/* Nodes come from the dedicated pool; anything else (tree roots) from
   the general IRA allocator.  */
1026 if (size != sizeof (struct splay_tree_node_s))
1027 return ira_allocate (size);
1028 return pool_alloc (splay_tree_node_pool);
1031 /* Free data NODE for the splay trees. We allocate and free only splay
1032 tree roots or splay tree nodes. If you change this, please rewrite
   the function.  */
1035 splay_tree_free (void *node, void *data ATTRIBUTE_UNUSED)
1038 enum reg_class cover_class;
/* If NODE is one of the registered tree roots, it was obtained from
   ira_allocate, not the node pool; otherwise return it to the pool.  */
1040 for (i = 0; i < ira_reg_class_cover_size; i++)
1042 cover_class = ira_reg_class_cover[i];
1043 if (node == uncolorable_allocnos_splay_tree[cover_class])
1049 pool_free (splay_tree_node_pool, node);
1052 /* Push allocnos to the coloring stack. The order of allocnos in the
1053 stack defines the order for the subsequent coloring. */
/* NOTE(review): this numbered listing elides many original lines
   (braces, loop exits, some statements); comments below describe only
   what the visible lines establish.  */
1055 push_allocnos_to_stack (void)
1057 ira_allocno_t allocno, a, i_allocno, *allocno_vec;
1058 enum reg_class cover_class, rclass;
1059 int allocno_pri, i_allocno_pri, allocno_cost, i_allocno_cost;
1060 int i, j, num, cover_class_allocnos_num[N_REG_CLASSES];
1061 ira_allocno_t *cover_class_allocnos[N_REG_CLASSES];
/* Reset per-cover-class bookkeeping before processing the buckets.  */
1065 VEC_truncate(ira_allocno_t, removed_splay_allocno_vec, 0);
1066 for (i = 0; i < ira_reg_class_cover_size; i++)
1068 cover_class = ira_reg_class_cover[i];
1069 cover_class_allocnos_num[cover_class] = 0;
1070 cover_class_allocnos[cover_class] = NULL;
1071 uncolorable_allocnos_splay_tree[cover_class] = NULL;
1073 /* Calculate uncolorable allocno spill costs. */
1074 for (allocno = uncolorable_allocno_bucket;
1076 allocno = ALLOCNO_NEXT_BUCKET_ALLOCNO (allocno))
1077 if ((cover_class = ALLOCNO_COVER_CLASS (allocno)) != NO_REGS)
1079 cover_class_allocnos_num[cover_class]++;
/* Sum spill cost over the whole coalesced set; the loop-exit test
   (first_coalesced sentinel) is elided from this listing.  */
1081 for (a = ALLOCNO_NEXT_COALESCED_ALLOCNO (allocno);;
1082 a = ALLOCNO_NEXT_COALESCED_ALLOCNO (a))
1084 cost += calculate_allocno_spill_cost (a);
1088 /* ??? Remove cost of copies between the coalesced
1090 ALLOCNO_TEMP (allocno) = cost;
1092 /* Define place where to put uncolorable allocnos of the same cover
1094 for (num = i = 0; i < ira_reg_class_cover_size; i++)
1096 cover_class = ira_reg_class_cover[i];
1097 ira_assert (cover_class_allocnos_num[cover_class]
1098 == uncolorable_allocnos_num[cover_class]);
1099 if (cover_class_allocnos_num[cover_class] != 0)
/* Carve this class's slice out of the shared allocnos_for_spilling
   array; the per-class count is reset for reuse as a fill index.  */
1101 cover_class_allocnos[cover_class] = allocnos_for_spilling + num;
1102 num += cover_class_allocnos_num[cover_class];
1103 cover_class_allocnos_num[cover_class] = 0;
/* For classes with many uncolorable allocnos, keep them in a splay
   tree ordered by spill priority for cheap min extraction.  */
1105 if (USE_SPLAY_P (cover_class))
1106 uncolorable_allocnos_splay_tree[cover_class]
1107 = splay_tree_new_with_allocator (allocno_spill_priority_compare,
1108 NULL, NULL, splay_tree_allocate,
1109 splay_tree_free, NULL);
1111 ira_assert (num <= ira_allocnos_num);
1112 /* Collect uncolorable allocnos of each cover class. */
1113 for (allocno = uncolorable_allocno_bucket;
1115 allocno = ALLOCNO_NEXT_BUCKET_ALLOCNO (allocno))
1116 if ((cover_class = ALLOCNO_COVER_CLASS (allocno)) != NO_REGS)
1118 cover_class_allocnos
1119 [cover_class][cover_class_allocnos_num[cover_class]++] = allocno;
1120 if (uncolorable_allocnos_splay_tree[cover_class] != NULL)
1121 splay_tree_insert (uncolorable_allocnos_splay_tree[cover_class],
1122 (splay_tree_key) allocno,
1123 (splay_tree_value) allocno);
/* Main push loop: drain colorable allocnos first, then pick one
   potential-spill candidate from the uncolorable bucket and repeat.  */
1127 push_only_colorable ();
1128 allocno = uncolorable_allocno_bucket;
1129 if (allocno == NULL)
1131 cover_class = ALLOCNO_COVER_CLASS (allocno);
1132 if (cover_class == NO_REGS)
1134 push_allocno_to_spill (allocno);
1137 /* Potential spilling. */
1139 (ira_reg_class_nregs[cover_class][ALLOCNO_MODE (allocno)] > 0);
1140 if (USE_SPLAY_P (cover_class))
/* Re-insert allocnos temporarily removed from the splay tree whose
   conflict counts changed, if they are still uncolorable.  */
1142 for (;VEC_length (ira_allocno_t, removed_splay_allocno_vec) != 0;)
1144 allocno = VEC_pop (ira_allocno_t, removed_splay_allocno_vec);
1145 ALLOCNO_SPLAY_REMOVED_P (allocno) = false;
1146 rclass = ALLOCNO_COVER_CLASS (allocno);
1147 if (ALLOCNO_LEFT_CONFLICTS_NUM (allocno)
1148 + ira_reg_class_nregs [rclass][ALLOCNO_MODE (allocno)]
1149 > ALLOCNO_AVAILABLE_REGS_NUM (allocno))
1151 (uncolorable_allocnos_splay_tree[rclass],
1152 (splay_tree_key) allocno, (splay_tree_value) allocno);
/* Splay path: the tree minimum is the cheapest spill candidate.  */
1154 allocno = ((ira_allocno_t)
1156 (uncolorable_allocnos_splay_tree[cover_class])->key);
1157 splay_tree_remove (uncolorable_allocnos_splay_tree[cover_class],
1158 (splay_tree_key) allocno);
/* Non-splay path: linear scan of the class's array slice.  */
1162 num = cover_class_allocnos_num[cover_class];
1163 ira_assert (num > 0);
1164 allocno_vec = cover_class_allocnos[cover_class];
1166 allocno_pri = allocno_cost = 0;
1167 /* Sort uncolorable allocno to find the one with the lowest
/* Partition: entries still in the conflict graph are swapped to the
   front; dead entries accumulate at the tail (index j).  */
1169 for (i = 0, j = num - 1; i <= j;)
1171 i_allocno = allocno_vec[i];
1172 if (! ALLOCNO_IN_GRAPH_P (i_allocno)
1173 && ALLOCNO_IN_GRAPH_P (allocno_vec[j]))
1175 i_allocno = allocno_vec[j];
1176 allocno_vec[j] = allocno_vec[i];
1177 allocno_vec[i] = i_allocno;
1179 if (ALLOCNO_IN_GRAPH_P (i_allocno))
1182 ira_assert (ALLOCNO_TEMP (i_allocno) != INT_MAX);
/* Priority is cached spill cost scaled by conflicts * regs-per-value
   (the dividend line is elided from this listing).  */
1183 i_allocno_cost = ALLOCNO_TEMP (i_allocno);
1186 / (ALLOCNO_LEFT_CONFLICTS_NUM (i_allocno)
1187 * ira_reg_class_nregs[ALLOCNO_COVER_CLASS
1189 [ALLOCNO_MODE (i_allocno)] + 1));
/* Ties broken by cost, then by allocno number for determinism.  */
1190 if (allocno == NULL || allocno_pri > i_allocno_pri
1191 || (allocno_pri == i_allocno_pri
1192 && (allocno_cost > i_allocno_cost
1193 || (allocno_cost == i_allocno_cost
1194 && (ALLOCNO_NUM (allocno)
1195 > ALLOCNO_NUM (i_allocno))))))
1197 allocno = i_allocno;
1198 allocno_cost = i_allocno_cost;
1199 allocno_pri = i_allocno_pri;
1202 if (! ALLOCNO_IN_GRAPH_P (allocno_vec[j]))
1205 ira_assert (allocno != NULL && j >= 0);
1206 cover_class_allocnos_num[cover_class] = j + 1;
1208 ira_assert (ALLOCNO_IN_GRAPH_P (allocno)
1209 && ALLOCNO_COVER_CLASS (allocno) == cover_class
1210 && (ALLOCNO_LEFT_CONFLICTS_NUM (allocno)
1211 + ira_reg_class_nregs[cover_class][ALLOCNO_MODE
1213 > ALLOCNO_AVAILABLE_REGS_NUM (allocno)));
1214 remove_allocno_from_bucket_and_push (allocno, false);
/* Both buckets must be empty once everything has been pushed.  */
1216 ira_assert (colorable_allocno_bucket == NULL
1217 && uncolorable_allocno_bucket == NULL);
1218 for (i = 0; i < ira_reg_class_cover_size; i++)
1220 cover_class = ira_reg_class_cover[i];
1221 ira_assert (uncolorable_allocnos_num[cover_class] == 0);
1222 if (uncolorable_allocnos_splay_tree[cover_class] != NULL)
1223 splay_tree_delete (uncolorable_allocnos_splay_tree[cover_class]);
1227 /* Pop the coloring stack and assign hard registers to the popped
/* Allocnos are popped in reverse push order; each is either given a hard
   register by assign_hard_reg or left spilled (hard_regno == -1).  */
1230 pop_allocnos_from_stack (void)
1232 ira_allocno_t allocno;
1233 enum reg_class cover_class;
1235 for (;VEC_length (ira_allocno_t, allocno_stack_vec) != 0;)
1237 allocno = VEC_pop (ira_allocno_t, allocno_stack_vec);
1238 cover_class = ALLOCNO_COVER_CLASS (allocno);
1239 if (internal_flag_ira_verbose > 3 && ira_dump_file != NULL)
1241 fprintf (ira_dump_file, " Popping");
1242 print_coalesced_allocno (allocno);
1243 fprintf (ira_dump_file, " -- ");
/* NO_REGS allocnos can never get a hard register: assign memory.  */
1245 if (cover_class == NO_REGS)
1247 ALLOCNO_HARD_REGNO (allocno) = -1;
1248 ALLOCNO_ASSIGNED_P (allocno) = true;
1249 ira_assert (ALLOCNO_UPDATED_HARD_REG_COSTS (allocno) == NULL);
1251 (ALLOCNO_UPDATED_CONFLICT_HARD_REG_COSTS (allocno) == NULL);
1252 if (internal_flag_ira_verbose > 3 && ira_dump_file != NULL)
1253 fprintf (ira_dump_file, "assign memory\n");
1255 else if (assign_hard_reg (allocno, false))
1257 if (internal_flag_ira_verbose > 3 && ira_dump_file != NULL)
1258 fprintf (ira_dump_file, "assign reg %d\n",
1259 ALLOCNO_HARD_REGNO (allocno))
1261 else if (ALLOCNO_ASSIGNED_P (allocno))
1263 if (internal_flag_ira_verbose > 3 && ira_dump_file != NULL)
1264 fprintf (ira_dump_file, "spill\n");
/* Mark the allocno as back in the conflict graph for later passes.  */
1266 ALLOCNO_IN_GRAPH_P (allocno) = true;
1270 /* Set up number of available hard registers for ALLOCNO. */
/* Starts from the class-wide count and subtracts hard registers already
   taken by conflicts of any allocno coalesced with ALLOCNO.  */
1272 setup_allocno_available_regs_num (ira_allocno_t allocno)
1274 int i, n, hard_regs_num;
1275 enum reg_class cover_class;
1277 HARD_REG_SET temp_set;
1279 cover_class = ALLOCNO_COVER_CLASS (allocno);
1280 ALLOCNO_AVAILABLE_REGS_NUM (allocno) = ira_available_class_regs[cover_class];
1281 if (cover_class == NO_REGS)
1283 CLEAR_HARD_REG_SET (temp_set);
/* Must be called on the representative of the coalesced set only.  */
1284 ira_assert (ALLOCNO_FIRST_COALESCED_ALLOCNO (allocno) == allocno);
1285 hard_regs_num = ira_class_hard_regs_num[cover_class];
/* Union the conflict hard-reg sets of every coalesced member; the
   loop-exit test is elided from this listing.  */
1286 for (a = ALLOCNO_NEXT_COALESCED_ALLOCNO (allocno);;
1287 a = ALLOCNO_NEXT_COALESCED_ALLOCNO (a))
1289 IOR_HARD_REG_SET (temp_set, ALLOCNO_TOTAL_CONFLICT_HARD_REGS (a));
/* Count conflicting hard regs that belong to the cover class.  */
1293 for (n = 0, i = hard_regs_num - 1; i >= 0; i--)
1294 if (TEST_HARD_REG_BIT (temp_set, ira_class_hard_regs[cover_class][i]))
1296 if (internal_flag_ira_verbose > 2 && n > 0 && ira_dump_file != NULL)
1297 fprintf (ira_dump_file, " Reg %d of %s has %d regs less\n",
1298 ALLOCNO_REGNO (allocno), reg_class_names[cover_class], n);
1299 ALLOCNO_AVAILABLE_REGS_NUM (allocno) -= n;
1302 /* Set up ALLOCNO_LEFT_CONFLICTS_NUM for ALLOCNO. */
/* Estimates how many hard registers' worth of conflicts remain for
   ALLOCNO (and its coalesced set): explicit conflict hard regs plus
   registers consumed by conflicting allocnos, weighted by their size in
   registers.  NOTE(review): some lines are elided from this listing.  */
1304 setup_allocno_left_conflicts_num (ira_allocno_t allocno)
1306 int i, hard_regs_num, hard_regno, conflict_allocnos_size;
1307 ira_allocno_t a, conflict_allocno;
1308 enum reg_class cover_class;
1309 HARD_REG_SET temp_set;
1310 ira_allocno_conflict_iterator aci;
1312 cover_class = ALLOCNO_COVER_CLASS (allocno);
1313 hard_regs_num = ira_class_hard_regs_num[cover_class];
1314 CLEAR_HARD_REG_SET (temp_set);
1315 ira_assert (ALLOCNO_FIRST_COALESCED_ALLOCNO (allocno) == allocno);
/* Union conflict hard regs of all coalesced members (loop exit test
   elided in this listing).  */
1316 for (a = ALLOCNO_NEXT_COALESCED_ALLOCNO (allocno);;
1317 a = ALLOCNO_NEXT_COALESCED_ALLOCNO (a))
1319 IOR_HARD_REG_SET (temp_set, ALLOCNO_TOTAL_CONFLICT_HARD_REGS (a));
/* Restrict to allocatable registers of the cover class and count them;
   quit the scan early once temp_set is exhausted.  */
1323 AND_HARD_REG_SET (temp_set, reg_class_contents[cover_class]);
1324 AND_COMPL_HARD_REG_SET (temp_set, ira_no_alloc_regs);
1325 conflict_allocnos_size = 0;
1326 if (! hard_reg_set_empty_p (temp_set))
1327 for (i = 0; i < (int) hard_regs_num; i++)
1329 hard_regno = ira_class_hard_regs[cover_class][i];
1330 if (TEST_HARD_REG_BIT (temp_set, hard_regno))
1332 conflict_allocnos_size++;
1333 CLEAR_HARD_REG_BIT (temp_set, hard_regno);
1334 if (hard_reg_set_empty_p (temp_set))
/* temp_set is reused below to avoid double-counting hard regs of
   already-assigned conflicting allocnos.  */
1338 CLEAR_HARD_REG_SET (temp_set);
1339 if (allocno_coalesced_p)
1340 bitmap_clear (processed_coalesced_allocno_bitmap)
1341 if (cover_class != NO_REGS)
1342 for (a = ALLOCNO_NEXT_COALESCED_ALLOCNO (allocno);;
1343 a = ALLOCNO_NEXT_COALESCED_ALLOCNO (a))
1345 FOR_EACH_ALLOCNO_CONFLICT (a, conflict_allocno, aci)
1348 = ALLOCNO_FIRST_COALESCED_ALLOCNO (conflict_allocno);
1349 if (bitmap_bit_p (consideration_allocno_bitmap,
1350 ALLOCNO_NUM (conflict_allocno)))
1352 ira_assert (cover_class
1353 == ALLOCNO_COVER_CLASS (conflict_allocno));
/* With coalescing, process each coalesced conflict set once.  */
1354 if (allocno_coalesced_p)
1356 if (bitmap_bit_p (processed_coalesced_allocno_bitmap,
1357 ALLOCNO_NUM (conflict_allocno)))
1359 bitmap_set_bit (processed_coalesced_allocno_bitmap,
1360 ALLOCNO_NUM (conflict_allocno));
/* Unassigned conflict: count its full register footprint.  */
1362 if (! ALLOCNO_ASSIGNED_P (conflict_allocno))
1363 conflict_allocnos_size
1364 += (ira_reg_class_nregs
1365 [cover_class][ALLOCNO_MODE (conflict_allocno)]);
/* Assigned conflict: count each of its hard registers once.  */
1366 else if ((hard_regno = ALLOCNO_HARD_REGNO (conflict_allocno))
1369 int last = (hard_regno
1371 [hard_regno][ALLOCNO_MODE (conflict_allocno)]);
1373 while (hard_regno < last)
1375 if (! TEST_HARD_REG_BIT (temp_set, hard_regno))
1377 conflict_allocnos_size++;
1378 SET_HARD_REG_BIT (temp_set, hard_regno);
1388 ALLOCNO_LEFT_CONFLICTS_NUM (allocno) = conflict_allocnos_size;
1391 /* Put ALLOCNO in a bucket corresponding to its number and size of its
1392 conflicting allocnos and hard registers. */
1394 put_allocno_into_bucket (ira_allocno_t allocno)
1397 enum reg_class cover_class;
1399 cover_class = ALLOCNO_COVER_CLASS (allocno);
1400 hard_regs_num = ira_class_hard_regs_num[cover_class];
/* Only the representative of a coalesced set goes into a bucket.  */
1401 if (ALLOCNO_FIRST_COALESCED_ALLOCNO (allocno) != allocno)
1403 ALLOCNO_IN_GRAPH_P (allocno) = true;
1404 setup_allocno_left_conflicts_num (allocno);
1405 setup_allocno_available_regs_num (allocno);
/* Trivially colorable if the remaining conflicts plus this allocno's
   own register need still fit in the available registers.  */
1406 if (ALLOCNO_LEFT_CONFLICTS_NUM (allocno)
1407 + ira_reg_class_nregs[cover_class][ALLOCNO_MODE (allocno)]
1408 <= ALLOCNO_AVAILABLE_REGS_NUM (allocno))
1409 add_allocno_to_bucket (allocno, &colorable_allocno_bucket);
1411 add_allocno_to_bucket (allocno, &uncolorable_allocno_bucket);
1414 /* The function is used to sort allocnos according to their execution
/* qsort comparator for ira_copy_t: orders by execution frequency
   (the frequency-comparison lines are elided from this listing),
   falling back to copy number so the order is deterministic.  */
1417 copy_freq_compare_func (const void *v1p, const void *v2p)
1419 ira_copy_t cp1 = *(const ira_copy_t *) v1p, cp2 = *(const ira_copy_t *) v2p;
1427 /* If frequencies are equal, sort by copy numbers, so that the results
1428 of qsort leave nothing to chance. */
1429 return cp1->num - cp2->num;
1432 /* Merge two sets of coalesced allocnos given correspondingly by
1433 allocnos A1 and A2 (more accurately merging A2 set into A1
/* Coalesced sets are circular singly linked lists threaded through
   ALLOCNO_NEXT_COALESCED_ALLOCNO, each member pointing at a common
   FIRST representative.  Merging splices A2's ring into A1's.  */
1436 merge_allocnos (ira_allocno_t a1, ira_allocno_t a2)
1438 ira_allocno_t a, first, last, next;
1440 first = ALLOCNO_FIRST_COALESCED_ALLOCNO (a1);
/* Already in the same set: nothing to do (the early return that
   follows this test is elided from this listing).  */
1441 if (first == ALLOCNO_FIRST_COALESCED_ALLOCNO (a2))
/* Repoint every member of A2's ring at A1's representative.  */
1443 for (last = a2, a = ALLOCNO_NEXT_COALESCED_ALLOCNO (a2);;
1444 a = ALLOCNO_NEXT_COALESCED_ALLOCNO (a))
1446 ALLOCNO_FIRST_COALESCED_ALLOCNO (a) = first;
/* Splice A2's ring into A1's ring just after FIRST.  */
1451 next = ALLOCNO_NEXT_COALESCED_ALLOCNO (first);
1452 ALLOCNO_NEXT_COALESCED_ALLOCNO (first) = a2;
1453 ALLOCNO_NEXT_COALESCED_ALLOCNO (last) = next;
1456 /* Return TRUE if there are conflicting allocnos from two sets of
1457 coalesced allocnos given correspondingly by allocnos A1 and A2. If
1458 RELOAD_P is TRUE, we use live ranges to find conflicts because
1459 conflicts are represented only for allocnos of the same cover class
1460 and during the reload pass we coalesce allocnos for sharing stack
1463 coalesced_allocno_conflict_p (ira_allocno_t a1, ira_allocno_t a2,
1466 ira_allocno_t a, conflict_allocno;
1467 ira_allocno_conflict_iterator aci;
/* Record A1's whole coalesced set so membership checks against it in
   the non-reload path below are O(1) bitmap tests.  */
1469 if (allocno_coalesced_p)
1471 bitmap_clear (processed_coalesced_allocno_bitmap);
1472 for (a = ALLOCNO_NEXT_COALESCED_ALLOCNO (a1);;
1473 a = ALLOCNO_NEXT_COALESCED_ALLOCNO (a))
1475 bitmap_set_bit (processed_coalesced_allocno_bitmap, ALLOCNO_NUM (a));
/* Walk every member of A2's set (ring exit tests are elided from this
   listing).  */
1480 for (a = ALLOCNO_NEXT_COALESCED_ALLOCNO (a2);;
1481 a = ALLOCNO_NEXT_COALESCED_ALLOCNO (a))
/* Reload path: compare live ranges directly against A1's set.  */
1485 for (conflict_allocno = ALLOCNO_NEXT_COALESCED_ALLOCNO (a1);;
1487 = ALLOCNO_NEXT_COALESCED_ALLOCNO (conflict_allocno))
1489 if (ira_allocno_live_ranges_intersect_p (a, conflict_allocno))
1491 if (conflict_allocno == a1)
/* Non-reload path: use the recorded conflict graph plus the bitmap
   built above.  */
1497 FOR_EACH_ALLOCNO_CONFLICT (a, conflict_allocno, aci)
1498 if (conflict_allocno == a1
1499 || (allocno_coalesced_p
1500 && bitmap_bit_p (processed_coalesced_allocno_bitmap,
1501 ALLOCNO_NUM (conflict_allocno))))
1510 /* The major function for aggressive allocno coalescing. For the
1511 reload pass (RELOAD_P) we coalesce only spilled allocnos. If some
1512 allocnos have been coalesced, we set up flag
1513 allocno_coalesced_p. */
1515 coalesce_allocnos (bool reload_p)
1518 ira_copy_t cp, next_cp, *sorted_copies;
1519 enum reg_class cover_class;
1520 enum machine_mode mode;
1522 int i, n, cp_num, regno;
1525 sorted_copies = (ira_copy_t *) ira_allocate (ira_copies_num
1526 * sizeof (ira_copy_t));
1528 /* Collect copies. */
1529 EXECUTE_IF_SET_IN_BITMAP (coloring_allocno_bitmap, 0, j, bi)
1531 a = ira_allocnos[j];
1532 regno = ALLOCNO_REGNO (a);
/* Filter which allocnos may participate: before coloring, skip
   already-assigned ones; during reload, only spilled allocnos without
   an equivalence (which would go to memory anyway) qualify.  */
1533 if ((! reload_p && ALLOCNO_ASSIGNED_P (a))
1535 && (! ALLOCNO_ASSIGNED_P (a) || ALLOCNO_HARD_REGNO (a) >= 0
1536 || (regno < ira_reg_equiv_len
1537 && (ira_reg_equiv_const[regno] != NULL_RTX
1538 || ira_reg_equiv_invariant_p[regno])))))
1540 cover_class = ALLOCNO_COVER_CLASS (a);
1541 mode = ALLOCNO_MODE (a);
/* Walk copies where A is the first side; the second-side advance is
   handled at line 1562 below.  */
1542 for (cp = ALLOCNO_COPIES (a); cp != NULL; cp = next_cp)
1546 next_cp = cp->next_first_allocno_copy;
1547 regno = ALLOCNO_REGNO (cp->second);
/* The partner must match in class and mode and pass the same
   assignment/equivalence filter as A.  */
1549 || (ALLOCNO_COVER_CLASS (cp->second) == cover_class
1550 && ALLOCNO_MODE (cp->second) == mode))
1551 && (cp->insn != NULL || cp->constraint_p)
1552 && ((! reload_p && ! ALLOCNO_ASSIGNED_P (cp->second))
1554 && ALLOCNO_ASSIGNED_P (cp->second)
1555 && ALLOCNO_HARD_REGNO (cp->second) < 0
1556 && (regno >= ira_reg_equiv_len
1557 || (! ira_reg_equiv_invariant_p[regno]
1558 && ira_reg_equiv_const[regno] == NULL_RTX)))))
1559 sorted_copies[cp_num++] = cp;
1561 else if (cp->second == a)
1562 next_cp = cp->next_second_allocno_copy;
1567 qsort (sorted_copies, cp_num, sizeof (ira_copy_t), copy_freq_compare_func);
1568 /* Coalesced copies, most frequently executed first. */
1569 for (; cp_num != 0;)
1571 for (i = 0; i < cp_num; i++)
1573 cp = sorted_copies[i];
/* Merge the two sides whenever their coalesced sets do not conflict.  */
1574 if (! coalesced_allocno_conflict_p (cp->first, cp->second, reload_p))
1576 allocno_coalesced_p = true;
1577 if (internal_flag_ira_verbose > 3 && ira_dump_file != NULL)
1580 " Coalescing copy %d:a%dr%d-a%dr%d (freq=%d)\n",
1581 cp->num, ALLOCNO_NUM (cp->first), ALLOCNO_REGNO (cp->first),
1582 ALLOCNO_NUM (cp->second), ALLOCNO_REGNO (cp->second),
1584 merge_allocnos (cp->first, cp->second);
1589 /* Collect the rest of copies. */
/* Compact sorted_copies to copies whose sides are still in different
   sets, then retry (outer loop at line 1569).  */
1590 for (n = 0; i < cp_num; i++)
1592 cp = sorted_copies[i];
1593 if (ALLOCNO_FIRST_COALESCED_ALLOCNO (cp->first)
1594 != ALLOCNO_FIRST_COALESCED_ALLOCNO (cp->second))
1595 sorted_copies[n++] = cp;
1599 ira_free (sorted_copies);
1602 /* Chaitin-Briggs coloring for allocnos in COLORING_ALLOCNO_BITMAP
1603 taking into account allocnos in CONSIDERATION_ALLOCNO_BITMAP. */
/* Top-level driver for one coloring region: optional coalescing,
   bucket construction, push phase, then pop/assign phase.  */
1605 color_allocnos (void)
1611 allocno_coalesced_p = false;
1612 processed_coalesced_allocno_bitmap = ira_allocate_bitmap ();
1613 if (flag_ira_coalesce)
1614 coalesce_allocnos (false);
1615 /* Put the allocnos into the corresponding buckets. */
1616 colorable_allocno_bucket = NULL;
1617 uncolorable_allocno_bucket = NULL;
1618 EXECUTE_IF_SET_IN_BITMAP (coloring_allocno_bitmap, 0, i, bi)
1620 a = ira_allocnos[i];
/* NO_REGS allocnos are spilled immediately and never enter a bucket.  */
1621 if (ALLOCNO_COVER_CLASS (a) == NO_REGS)
1623 ALLOCNO_HARD_REGNO (a) = -1;
1624 ALLOCNO_ASSIGNED_P (a) = true;
1625 ira_assert (ALLOCNO_UPDATED_HARD_REG_COSTS (a) == NULL);
1626 ira_assert (ALLOCNO_UPDATED_CONFLICT_HARD_REG_COSTS (a) == NULL);
1627 if (internal_flag_ira_verbose > 3 && ira_dump_file != NULL)
1629 fprintf (ira_dump_file, " Spill");
1630 print_coalesced_allocno (a);
1631 fprintf (ira_dump_file, "\n");
1635 put_allocno_into_bucket (a);
1637 push_allocnos_to_stack ();
1638 pop_allocnos_from_stack ();
1639 if (flag_ira_coalesce)
1640 /* We don't need coalesced allocnos for ira_reassign_pseudos. */
/* Undo coalescing: make every allocno a singleton set again.  */
1641 EXECUTE_IF_SET_IN_BITMAP (coloring_allocno_bitmap, 0, i, bi)
1643 a = ira_allocnos[i];
1644 ALLOCNO_FIRST_COALESCED_ALLOCNO (a) = a;
1645 ALLOCNO_NEXT_COALESCED_ALLOCNO (a) = a;
1647 ira_free_bitmap (processed_coalesced_allocno_bitmap);
1648 allocno_coalesced_p = false;
1653 /* Output information about the loop given by its LOOP_TREE_NODE. */
/* Dump-only helper: prints the loop's allocnos, modified regnos, border
   allocnos, and per-class register pressure to ira_dump_file.  */
1655 print_loop_title (ira_loop_tree_node_t loop_tree_node)
1660 ira_assert (loop_tree_node->loop != NULL);
1661 fprintf (ira_dump_file,
1662 "\n Loop %d (parent %d, header bb%d, depth %d)\n all:",
1663 loop_tree_node->loop->num,
1664 (loop_tree_node->parent == NULL
1665 ? -1 : loop_tree_node->parent->loop->num),
1666 loop_tree_node->loop->header->index,
1667 loop_depth (loop_tree_node->loop));
1668 EXECUTE_IF_SET_IN_BITMAP (loop_tree_node->all_allocnos, 0, j, bi)
1669 fprintf (ira_dump_file, " %dr%d", j, ALLOCNO_REGNO (ira_allocnos[j]));
1670 fprintf (ira_dump_file, "\n modified regnos:");
1671 EXECUTE_IF_SET_IN_BITMAP (loop_tree_node->modified_regnos, 0, j, bi)
1672 fprintf (ira_dump_file, " %d", j);
1673 fprintf (ira_dump_file, "\n border:");
1674 EXECUTE_IF_SET_IN_BITMAP (loop_tree_node->border_allocnos, 0, j, bi)
1675 fprintf (ira_dump_file, " %dr%d", j, ALLOCNO_REGNO (ira_allocnos[j]));
1676 fprintf (ira_dump_file, "\n Pressure:");
1677 for (j = 0; (int) j < ira_reg_class_cover_size; j++)
1679 enum reg_class cover_class;
1681 cover_class = ira_reg_class_cover[j];
/* Classes with zero pressure are skipped (the continue is elided).  */
1682 if (loop_tree_node->reg_pressure[cover_class] == 0)
1684 fprintf (ira_dump_file, " %s=%d", reg_class_names[cover_class],
1685 loop_tree_node->reg_pressure[cover_class]);
1687 fprintf (ira_dump_file, "\n");
1690 /* Color the allocnos inside loop (in the extreme case it can be all
1691 of the function) given the corresponding LOOP_TREE_NODE. The
1692 function is called for each loop during top-down traverse of the
/* NOTE(review): this numbered listing elides some original lines; the
   comments below describe only what the visible lines establish.  */
1695 color_pass (ira_loop_tree_node_t loop_tree_node)
1697 int regno, hard_regno, index = -1;
1698 int cost, exit_freq, enter_freq;
1701 enum machine_mode mode;
1702 enum reg_class rclass, cover_class;
1703 ira_allocno_t a, subloop_allocno;
1704 ira_loop_tree_node_t subloop_node;
1706 ira_assert (loop_tree_node->bb == NULL);
1707 if (internal_flag_ira_verbose > 1 && ira_dump_file != NULL)
1708 print_loop_title (loop_tree_node);
/* Color only the allocnos of this region that are not yet assigned;
   assigned ones stay in consideration_allocno_bitmap as constraints.  */
1710 bitmap_copy (coloring_allocno_bitmap, loop_tree_node->all_allocnos);
1711 bitmap_copy (consideration_allocno_bitmap, coloring_allocno_bitmap);
1712 EXECUTE_IF_SET_IN_BITMAP (consideration_allocno_bitmap, 0, j, bi)
1714 a = ira_allocnos[j];
1715 if (! ALLOCNO_ASSIGNED_P (a))
1717 bitmap_clear_bit (coloring_allocno_bitmap, ALLOCNO_NUM (a));
1719 /* Color all mentioned allocnos including transparent ones. */
1721 /* Process caps. They are processed just once. */
1722 if (flag_ira_algorithm == IRA_ALGORITHM_MIXED
1723 || flag_ira_algorithm == IRA_ALGORITHM_REGIONAL)
1724 EXECUTE_IF_SET_IN_BITMAP (loop_tree_node->all_allocnos, 0, j, bi)
1726 a = ira_allocnos[j];
1727 if (ALLOCNO_CAP_MEMBER (a) == NULL)
1729 /* Remove from processing in the next loop. */
1730 bitmap_clear_bit (consideration_allocno_bitmap, j);
1731 rclass = ALLOCNO_COVER_CLASS (a);
/* With low register pressure in the mixed algorithm, propagate the
   cap's assignment directly down to the cap member.  */
1732 if ((flag_ira_algorithm == IRA_ALGORITHM_MIXED
1733 && loop_tree_node->reg_pressure[rclass]
1734 <= ira_available_class_regs[rclass]))
1736 mode = ALLOCNO_MODE (a);
1737 hard_regno = ALLOCNO_HARD_REGNO (a);
1738 if (hard_regno >= 0)
1740 index = ira_class_hard_reg_index[rclass][hard_regno];
1741 ira_assert (index >= 0);
1743 regno = ALLOCNO_REGNO (a);
1744 subloop_allocno = ALLOCNO_CAP_MEMBER (a);
1745 subloop_node = ALLOCNO_LOOP_TREE_NODE (subloop_allocno);
1746 ira_assert (!ALLOCNO_ASSIGNED_P (subloop_allocno));
1747 ALLOCNO_HARD_REGNO (subloop_allocno) = hard_regno;
1748 ALLOCNO_ASSIGNED_P (subloop_allocno) = true;
1749 if (hard_regno >= 0)
1750 update_copy_costs (subloop_allocno, true);
1751 /* We don't need updated costs anymore: */
1752 ira_free_allocno_updated_costs (subloop_allocno);
1755 /* Update costs of the corresponding allocnos (not caps) in the
1757 for (subloop_node = loop_tree_node->subloops;
1758 subloop_node != NULL;
1759 subloop_node = subloop_node->subloop_next)
1761 ira_assert (subloop_node->bb == NULL);
1762 EXECUTE_IF_SET_IN_BITMAP (consideration_allocno_bitmap, 0, j, bi)
1764 a = ira_allocnos[j];
1765 ira_assert (ALLOCNO_CAP_MEMBER (a) == NULL);
1766 mode = ALLOCNO_MODE (a);
1767 rclass = ALLOCNO_COVER_CLASS (a);
1768 hard_regno = ALLOCNO_HARD_REGNO (a);
1769 if (hard_regno >= 0)
1771 index = ira_class_hard_reg_index[rclass][hard_regno];
1772 ira_assert (index >= 0);
1774 regno = ALLOCNO_REGNO (a);
1775 /* ??? conflict costs */
1776 subloop_allocno = subloop_node->regno_allocno_map[regno];
1777 if (subloop_allocno == NULL
1778 || ALLOCNO_CAP (subloop_allocno) != NULL)
1780 ira_assert (bitmap_bit_p (subloop_node->all_allocnos,
1781 ALLOCNO_NUM (subloop_allocno)));
/* Low pressure in the subloop: just inherit the parent's decision.  */
1782 if (flag_ira_algorithm == IRA_ALGORITHM_MIXED
1783 && (loop_tree_node->reg_pressure[rclass]
1784 <= ira_available_class_regs[rclass]))
1786 if (! ALLOCNO_ASSIGNED_P (subloop_allocno))
1788 ALLOCNO_HARD_REGNO (subloop_allocno) = hard_regno;
1789 ALLOCNO_ASSIGNED_P (subloop_allocno) = true;
1790 if (hard_regno >= 0)
1791 update_copy_costs (subloop_allocno, true);
1792 /* We don't need updated costs anymore: */
1793 ira_free_allocno_updated_costs (subloop_allocno);
/* Otherwise bias the subloop allocno's costs by the spill/restore
   traffic on the loop border (entry/exit edge frequencies).  */
1797 exit_freq = ira_loop_edge_freq (subloop_node, regno, true);
1798 enter_freq = ira_loop_edge_freq (subloop_node, regno, false);
1799 ira_assert (regno < ira_reg_equiv_len);
/* Registers with an equivalence also simply inherit the parent's
   assignment — storing them to memory is free.  */
1800 if (ira_reg_equiv_invariant_p[regno]
1801 || ira_reg_equiv_const[regno] != NULL_RTX)
1803 if (! ALLOCNO_ASSIGNED_P (subloop_allocno))
1805 ALLOCNO_HARD_REGNO (subloop_allocno) = hard_regno;
1806 ALLOCNO_ASSIGNED_P (subloop_allocno) = true;
1807 if (hard_regno >= 0)
1808 update_copy_costs (subloop_allocno, true);
1809 /* We don't need updated costs anymore: */
1810 ira_free_allocno_updated_costs (subloop_allocno);
/* Parent spilled: keeping the subloop allocno in memory avoids the
   border loads/stores, so cut its memory cost accordingly.  */
1813 else if (hard_regno < 0)
1815 ALLOCNO_UPDATED_MEMORY_COST (subloop_allocno)
1816 -= ((ira_memory_move_cost[mode][rclass][1] * enter_freq)
1817 + (ira_memory_move_cost[mode][rclass][0] * exit_freq));
/* Parent got a hard register: make that register cheaper for the
   subloop allocno by the saved border register-move cost, and raise
   its memory cost by the border memory-move cost.  */
1821 cover_class = ALLOCNO_COVER_CLASS (subloop_allocno);
1822 cost = (ira_register_move_cost[mode][rclass][rclass]
1823 * (exit_freq + enter_freq));
1824 ira_allocate_and_set_or_copy_costs
1825 (&ALLOCNO_UPDATED_HARD_REG_COSTS (subloop_allocno), cover_class,
1826 ALLOCNO_UPDATED_COVER_CLASS_COST (subloop_allocno),
1827 ALLOCNO_HARD_REG_COSTS (subloop_allocno));
1828 ira_allocate_and_set_or_copy_costs
1829 (&ALLOCNO_UPDATED_CONFLICT_HARD_REG_COSTS (subloop_allocno),
1831 ALLOCNO_CONFLICT_HARD_REG_COSTS (subloop_allocno));
1832 ALLOCNO_UPDATED_HARD_REG_COSTS (subloop_allocno)[index] -= cost;
1833 ALLOCNO_UPDATED_CONFLICT_HARD_REG_COSTS (subloop_allocno)[index]
1835 if (ALLOCNO_UPDATED_COVER_CLASS_COST (subloop_allocno)
1836 > ALLOCNO_UPDATED_HARD_REG_COSTS (subloop_allocno)[index])
1837 ALLOCNO_UPDATED_COVER_CLASS_COST (subloop_allocno)
1838 = ALLOCNO_UPDATED_HARD_REG_COSTS (subloop_allocno)[index];
1839 ALLOCNO_UPDATED_MEMORY_COST (subloop_allocno)
1840 += (ira_memory_move_cost[mode][rclass][0] * enter_freq
1841 + ira_memory_move_cost[mode][rclass][1] * exit_freq);
1847 /* Initialize the common data for coloring and calls functions to do
1848 Chaitin-Briggs and regional coloring. */
/* NOTE(review): the function's declaration line (and probably a second
   bitmap allocation) is elided from this listing — in GCC's ira-color.c
   this is the static driver called from ira_color; confirm against the
   full source.  Allocates the shared bitmaps/arrays/pool, walks the loop
   tree calling color_pass for each loop, then frees everything.  */
1852 coloring_allocno_bitmap = ira_allocate_bitmap ();
1853 allocnos_for_spilling
1854 = (ira_allocno_t *) ira_allocate (sizeof (ira_allocno_t)
1855 * ira_allocnos_num);
1856 splay_tree_node_pool = create_alloc_pool ("splay tree nodes",
1857 sizeof (struct splay_tree_node_s),
1859 if (internal_flag_ira_verbose > 0 && ira_dump_file != NULL)
1860 fprintf (ira_dump_file, "\n**** Allocnos coloring:\n\n");
/* Top-down traversal: outer regions are colored before inner ones.  */
1862 ira_traverse_loop_tree (false, ira_loop_tree_root, color_pass, NULL);
1864 if (internal_flag_ira_verbose > 1 && ira_dump_file != NULL)
1865 ira_print_disposition (ira_dump_file);
1867 free_alloc_pool (splay_tree_node_pool);
1868 ira_free_bitmap (coloring_allocno_bitmap);
1869 ira_free (allocnos_for_spilling);
1874 /* Move spill/restore code, which are to be generated in ira-emit.c,
1875 to less frequent points (if it is profitable) by reassigning some
1876 allocnos (in loop with subloops containing in another loop) to
1877 memory which results in longer live-range where the corresponding
1878 pseudo-registers will be in memory. */
/* NOTE(review): the iteration control around this body (the outer
   "changed" loop and its closing lines) is elided from this listing.  */
1880 move_spill_restore (void)
1882 int cost, regno, hard_regno, hard_regno2, index;
1884 int enter_freq, exit_freq;
1885 enum machine_mode mode;
1886 enum reg_class rclass;
1887 ira_allocno_t a, parent_allocno, subloop_allocno;
1888 ira_loop_tree_node_t parent, loop_node, subloop_node;
1889 ira_allocno_iterator ai;
1894 if (internal_flag_ira_verbose > 0 && ira_dump_file != NULL)
1895 fprintf (ira_dump_file, "New iteration of spill/restore move\n");
1896 FOR_EACH_ALLOCNO (a, ai)
1898 regno = ALLOCNO_REGNO (a);
1899 loop_node = ALLOCNO_LOOP_TREE_NODE (a);
/* Only consider loop-level allocnos that got a hard register, have
   subloops, have no equivalence, and live across the loop border.  */
1900 if (ALLOCNO_CAP_MEMBER (a) != NULL
1901 || ALLOCNO_CAP (a) != NULL
1902 || (hard_regno = ALLOCNO_HARD_REGNO (a)) < 0
1903 || loop_node->children == NULL
1904 /* don't do the optimization because it can create
1905 copies and the reload pass can spill the allocno set
1906 by copy although the allocno will not get memory
1908 || ira_reg_equiv_invariant_p[regno]
1909 || ira_reg_equiv_const[regno] != NULL_RTX
1910 || !bitmap_bit_p (loop_node->border_allocnos, ALLOCNO_NUM (a)))
1912 mode = ALLOCNO_MODE (a);
1913 rclass = ALLOCNO_COVER_CLASS (a);
1914 index = ira_class_hard_reg_index[rclass][hard_regno];
1915 ira_assert (index >= 0);
/* COST accumulates the benefit of keeping A in its hard register
   rather than memory; if it ends up negative, spilling A is cheaper.  */
1916 cost = (ALLOCNO_MEMORY_COST (a)
1917 - (ALLOCNO_HARD_REG_COSTS (a) == NULL
1918 ? ALLOCNO_COVER_CLASS_COST (a)
1919 : ALLOCNO_HARD_REG_COSTS (a)[index]));
1920 for (subloop_node = loop_node->subloops;
1921 subloop_node != NULL;
1922 subloop_node = subloop_node->subloop_next)
1924 ira_assert (subloop_node->bb == NULL);
1925 subloop_allocno = subloop_node->regno_allocno_map[regno];
1926 if (subloop_allocno == NULL)
1928 /* We have accumulated cost. To get the real cost of
1929 allocno usage in the loop we should subtract costs of
1930 the subloop allocnos. */
1931 cost -= (ALLOCNO_MEMORY_COST (subloop_allocno)
1932 - (ALLOCNO_HARD_REG_COSTS (subloop_allocno) == NULL
1933 ? ALLOCNO_COVER_CLASS_COST (subloop_allocno)
1934 : ALLOCNO_HARD_REG_COSTS (subloop_allocno)[index]));
1935 exit_freq = ira_loop_edge_freq (subloop_node, regno, true);
1936 enter_freq = ira_loop_edge_freq (subloop_node, regno, false);
/* Subloop allocno spilled: spilling A too removes the border
   stores/loads around the subloop.  */
1937 if ((hard_regno2 = ALLOCNO_HARD_REGNO (subloop_allocno)) < 0)
1938 cost -= (ira_memory_move_cost[mode][rclass][0] * exit_freq
1939 + ira_memory_move_cost[mode][rclass][1] * enter_freq);
1943 += (ira_memory_move_cost[mode][rclass][0] * exit_freq
1944 + ira_memory_move_cost[mode][rclass][1] * enter_freq);
1945 if (hard_regno2 != hard_regno)
1946 cost -= (ira_register_move_cost[mode][rclass][rclass]
1947 * (exit_freq + enter_freq));
/* Likewise account for the border traffic against the parent loop's
   allocno for the same pseudo, if there is one.  */
1950 if ((parent = loop_node->parent) != NULL
1951 && (parent_allocno = parent->regno_allocno_map[regno]) != NULL)
1953 exit_freq = ira_loop_edge_freq (loop_node, regno, true);
1954 enter_freq = ira_loop_edge_freq (loop_node, regno, false);
1955 if ((hard_regno2 = ALLOCNO_HARD_REGNO (parent_allocno)) < 0)
1956 cost -= (ira_memory_move_cost[mode][rclass][0] * exit_freq
1957 + ira_memory_move_cost[mode][rclass][1] * enter_freq);
1961 += (ira_memory_move_cost[mode][rclass][1] * exit_freq
1962 + ira_memory_move_cost[mode][rclass][0] * enter_freq);
1963 if (hard_regno2 != hard_regno)
1964 cost -= (ira_register_move_cost[mode][rclass][rclass]
1965 * (exit_freq + enter_freq));
/* Net loss from keeping the hard register: spill A instead (the
   guarding cost test is elided from this listing).  */
1970 ALLOCNO_HARD_REGNO (a) = -1;
1971 if (internal_flag_ira_verbose > 3 && ira_dump_file != NULL)
1975 " Moving spill/restore for a%dr%d up from loop %d",
1976 ALLOCNO_NUM (a), regno, loop_node->loop->num);
1977 fprintf (ira_dump_file, " - profit %d\n", -cost);
1989 /* Update current hard reg costs and current conflict hard reg costs
1990 for allocno A. It is done by processing its copies containing
1991 other allocnos already assigned. */
/* For each copy joining A with an already-assigned allocno of the same
   cover class, discount the partner's hard register in A's updated cost
   vectors by the register-move cost times the copy frequency, biasing
   the later assignment of A toward that register.  */
1993 update_curr_costs (ira_allocno_t a)
1995 int i, hard_regno, cost;
1996 enum machine_mode mode;
1997 enum reg_class cover_class, rclass;
1998 ira_allocno_t another_a;
1999 ira_copy_t cp, next_cp;
2001 ira_assert (! ALLOCNO_ASSIGNED_P (a));
2002 cover_class = ALLOCNO_COVER_CLASS (a);
2003 if (cover_class == NO_REGS)
2005 mode = ALLOCNO_MODE (a);
/* A may be on either side of each copy; pick the other side.  */
2006 for (cp = ALLOCNO_COPIES (a); cp != NULL; cp = next_cp)
2010 next_cp = cp->next_first_allocno_copy;
2011 another_a = cp->second;
2013 else if (cp->second == a)
2015 next_cp = cp->next_second_allocno_copy;
2016 another_a = cp->first;
/* Skip partners of a different class, unassigned, or spilled.  */
2020 if (cover_class != ALLOCNO_COVER_CLASS (another_a)
2021 || ! ALLOCNO_ASSIGNED_P (another_a)
2022 || (hard_regno = ALLOCNO_HARD_REGNO (another_a)) < 0)
2024 rclass = REGNO_REG_CLASS (hard_regno);
2025 i = ira_class_hard_reg_index[cover_class][hard_regno];
2026 ira_assert (i >= 0);
/* Move-cost direction depends on which side of the copy A is.  */
2027 cost = (cp->first == a
2028 ? ira_register_move_cost[mode][rclass][cover_class]
2029 : ira_register_move_cost[mode][cover_class][rclass]);
/* Lazily materialize the updated cost vectors before adjusting.  */
2030 ira_allocate_and_set_or_copy_costs
2031 (&ALLOCNO_UPDATED_HARD_REG_COSTS (a),
2032 cover_class, ALLOCNO_COVER_CLASS_COST (a),
2033 ALLOCNO_HARD_REG_COSTS (a));
2034 ira_allocate_and_set_or_copy_costs
2035 (&ALLOCNO_UPDATED_CONFLICT_HARD_REG_COSTS (a),
2036 cover_class, 0, ALLOCNO_CONFLICT_HARD_REG_COSTS (a));
2037 ALLOCNO_UPDATED_HARD_REG_COSTS (a)[i] -= cp->freq * cost;
2038 ALLOCNO_UPDATED_CONFLICT_HARD_REG_COSTS (a)[i] -= cp->freq * cost;
2042 /* Map: allocno number -> allocno priority. */
2043 static int *allocno_priorities;
2045 /* Set up priorities for N allocnos in array
2046 CONSIDERATION_ALLOCNOS. */
/* Fills the global allocno_priorities map.  A priority grows with
   reference count and spill cost (memory cost minus best register
   cost), then is normalized by excess-pressure length; some lines of
   the formula are elided from this listing.  */
2048 setup_allocno_priorities (ira_allocno_t *consideration_allocnos, int n)
2050 int i, length, nrefs, priority, max_priority, mult;
2054 for (i = 0; i < n; i++)
2056 a = consideration_allocnos[i];
2057 nrefs = ALLOCNO_NREFS (a);
2058 ira_assert (nrefs >= 0);
/* log2 scaling keeps heavily-referenced allocnos from dominating.  */
2059 mult = floor_log2 (ALLOCNO_NREFS (a)) + 1;
2060 ira_assert (mult >= 0);
2061 allocno_priorities[ALLOCNO_NUM (a)]
2064 * (ALLOCNO_MEMORY_COST (a) - ALLOCNO_COVER_CLASS_COST (a))
2065 * ira_reg_class_nregs[ALLOCNO_COVER_CLASS (a)][ALLOCNO_MODE (a)]);
2067 priority = -priority;
2068 if (max_priority < priority)
2069 max_priority = priority;
/* Rescale so the largest magnitude fits the int range.  */
2071 mult = max_priority == 0 ? 1 : INT_MAX / max_priority;
2072 for (i = 0; i < n; i++)
2074 a = consideration_allocnos[i];
2075 length = ALLOCNO_EXCESS_PRESSURE_POINTS_NUM (a);
2078 allocno_priorities[ALLOCNO_NUM (a)]
2079 = allocno_priorities[ALLOCNO_NUM (a)] * mult / length;
2083 /* Sort allocnos according to their priorities which are calculated
2084 analogous to ones in file `global.c'. */
/* qsort comparator over allocno_priorities (the priority-difference
   return is elided from this listing); ties fall back to allocno
   numbers so the sort is deterministic.  */
2086 allocno_priority_compare_func (const void *v1p, const void *v2p)
2088 ira_allocno_t a1 = *(const ira_allocno_t *) v1p;
2089 ira_allocno_t a2 = *(const ira_allocno_t *) v2p;
2092 pri1 = allocno_priorities[ALLOCNO_NUM (a1)];
2093 pri2 = allocno_priorities[ALLOCNO_NUM (a2)];
2097 /* If regs are equally good, sort by allocnos, so that the results of
2098 qsort leave nothing to chance. */
2099 return ALLOCNO_NUM (a1) - ALLOCNO_NUM (a2);
2102 /* Try to assign hard registers to the unassigned allocnos and
2103 allocnos conflicting with them or conflicting with allocnos whose
2104 regno >= START_REGNO. The function is called after ira_flattening,
2105 so more allocnos (including ones created in ira-emit.c) will have a
2106 chance to get a hard register. We use simple assignment algorithm
2107 based on priorities. */
2109 ira_reassign_conflict_allocnos (int start_regno)
2111 int i, allocnos_to_color_num;
2112 ira_allocno_t a, conflict_a;
2113 ira_allocno_conflict_iterator aci;
2114 enum reg_class cover_class;
2115 bitmap allocnos_to_color;
2116 ira_allocno_iterator ai;
/* Phase 1: gather candidates — every unassigned allocno with a real
   cover class, plus (below) conflicts of allocnos at or above
   START_REGNO.  */
2118 allocnos_to_color = ira_allocate_bitmap ();
2119 allocnos_to_color_num = 0;
2120 FOR_EACH_ALLOCNO (a, ai)
2122 if (! ALLOCNO_ASSIGNED_P (a)
2123 && ! bitmap_bit_p (allocnos_to_color, ALLOCNO_NUM (a)))
2125 if (ALLOCNO_COVER_CLASS (a) != NO_REGS)
2126 sorted_allocnos[allocnos_to_color_num++] = a;
/* NO_REGS candidates are assigned memory on the spot.  */
2129 ALLOCNO_ASSIGNED_P (a) = true;
2130 ALLOCNO_HARD_REGNO (a) = -1;
2131 ira_assert (ALLOCNO_UPDATED_HARD_REG_COSTS (a) == NULL);
2132 ira_assert (ALLOCNO_UPDATED_CONFLICT_HARD_REG_COSTS (a) == NULL);
2134 bitmap_set_bit (allocnos_to_color, ALLOCNO_NUM (a));
2136 if (ALLOCNO_REGNO (a) < start_regno
2137 || (cover_class = ALLOCNO_COVER_CLASS (a)) == NO_REGS)
2139 FOR_EACH_ALLOCNO_CONFLICT (a, conflict_a, aci)
2141 ira_assert (cover_class == ALLOCNO_COVER_CLASS (conflict_a));
2142 if (bitmap_bit_p (allocnos_to_color, ALLOCNO_NUM (conflict_a)))
2144 bitmap_set_bit (allocnos_to_color, ALLOCNO_NUM (conflict_a));
2145 sorted_allocnos[allocnos_to_color_num++] = conflict_a;
2148 ira_free_bitmap (allocnos_to_color);
/* Phase 2: order candidates by priority (skipped for <2 elements).  */
2149 if (allocnos_to_color_num > 1)
2151 setup_allocno_priorities (sorted_allocnos, allocnos_to_color_num);
2152 qsort (sorted_allocnos, allocnos_to_color_num, sizeof (ira_allocno_t),
2153 allocno_priority_compare_func);
/* Phase 3: un-assign all candidates and refresh their copy-derived
   cost vectors before reassigning.  */
2155 for (i = 0; i < allocnos_to_color_num; i++)
2157 a = sorted_allocnos[i];
2158 ALLOCNO_ASSIGNED_P (a) = false;
2159 ira_assert (ALLOCNO_UPDATED_HARD_REG_COSTS (a) == NULL);
2160 ira_assert (ALLOCNO_UPDATED_CONFLICT_HARD_REG_COSTS (a) == NULL);
2161 update_curr_costs (a);
/* Phase 4: greedy assignment in priority order.  */
2163 for (i = 0; i < allocnos_to_color_num; i++)
2165 a = sorted_allocnos[i];
2166 if (assign_hard_reg (a, true))
2168 if (internal_flag_ira_verbose > 3 && ira_dump_file != NULL)
2171 " Secondary allocation: assign hard reg %d to reg %d\n",
2172 ALLOCNO_HARD_REGNO (a), ALLOCNO_REGNO (a));
2179 /* This page contains code to coalesce memory stack slots used by
2180 spilled allocnos. This results in smaller stack frame, better data
2181 locality, and in smaller code for some architectures like
2182 x86/x86_64 where insn size depends on address displacement value.
2183 On the other hand, it can worsen insn scheduling after the RA but
2184 in practice it is less important than smaller stack frames. */
2186 /* Usage cost and order number of the coalesced allocno set to which
2187 a given pseudo register belongs. */
2188 static int *regno_coalesced_allocno_cost;
2189 static int *regno_coalesced_allocno_num;
2191 /* Sort pseudos according frequencies of coalesced allocno sets they
2192 belong to (putting most frequently ones first), and according to
2193 coalesced allocno set order numbers. */
2195 coalesced_pseudo_reg_freq_compare (const void *v1p, const void *v2p)
2197 const int regno1 = *(const int *) v1p;
2198 const int regno2 = *(const int *) v2p;
2201 if ((diff = (regno_coalesced_allocno_cost[regno2]
2202 - regno_coalesced_allocno_cost[regno1])) != 0)
2204 if ((diff = (regno_coalesced_allocno_num[regno1]
2205 - regno_coalesced_allocno_num[regno2])) != 0)
2207 return regno1 - regno2;
2210 /* Widest width in which each pseudo reg is referred to (via subreg).
2211 It is used for sorting pseudo registers. */
2212 static unsigned int *regno_max_ref_width;
2214 /* Redefine STACK_GROWS_DOWNWARD in terms of 0 or 1. */
2215 #ifdef STACK_GROWS_DOWNWARD
2216 # undef STACK_GROWS_DOWNWARD
2217 # define STACK_GROWS_DOWNWARD 1
2219 # define STACK_GROWS_DOWNWARD 0
2222 /* Sort pseudos according their slot numbers (putting ones with
2223 smaller numbers first, or last when the frame pointer is not
2226 coalesced_pseudo_reg_slot_compare (const void *v1p, const void *v2p)
2228 const int regno1 = *(const int *) v1p;
2229 const int regno2 = *(const int *) v2p;
2230 ira_allocno_t a1 = ira_regno_allocno_map[regno1];
2231 ira_allocno_t a2 = ira_regno_allocno_map[regno2];
2232 int diff, slot_num1, slot_num2;
2233 int total_size1, total_size2;
2235 if (a1 == NULL || ALLOCNO_HARD_REGNO (a1) >= 0)
2237 if (a2 == NULL || ALLOCNO_HARD_REGNO (a2) >= 0)
2238 return regno1 - regno2;
2241 else if (a2 == NULL || ALLOCNO_HARD_REGNO (a2) >= 0)
2243 slot_num1 = -ALLOCNO_HARD_REGNO (a1);
2244 slot_num2 = -ALLOCNO_HARD_REGNO (a2);
2245 if ((diff = slot_num1 - slot_num2) != 0)
2246 return (frame_pointer_needed
2247 || !FRAME_GROWS_DOWNWARD == STACK_GROWS_DOWNWARD ? diff : -diff);
2248 total_size1 = MAX (PSEUDO_REGNO_BYTES (regno1), regno_max_ref_width[regno1]);
2249 total_size2 = MAX (PSEUDO_REGNO_BYTES (regno2), regno_max_ref_width[regno2]);
2250 if ((diff = total_size2 - total_size1) != 0)
2252 return regno1 - regno2;
2255 /* Setup REGNO_COALESCED_ALLOCNO_COST and REGNO_COALESCED_ALLOCNO_NUM
2256 for coalesced allocno sets containing allocnos with their regnos
2257 given in array PSEUDO_REGNOS of length N. */
2259 setup_coalesced_allocno_costs_and_nums (int *pseudo_regnos, int n)
2261 int i, num, regno, cost;
2262 ira_allocno_t allocno, a;
2264 for (num = i = 0; i < n; i++)
2266 regno = pseudo_regnos[i];
2267 allocno = ira_regno_allocno_map[regno];
/* A pseudo without an allocno gets zero cost and its own fresh order
   number.  */
2268 if (allocno == NULL)
2270 regno_coalesced_allocno_cost[regno] = 0;
2271 regno_coalesced_allocno_num[regno] = ++num;
/* Only the representative (first) allocno of a coalesced set computes
   the set data; later members are skipped.  */
2274 if (ALLOCNO_FIRST_COALESCED_ALLOCNO (allocno) != allocno)
/* First walk over the set: sum the members' frequencies into COST.
   NOTE(review): the loop-exit lines are elided in this listing;
   presumably the walk stops when it cycles back to ALLOCNO -- confirm
   against the full source.  */
2277 for (cost = 0, a = ALLOCNO_NEXT_COALESCED_ALLOCNO (allocno);;
2278 a = ALLOCNO_NEXT_COALESCED_ALLOCNO (a))
2280 cost += ALLOCNO_FREQ (a);
/* Second walk: record the shared order number and summed cost for
   every member's regno.  */
2284 for (a = ALLOCNO_NEXT_COALESCED_ALLOCNO (allocno);;
2285 a = ALLOCNO_NEXT_COALESCED_ALLOCNO (a))
2287 regno_coalesced_allocno_num[ALLOCNO_REGNO (a)] = num;
2288 regno_coalesced_allocno_cost[ALLOCNO_REGNO (a)] = cost;
2295 /* Collect spilled allocnos representing coalesced allocno sets (the
2296 first coalesced allocno). The collected allocnos are returned
2297 through array SPILLED_COALESCED_ALLOCNOS. The function returns the
2298 number of the collected allocnos. The allocnos are given by their
2299 regnos in array PSEUDO_REGNOS of length N. */
2301 collect_spilled_coalesced_allocnos (int *pseudo_regnos, int n,
2302 ira_allocno_t *spilled_coalesced_allocnos)
2305 ira_allocno_t allocno;
2307 for (num = i = 0; i < n; i++)
2309 regno = pseudo_regnos[i];
2310 allocno = ira_regno_allocno_map[regno];
/* Skip pseudos with no allocno, allocnos that received a hard
   register, and non-representative members of a coalesced set.  */
2311 if (allocno == NULL || ALLOCNO_HARD_REGNO (allocno) >= 0
2312 || ALLOCNO_FIRST_COALESCED_ALLOCNO (allocno) != allocno)
2314 spilled_coalesced_allocnos[num++] = allocno;
2319 /* Array of bitmaps of size IRA_MAX_POINT. Bitmap for given point
2320 contains numbers of coalesced allocnos living at this point. */
2321 static regset_head *coalesced_allocnos_living_at_program_points;
2323 /* Return TRUE if coalesced allocnos represented by ALLOCNO live at
2324 program points of coalesced allocnos with number N. */
2326 coalesced_allocnos_live_at_points_p (ira_allocno_t allocno, int n)
2330 allocno_live_range_t r;
/* Walk every member of ALLOCNO's coalesced set, and every program
   point of each member's live ranges; the first point where set
   number N is also live proves an overlap.  */
2332 for (a = ALLOCNO_NEXT_COALESCED_ALLOCNO (allocno);;
2333 a = ALLOCNO_NEXT_COALESCED_ALLOCNO (a))
2335 for (r = ALLOCNO_LIVE_RANGES (a); r != NULL; r = r->next)
2336 for (i = r->start; i <= r->finish; i++)
2337 if (bitmap_bit_p (&coalesced_allocnos_living_at_program_points[i], n))
2345 /* Mark program points where coalesced allocnos represented by ALLOCNO
2348 set_coalesced_allocnos_live_points (ira_allocno_t allocno)
2352 allocno_live_range_t r;
/* N is the slot-coalescing number stored earlier in the
   representative's TEMP field (see coalesce_spill_slots).  */
2354 n = ALLOCNO_TEMP (allocno);
/* Mark N live at every program point covered by any member of
   ALLOCNO's coalesced set.  */
2355 for (a = ALLOCNO_NEXT_COALESCED_ALLOCNO (allocno);;
2356 a = ALLOCNO_NEXT_COALESCED_ALLOCNO (a))
2358 for (r = ALLOCNO_LIVE_RANGES (a); r != NULL; r = r->next)
2359 for (i = r->start; i <= r->finish; i++)
2360 bitmap_set_bit (&coalesced_allocnos_living_at_program_points[i], n);
2366 /* We have coalesced allocnos involving in copies. Coalesce allocnos
2367 further in order to share the same memory stack slot. Allocnos
2368 representing sets of allocnos coalesced before the call are given
2369 in array SPILLED_COALESCED_ALLOCNOS of length NUM. Return TRUE if
2370 some allocnos were coalesced in the function. */
2372 coalesce_spill_slots (ira_allocno_t *spilled_coalesced_allocnos, int num)
2374 int i, j, last_coalesced_allocno_num;
2375 ira_allocno_t allocno, a;
2376 bool merged_p = false;
/* One liveness bitmap per program point, indexed by the coalescing
   numbers assigned below through ALLOCNO_TEMP.  */
2378 coalesced_allocnos_living_at_program_points
2379 = (regset_head *) ira_allocate (sizeof (regset_head) * ira_max_point);
2380 for (i = 0; i < ira_max_point; i++)
2381 INIT_REG_SET (&coalesced_allocnos_living_at_program_points[i]);
2382 last_coalesced_allocno_num = 0;
2383 /* Coalesce non-conflicting spilled allocnos preferring most
2385 for (i = 0; i < num; i++)
2387 allocno = spilled_coalesced_allocnos[i];
/* Skip non-representatives and pseudos that have an equivalent
   invariant or constant -- those must keep their own slot.  */
2388 if (ALLOCNO_FIRST_COALESCED_ALLOCNO (allocno) != allocno
2389 || (ALLOCNO_REGNO (allocno) < ira_reg_equiv_len
2390 && (ira_reg_equiv_invariant_p[ALLOCNO_REGNO (allocno)]
2391 || ira_reg_equiv_const[ALLOCNO_REGNO (allocno)] != NULL_RTX)))
/* Search earlier (more frequently used) representatives for one whose
   live points do not intersect ALLOCNO's -- O(i) probes, so the whole
   pass is quadratic in NUM.  */
2393 for (j = 0; j < i; j++)
2395 a = spilled_coalesced_allocnos[j];
2396 if (ALLOCNO_FIRST_COALESCED_ALLOCNO (a) == a
2397 && (ALLOCNO_REGNO (a) >= ira_reg_equiv_len
2398 || (! ira_reg_equiv_invariant_p[ALLOCNO_REGNO (a)]
2399 && ira_reg_equiv_const[ALLOCNO_REGNO (a)] == NULL_RTX))
2400 && ! coalesced_allocnos_live_at_points_p (allocno,
2406 /* No coalescing: set up number for coalesced allocnos
2407 represented by ALLOCNO. */
2408 ALLOCNO_TEMP (allocno) = last_coalesced_allocno_num++;
2409 set_coalesced_allocnos_live_points (allocno);
2413 allocno_coalesced_p = true;
2415 if (internal_flag_ira_verbose > 3 && ira_dump_file != NULL)
2416 fprintf (ira_dump_file,
2417 " Coalescing spilled allocnos a%dr%d->a%dr%d\n",
2418 ALLOCNO_NUM (allocno), ALLOCNO_REGNO (allocno),
2419 ALLOCNO_NUM (a), ALLOCNO_REGNO (a));
/* Merge ALLOCNO's set into A's: share A's coalescing number, extend
   A's live points with ALLOCNO's, then merge the allocno chains.  */
2420 ALLOCNO_TEMP (allocno) = ALLOCNO_TEMP (a);
2421 set_coalesced_allocnos_live_points (allocno);
2422 merge_allocnos (a, allocno);
2423 ira_assert (ALLOCNO_FIRST_COALESCED_ALLOCNO (a) == a);
/* Release the per-point liveness bitmaps.  */
2426 for (i = 0; i < ira_max_point; i++)
2427 CLEAR_REG_SET (&coalesced_allocnos_living_at_program_points[i]);
2428 ira_free (coalesced_allocnos_living_at_program_points);
2432 /* Sort pseudo-register numbers in array PSEUDO_REGNOS of length N for
2433 subsequent assigning stack slots to them in the reload pass. To do
2434 this we coalesce spilled allocnos first to decrease the number of
2435 memory-memory move insns. This function is called by the
2438 ira_sort_regnos_for_alter_reg (int *pseudo_regnos, int n,
2439 unsigned int *reg_max_ref_width)
2441 int max_regno = max_reg_num ();
2442 int i, regno, num, slot_num;
2443 ira_allocno_t allocno, a;
2444 ira_allocno_iterator ai;
2445 ira_allocno_t *spilled_coalesced_allocnos;
2447 processed_coalesced_allocno_bitmap = ira_allocate_bitmap ();
2448 /* Set up allocnos which can be coalesced. */
2449 coloring_allocno_bitmap = ira_allocate_bitmap ();
2450 for (i = 0; i < n; i++)
2452 regno = pseudo_regnos[i];
2453 allocno = ira_regno_allocno_map[regno];
2454 if (allocno != NULL)
2455 bitmap_set_bit (coloring_allocno_bitmap,
2456 ALLOCNO_NUM (allocno));
2458 allocno_coalesced_p = false;
2459 coalesce_allocnos (true);
2460 ira_free_bitmap (coloring_allocno_bitmap);
/* Per-regno cost/order-number arrays for the frequency sort.  */
2461 regno_coalesced_allocno_cost
2462 = (int *) ira_allocate (max_regno * sizeof (int));
2463 regno_coalesced_allocno_num
2464 = (int *) ira_allocate (max_regno * sizeof (int));
2465 memset (regno_coalesced_allocno_num, 0, max_regno * sizeof (int));
2466 setup_coalesced_allocno_costs_and_nums (pseudo_regnos, n);
2467 /* Sort regnos according to frequencies of the corresponding coalesced
2469 qsort (pseudo_regnos, n, sizeof (int), coalesced_pseudo_reg_freq_compare);
2470 spilled_coalesced_allocnos
2471 = (ira_allocno_t *) ira_allocate (ira_allocnos_num
2472 * sizeof (ira_allocno_t));
2473 /* Collect allocnos representing the spilled coalesced allocno
2475 num = collect_spilled_coalesced_allocnos (pseudo_regnos, n,
2476 spilled_coalesced_allocnos);
/* If slot sharing coalesced anything further, the per-regno data and
   the sort order are stale: recompute, re-sort and re-collect.  */
2477 if (flag_ira_share_spill_slots
2478 && coalesce_spill_slots (spilled_coalesced_allocnos, num))
2480 setup_coalesced_allocno_costs_and_nums (pseudo_regnos, n);
2481 qsort (pseudo_regnos, n, sizeof (int),
2482 coalesced_pseudo_reg_freq_compare);
2483 num = collect_spilled_coalesced_allocnos (pseudo_regnos, n,
2484 spilled_coalesced_allocnos);
2486 ira_free_bitmap (processed_coalesced_allocno_bitmap);
2487 allocno_coalesced_p = false;
2488 /* Assign stack slot numbers to spilled allocno sets, use smaller
2489 numbers for most frequently used coalesced allocnos. -1 is
2490 reserved for dynamic search of stack slots for pseudos spilled by
2493 for (i = 0; i < num; i++)
2495 allocno = spilled_coalesced_allocnos[i];
/* Only representatives of memory-resident sets without register
   equivalences get a slot number.  */
2496 if (ALLOCNO_FIRST_COALESCED_ALLOCNO (allocno) != allocno
2497 || ALLOCNO_HARD_REGNO (allocno) >= 0
2498 || (ALLOCNO_REGNO (allocno) < ira_reg_equiv_len
2499 && (ira_reg_equiv_invariant_p[ALLOCNO_REGNO (allocno)]
2500 || ira_reg_equiv_const[ALLOCNO_REGNO (allocno)] != NULL_RTX)))
2502 if (internal_flag_ira_verbose > 3 && ira_dump_file != NULL)
2503 fprintf (ira_dump_file, " Slot %d (freq,size):", slot_num);
/* Record the slot as a negated "hard regno" on every member of the
   coalesced set.  */
2505 for (a = ALLOCNO_NEXT_COALESCED_ALLOCNO (allocno);;
2506 a = ALLOCNO_NEXT_COALESCED_ALLOCNO (a))
2508 ira_assert (ALLOCNO_HARD_REGNO (a) < 0);
2509 ALLOCNO_HARD_REGNO (a) = -slot_num;
2510 if (internal_flag_ira_verbose > 3 && ira_dump_file != NULL)
2511 fprintf (ira_dump_file, " a%dr%d(%d,%d)",
2512 ALLOCNO_NUM (a), ALLOCNO_REGNO (a), ALLOCNO_FREQ (a),
2513 MAX (PSEUDO_REGNO_BYTES (ALLOCNO_REGNO (a)),
2514 reg_max_ref_width[ALLOCNO_REGNO (a)]));
2519 if (internal_flag_ira_verbose > 3 && ira_dump_file != NULL)
2520 fprintf (ira_dump_file, "\n");
2522 ira_spilled_reg_stack_slots_num = slot_num - 1;
2523 ira_free (spilled_coalesced_allocnos);
2524 /* Sort regnos according to the slot numbers. */
2525 regno_max_ref_width = reg_max_ref_width;
2526 qsort (pseudo_regnos, n, sizeof (int), coalesced_pseudo_reg_slot_compare);
2527 /* Uncoalesce allocnos, which is necessary for (re)assigning during
2529 FOR_EACH_ALLOCNO (a, ai)
2531 ALLOCNO_FIRST_COALESCED_ALLOCNO (a) = a;
2532 ALLOCNO_NEXT_COALESCED_ALLOCNO (a) = a;
2534 ira_free (regno_coalesced_allocno_num);
2535 ira_free (regno_coalesced_allocno_cost);
2540 /* This page contains code used by the reload pass to improve the
2543 /* The function is called from reload to mark changes in the
2544 allocation of REGNO made by the reload. Remember that reg_renumber
2545 reflects the change result. */
2547 ira_mark_allocation_change (int regno)
2549 ira_allocno_t a = ira_regno_allocno_map[regno];
2550 int old_hard_regno, hard_regno, cost;
2551 enum reg_class cover_class = ALLOCNO_COVER_CLASS (a);
2553 ira_assert (a != NULL);
2554 hard_regno = reg_renumber[regno];
/* Nothing changed -- nothing to account.  */
2555 if ((old_hard_regno = ALLOCNO_HARD_REGNO (a)) == hard_regno)
/* COST starts as the negated cost of the old allocation (memory or
   the old hard register)...  */
2557 if (old_hard_regno < 0)
2558 cost = -ALLOCNO_MEMORY_COST (a);
2561 ira_assert (ira_class_hard_reg_index[cover_class][old_hard_regno] >= 0);
2562 cost = -(ALLOCNO_HARD_REG_COSTS (a) == NULL
2563 ? ALLOCNO_COVER_CLASS_COST (a)
2564 : ALLOCNO_HARD_REG_COSTS (a)
2565 [ira_class_hard_reg_index[cover_class][old_hard_regno]]);
2566 update_copy_costs (a, false);
2568 ira_overall_cost -= cost;
2569 ALLOCNO_HARD_REGNO (a) = hard_regno;
/* ...then the cost of the new allocation is added, and the net
   difference is folded into ira_overall_cost at the end.  */
2572 ALLOCNO_HARD_REGNO (a) = -1;
2573 cost += ALLOCNO_MEMORY_COST (a);
2575 else if (ira_class_hard_reg_index[cover_class][hard_regno] >= 0)
2577 cost += (ALLOCNO_HARD_REG_COSTS (a) == NULL
2578 ? ALLOCNO_COVER_CLASS_COST (a)
2579 : ALLOCNO_HARD_REG_COSTS (a)
2580 [ira_class_hard_reg_index[cover_class][hard_regno]]);
2581 update_copy_costs (a, true);
2584 /* Reload changed class of the allocno. */
2586 ira_overall_cost += cost;
2589 /* This function is called when reload deletes memory-memory move. In
2590 this case we mark that the allocation of the corresponding
2591 allocnos should not be changed in the future. Otherwise we risk to get
2594 ira_mark_memory_move_deletion (int dst_regno, int src_regno)
2596 ira_allocno_t dst = ira_regno_allocno_map[dst_regno];
2597 ira_allocno_t src = ira_regno_allocno_map[src_regno];
/* Both sides must currently live in memory; pinning them with
   DONT_REASSIGN keeps the deleted move valid.  */
2599 ira_assert (dst != NULL && src != NULL
2600 && ALLOCNO_HARD_REGNO (dst) < 0
2601 && ALLOCNO_HARD_REGNO (src) < 0);
2602 ALLOCNO_DONT_REASSIGN_P (dst) = true;
2603 ALLOCNO_DONT_REASSIGN_P (src) = true;
2606 /* Try to assign a hard register (except for FORBIDDEN_REGS) to
2607 allocno A and return TRUE in the case of success. That is an
2608 analog of retry_global_alloc for IRA. */
2610 allocno_reload_assign (ira_allocno_t a, HARD_REG_SET forbidden_regs)
2613 enum reg_class cover_class;
2614 int regno = ALLOCNO_REGNO (a);
/* Exclude the forbidden registers, and (when caller saves are not
   allowed) every call-clobbered register if A crosses calls.  */
2616 IOR_HARD_REG_SET (ALLOCNO_TOTAL_CONFLICT_HARD_REGS (a), forbidden_regs);
2617 if (! flag_caller_saves && ALLOCNO_CALLS_CROSSED_NUM (a) != 0)
2618 IOR_HARD_REG_SET (ALLOCNO_TOTAL_CONFLICT_HARD_REGS (a), call_used_reg_set);
2619 ALLOCNO_ASSIGNED_P (a) = false;
2620 ira_assert (ALLOCNO_UPDATED_HARD_REG_COSTS (a) == NULL);
2621 ira_assert (ALLOCNO_UPDATED_CONFLICT_HARD_REG_COSTS (a) == NULL);
2622 cover_class = ALLOCNO_COVER_CLASS (a);
2623 update_curr_costs (a);
2624 assign_hard_reg (a, true);
2625 hard_regno = ALLOCNO_HARD_REGNO (a);
2626 reg_renumber[regno] = hard_regno;
2628 ALLOCNO_HARD_REGNO (a) = -1;
/* On success, credit ira_overall_cost with the difference between
   keeping A in memory and using the chosen hard register.  */
2631 ira_assert (ira_class_hard_reg_index[cover_class][hard_regno] >= 0);
2632 ira_overall_cost -= (ALLOCNO_MEMORY_COST (a)
2633 - (ALLOCNO_HARD_REG_COSTS (a) == NULL
2634 ? ALLOCNO_COVER_CLASS_COST (a)
2635 : ALLOCNO_HARD_REG_COSTS (a)
2636 [ira_class_hard_reg_index
2637 [cover_class][hard_regno]]));
/* A call-crossing allocno in a call-clobbered register needs caller
   saves around the calls.  */
2638 if (ALLOCNO_CALLS_CROSSED_NUM (a) != 0
2639 && ! ira_hard_reg_not_in_set_p (hard_regno, ALLOCNO_MODE (a),
2642 ira_assert (flag_caller_saves);
2643 caller_save_needed = 1;
2647 /* If we found a hard register, modify the RTL for the pseudo
2648 register to show the hard register, and mark the pseudo register
2650 if (reg_renumber[regno] >= 0)
2652 if (internal_flag_ira_verbose > 3 && ira_dump_file != NULL)
2653 fprintf (ira_dump_file, ": reassign to %d\n", reg_renumber[regno]);
2654 SET_REGNO (regno_reg_rtx[regno], reg_renumber[regno]);
2655 mark_home_live (regno);
2657 else if (internal_flag_ira_verbose > 3 && ira_dump_file != NULL)
2658 fprintf (ira_dump_file, "\n");
2660 return reg_renumber[regno] >= 0;
/* qsort comparator: order pseudos by decreasing usage frequency,
   breaking ties by register number so the sort is deterministic.  */
static int
pseudo_reg_compare (const void *v1p, const void *v2p)
{
  const int regno1 = *(const int *) v1p;
  const int regno2 = *(const int *) v2p;
  const int freq_diff = REG_FREQ (regno2) - REG_FREQ (regno1);

  return freq_diff != 0 ? freq_diff : regno1 - regno2;
}
2677 /* Try to allocate hard registers to SPILLED_PSEUDO_REGS (there are
2678 NUM of them) or spilled pseudos conflicting with pseudos in
2679 SPILLED_PSEUDO_REGS. Return TRUE and update SPILLED, if the
2680 allocation has been changed. The function doesn't use
2681 BAD_SPILL_REGS and hard registers in PSEUDO_FORBIDDEN_REGS and
2682 PSEUDO_PREVIOUS_REGS for the corresponding pseudos. The function
2683 is called by the reload pass at the end of each reload
2686 ira_reassign_pseudos (int *spilled_pseudo_regs, int num,
2687 HARD_REG_SET bad_spill_regs,
2688 HARD_REG_SET *pseudo_forbidden_regs,
2689 HARD_REG_SET *pseudo_previous_regs, bitmap spilled)
2693 ira_allocno_t a, conflict_a;
2694 HARD_REG_SET forbidden_regs;
2695 ira_allocno_conflict_iterator aci;
/* Process the most frequently used pseudos first.  */
2698 qsort (spilled_pseudo_regs, num, sizeof (int), pseudo_reg_compare);
2700 /* Try to assign hard registers to pseudos from
2701 SPILLED_PSEUDO_REGS. */
2702 for (m = i = 0; i < num; i++)
2704 regno = spilled_pseudo_regs[i];
/* Each pseudo gets its own forbidden set: globally bad spill regs
   plus its per-pseudo forbidden and previously-tried registers.  */
2705 COPY_HARD_REG_SET (forbidden_regs, bad_spill_regs);
2706 IOR_HARD_REG_SET (forbidden_regs, pseudo_forbidden_regs[regno]);
2707 IOR_HARD_REG_SET (forbidden_regs, pseudo_previous_regs[regno]);
2708 gcc_assert (reg_renumber[regno] < 0);
2709 a = ira_regno_allocno_map[regno];
2710 ira_mark_allocation_change (regno);
2711 ira_assert (reg_renumber[regno] < 0);
2712 if (internal_flag_ira_verbose > 3 && ira_dump_file != NULL)
2713 fprintf (ira_dump_file,
2714 " Spill %d(a%d), cost=%d", regno, ALLOCNO_NUM (a),
2715 ALLOCNO_MEMORY_COST (a)
2716 - ALLOCNO_COVER_CLASS_COST (a));
2717 allocno_reload_assign (a, forbidden_regs);
2718 if (reg_renumber[regno] >= 0)
2720 CLEAR_REGNO_REG_SET (spilled, regno);
/* Still spilled: keep it (compacted to the array front) for the
   conflict pass below.  */
2724 spilled_pseudo_regs[m++] = regno;
2728 if (internal_flag_ira_verbose > 3 && ira_dump_file != NULL)
2730 fprintf (ira_dump_file, " Spilled regs");
2731 for (i = 0; i < m; i++)
2732 fprintf (ira_dump_file, " %d", spilled_pseudo_regs[i]);
2733 fprintf (ira_dump_file, "\n");
2735 /* Try to assign hard registers to pseudos conflicting with ones
2736 from SPILLED_PSEUDO_REGS. */
2737 for (i = n = 0; i < m; i++)
2739 regno = spilled_pseudo_regs[i];
2740 a = ira_regno_allocno_map[regno];
/* Collect each spilled, reassignable conflicting allocno exactly
   once, using CONSIDERATION_ALLOCNO_BITMAP for deduplication.  */
2741 FOR_EACH_ALLOCNO_CONFLICT (a, conflict_a, aci)
2742 if (ALLOCNO_HARD_REGNO (conflict_a) < 0
2743 && ! ALLOCNO_DONT_REASSIGN_P (conflict_a)
2744 && ! bitmap_bit_p (consideration_allocno_bitmap,
2745 ALLOCNO_NUM (conflict_a)))
2747 sorted_allocnos[n++] = conflict_a;
2748 bitmap_set_bit (consideration_allocno_bitmap,
2749 ALLOCNO_NUM (conflict_a));
/* Assign the collected conflicting allocnos in priority order.  */
2754 setup_allocno_priorities (sorted_allocnos, n);
2755 qsort (sorted_allocnos, n, sizeof (ira_allocno_t),
2756 allocno_priority_compare_func);
2757 for (i = 0; i < n; i++)
2759 a = sorted_allocnos[i];
2760 regno = ALLOCNO_REGNO (a);
2761 COPY_HARD_REG_SET (forbidden_regs, bad_spill_regs);
2762 IOR_HARD_REG_SET (forbidden_regs, pseudo_forbidden_regs[regno]);
2763 IOR_HARD_REG_SET (forbidden_regs, pseudo_previous_regs[regno]);
2764 if (internal_flag_ira_verbose > 3 && ira_dump_file != NULL)
2765 fprintf (ira_dump_file,
2766 " Try assign %d(a%d), cost=%d",
2767 regno, ALLOCNO_NUM (a),
2768 ALLOCNO_MEMORY_COST (a)
2769 - ALLOCNO_COVER_CLASS_COST (a));
2770 if (allocno_reload_assign (a, forbidden_regs))
2773 bitmap_clear_bit (spilled, regno);
2780 /* The function is called by reload and returns already allocated
2781 stack slot (if any) for REGNO with given INHERENT_SIZE and
2782 TOTAL_SIZE. In the case of failure to find a slot which can be
2783 used for REGNO, the function returns NULL. */
2785 ira_reuse_stack_slot (int regno, unsigned int inherent_size,
2786 unsigned int total_size)
2789 int slot_num, best_slot_num;
2790 int cost, best_cost;
2791 ira_copy_t cp, next_cp;
2792 ira_allocno_t another_allocno, allocno = ira_regno_allocno_map[regno];
2795 struct ira_spilled_reg_stack_slot *slot = NULL;
2797 ira_assert (flag_ira && inherent_size == PSEUDO_REGNO_BYTES (regno)
2798 && inherent_size <= total_size
2799 && ALLOCNO_HARD_REGNO (allocno) < 0);
2800 if (! flag_ira_share_spill_slots)
/* A "hard regno" of -slot_num - 2 encodes a slot recorded earlier
   (see ira_mark_new_stack_slot); reuse it directly.  */
2802 slot_num = -ALLOCNO_HARD_REGNO (allocno) - 2;
2805 slot = &ira_spilled_reg_stack_slots[slot_num];
2810 best_cost = best_slot_num = -1;
2812 /* It means that the pseudo was spilled in the reload pass, try
2815 slot_num < ira_spilled_reg_stack_slots_num;
2818 slot = &ira_spilled_reg_stack_slots[slot_num];
2819 if (slot->mem == NULL_RTX)
/* Reject slots that are too narrow for this pseudo.  */
2821 if (slot->width < total_size
2822 || GET_MODE_SIZE (GET_MODE (slot->mem)) < inherent_size)
/* Reject slots with an occupant whose live ranges intersect
   ALLOCNO's.  */
2825 EXECUTE_IF_SET_IN_BITMAP (&slot->spilled_regs,
2826 FIRST_PSEUDO_REGISTER, i, bi)
2828 another_allocno = ira_regno_allocno_map[i];
2829 if (ira_allocno_live_ranges_intersect_p (allocno,
/* Score the slot: sum the frequencies of ALLOCNO's copies whose
   other side already lives in this slot (sharing saves moves).  */
2833 for (cost = 0, cp = ALLOCNO_COPIES (allocno);
2837 if (cp->first == allocno)
2839 next_cp = cp->next_first_allocno_copy;
2840 another_allocno = cp->second;
2842 else if (cp->second == allocno)
2844 next_cp = cp->next_second_allocno_copy;
2845 another_allocno = cp->first;
2849 if (cp->insn == NULL_RTX)
2851 if (bitmap_bit_p (&slot->spilled_regs,
2852 ALLOCNO_REGNO (another_allocno)))
2855 if (cost > best_cost)
2858 best_slot_num = slot_num;
/* Commit to the best-scoring slot, if any was found.  */
2865 slot_num = best_slot_num;
2866 slot = &ira_spilled_reg_stack_slots[slot_num];
2867 SET_REGNO_REG_SET (&slot->spilled_regs, regno);
2869 ALLOCNO_HARD_REGNO (allocno) = -slot_num - 2;
/* Sanity check: nobody already in the slot may be live at the same
   time as REGNO.  */
2874 ira_assert (slot->width >= total_size);
2875 EXECUTE_IF_SET_IN_BITMAP (&slot->spilled_regs,
2876 FIRST_PSEUDO_REGISTER, i, bi)
2878 ira_assert (! ira_pseudo_live_ranges_intersect_p (regno, i));
2880 SET_REGNO_REG_SET (&slot->spilled_regs, regno);
2881 if (internal_flag_ira_verbose > 3 && ira_dump_file)
2883 fprintf (ira_dump_file, " Assigning %d(freq=%d) slot %d of",
2884 regno, REG_FREQ (regno), slot_num);
2885 EXECUTE_IF_SET_IN_BITMAP (&slot->spilled_regs,
2886 FIRST_PSEUDO_REGISTER, i, bi)
2888 if ((unsigned) regno != i)
2889 fprintf (ira_dump_file, " %d", i);
2891 fprintf (ira_dump_file, "\n");
2897 /* This is called by reload every time a new stack slot X with
2898 TOTAL_SIZE was allocated for REGNO. We store this info for
2899 subsequent ira_reuse_stack_slot calls. */
2901 ira_mark_new_stack_slot (rtx x, int regno, unsigned int total_size)
2903 struct ira_spilled_reg_stack_slot *slot;
2905 ira_allocno_t allocno;
2907 ira_assert (flag_ira && PSEUDO_REGNO_BYTES (regno) <= total_size);
2908 allocno = ira_regno_allocno_map[regno];
/* Decode any previously-recorded slot number from the allocno's
   negated hard regno field.  */
2909 slot_num = -ALLOCNO_HARD_REGNO (allocno) - 2;
/* No slot recorded yet: take a fresh slot number and store it back
   as -slot_num - 2 for ira_reuse_stack_slot to find.  */
2912 slot_num = ira_spilled_reg_stack_slots_num++;
2913 ALLOCNO_HARD_REGNO (allocno) = -slot_num - 2;
/* (Re)initialize the slot record with REGNO as its sole occupant.  */
2915 slot = &ira_spilled_reg_stack_slots[slot_num];
2916 INIT_REG_SET (&slot->spilled_regs);
2917 SET_REGNO_REG_SET (&slot->spilled_regs, regno);
2919 slot->width = total_size;
2920 if (internal_flag_ira_verbose > 3 && ira_dump_file)
2921 fprintf (ira_dump_file, " Assigning %d(freq=%d) a new slot %d\n",
2922 regno, REG_FREQ (regno), slot_num);
2926 /* Return spill cost for pseudo-registers whose numbers are in array
2927 REGNOS (with a negative number as an end marker) for reload with
2928 given IN and OUT for INSN. Return also number points (through
2929 EXCESS_PRESSURE_LIVE_LENGTH) where the pseudo-register lives and
2930 the register pressure is high, number of references of the
2931 pseudo-registers (through NREFS), number of callee-clobbered
2932 hard-registers occupied by the pseudo-registers (through
2933 CALL_USED_COUNT), and the first hard regno occupied by the
2934 pseudo-registers (through FIRST_HARD_REGNO). */
2936 calculate_spill_cost (int *regnos, rtx in, rtx out, rtx insn,
2937 int *excess_pressure_live_length,
2938 int *nrefs, int *call_used_count, int *first_hard_regno)
2940 int i, cost, regno, hard_regno, j, count, saved_cost, nregs;
/* Iterate REGNOS up to the negative end marker (the exit lines are
   elided in this listing).  */
2946 for (length = count = cost = i = 0;; i++)
2951 *nrefs += REG_N_REFS (regno);
2952 hard_regno = reg_renumber[regno];
2953 ira_assert (hard_regno >= 0);
2954 a = ira_regno_allocno_map[regno];
2955 length += ALLOCNO_EXCESS_PRESSURE_POINTS_NUM (a);
2956 cost += ALLOCNO_MEMORY_COST (a) - ALLOCNO_COVER_CLASS_COST (a);
/* Count how many callee-saved hard registers the pseudo covers.  */
2957 nregs = hard_regno_nregs[hard_regno][ALLOCNO_MODE (a)];
2958 for (j = 0; j < nregs; j++)
2959 if (! TEST_HARD_REG_BIT (call_used_reg_set, hard_regno + j))
/* If the pseudo is INSN's reload input (load saved) and/or its dying
   output (store saved), credit the saved memory moves, scaled by the
   insn's block frequency, against the spill cost.  */
2963 in_p = in && REG_P (in) && (int) REGNO (in) == hard_regno;
2964 out_p = out && REG_P (out) && (int) REGNO (out) == hard_regno;
2966 && find_regno_note (insn, REG_DEAD, hard_regno) != NULL_RTX)
2970 saved_cost += ira_memory_move_cost
2971 [ALLOCNO_MODE (a)][ALLOCNO_COVER_CLASS (a)][1];
2974 += ira_memory_move_cost
2975 [ALLOCNO_MODE (a)][ALLOCNO_COVER_CLASS (a)][0];
2976 cost -= REG_FREQ_FROM_BB (BLOCK_FOR_INSN (insn)) * saved_cost;
2979 *excess_pressure_live_length = length;
2980 *call_used_count = count;
2984 hard_regno = reg_renumber[regnos[0]];
2986 *first_hard_regno = hard_regno;
2990 /* Return TRUE if spilling pseudo-registers whose numbers are in array
2991 REGNOS is better than spilling pseudo-registers with numbers in
2992 OTHER_REGNOS for reload with given IN and OUT for INSN. The
2993 function used by the reload pass to make better register spilling
2996 ira_better_spill_reload_regno_p (int *regnos, int *other_regnos,
2997 rtx in, rtx out, rtx insn)
2999 int cost, other_cost;
3000 int length, other_length;
3001 int nrefs, other_nrefs;
3002 int call_used_count, other_call_used_count;
3003 int hard_regno, other_hard_regno;
3005 cost = calculate_spill_cost (regnos, in, out, insn,
3006 &length, &nrefs, &call_used_count, &hard_regno);
3007 other_cost = calculate_spill_cost (other_regnos, in, out, insn,
3008 &other_length, &other_nrefs,
3009 &other_call_used_count,
/* Decide lexicographically: prefer spilling unreferenced pseudos,
   then lower spill cost, then longer high-pressure live length...  */
3011 if (nrefs == 0 && other_nrefs != 0)
3013 if (nrefs != 0 && other_nrefs == 0)
3015 if (cost != other_cost)
3016 return cost < other_cost;
3017 if (length != other_length)
3018 return length > other_length;
3019 #ifdef REG_ALLOC_ORDER
/* ...then the target's preferred allocation order, then freeing more
   callee-clobbered hard registers.  */
3020 if (hard_regno >= 0 && other_hard_regno >= 0)
3021 return (inv_reg_alloc_order[hard_regno]
3022 < inv_reg_alloc_order[other_hard_regno]);
3024 if (call_used_count != other_call_used_count)
3025 return call_used_count > other_call_used_count;
3032 /* Allocate and initialize data necessary for assign_hard_reg. */
3034 ira_initiate_assign (void)
/* NOTE(review): the left-hand side of this assignment (presumably
   sorted_allocnos) is on a line elided from this listing -- confirm
   against the full source.  */
3037 = (ira_allocno_t *) ira_allocate (sizeof (ira_allocno_t)
3038 * ira_allocnos_num);
3039 consideration_allocno_bitmap = ira_allocate_bitmap ();
3040 initiate_cost_update ();
3041 allocno_priorities = (int *) ira_allocate (sizeof (int) * ira_allocnos_num);
3044 /* Deallocate data used by assign_hard_reg. */
3046 ira_finish_assign (void)
/* Frees mirror the allocations done in ira_initiate_assign.  */
3048 ira_free (sorted_allocnos);
3049 ira_free_bitmap (consideration_allocno_bitmap);
3050 finish_cost_update ();
3051 ira_free (allocno_priorities);
3056 /* Entry function doing color-based register allocation. */
/* NOTE(review): the function header line is elided in this listing;
   the statements below are the function body.  It sets up the
   coloring work vectors, runs the assignment machinery between
   ira_initiate_assign/ira_finish_assign, then tears down and
   optimizes spill/restore placement.  */
3060 allocno_stack_vec = VEC_alloc (ira_allocno_t, heap, ira_allocnos_num);
3061 removed_splay_allocno_vec
3062 = VEC_alloc (ira_allocno_t, heap, ira_allocnos_num);
3063 memset (allocated_hardreg_p, 0, sizeof (allocated_hardreg_p));
3064 ira_initiate_assign ();
3066 ira_finish_assign ();
3067 VEC_free (ira_allocno_t, heap, removed_splay_allocno_vec);
3068 VEC_free (ira_allocno_t, heap, allocno_stack_vec);
3069 move_spill_restore ();
3074 /* This page contains a simple register allocator without usage of
3075 allocno conflicts. This is used for fast allocation for -O0. */
3077 /* Do register allocation by not using allocno conflicts. It uses
3078 only allocno live ranges. The algorithm is close to Chow's
3079 priority coloring. */
3081 fast_allocation (void)
3083 int i, j, k, num, class_size, hard_regno;
3085 bool no_stack_reg_p;
3087 enum reg_class cover_class;
3088 enum machine_mode mode;
3090 ira_allocno_iterator ai;
3091 allocno_live_range_t r;
3092 HARD_REG_SET conflict_hard_regs, *used_hard_regs;
3094 sorted_allocnos = (ira_allocno_t *) ira_allocate (sizeof (ira_allocno_t)
3095 * ira_allocnos_num);
3097 FOR_EACH_ALLOCNO (a, ai)
3098 sorted_allocnos[num++] = a;
3099 allocno_priorities = (int *) ira_allocate (sizeof (int) * ira_allocnos_num);
3100 setup_allocno_priorities (sorted_allocnos, num);
/* USED_HARD_REGS[p] accumulates the hard registers already taken at
   program point p.  */
3101 used_hard_regs = (HARD_REG_SET *) ira_allocate (sizeof (HARD_REG_SET)
3103 for (i = 0; i < ira_max_point; i++)
3104 CLEAR_HARD_REG_SET (used_hard_regs[i]);
3105 qsort (sorted_allocnos, ira_allocnos_num, sizeof (ira_allocno_t),
3106 allocno_priority_compare_func);
3107 for (i = 0; i < num; i++)
3109 a = sorted_allocnos[i];
/* Conflicts come from live ranges only: any register in use at any
   point where A is live is unavailable to A.  */
3110 COPY_HARD_REG_SET (conflict_hard_regs, ALLOCNO_CONFLICT_HARD_REGS (a));
3111 for (r = ALLOCNO_LIVE_RANGES (a); r != NULL; r = r->next)
3112 for (j = r->start; j <= r->finish; j++)
3113 IOR_HARD_REG_SET (conflict_hard_regs, used_hard_regs[j]);
3114 cover_class = ALLOCNO_COVER_CLASS (a);
3115 ALLOCNO_ASSIGNED_P (a) = true;
3116 ALLOCNO_HARD_REGNO (a) = -1;
/* If every register of the cover class conflicts, A stays in
   memory.  */
3117 if (hard_reg_set_subset_p (reg_class_contents[cover_class],
3118 conflict_hard_regs))
3120 mode = ALLOCNO_MODE (a);
3122 no_stack_reg_p = ALLOCNO_NO_STACK_REG_P (a);
3124 class_size = ira_class_hard_regs_num[cover_class];
3125 for (j = 0; j < class_size; j++)
3127 hard_regno = ira_class_hard_regs[cover_class][j];
/* Honor the no-stack-reg restriction on targets with a register
   stack (e.g. x87).  */
3129 if (no_stack_reg_p && FIRST_STACK_REG <= hard_regno
3130 && hard_regno <= LAST_STACK_REG)
3133 if (!ira_hard_reg_not_in_set_p (hard_regno, mode, conflict_hard_regs)
3134 || (TEST_HARD_REG_BIT
3135 (prohibited_class_mode_regs[cover_class][mode], hard_regno)))
/* Take the first acceptable register and mark it used over all of
   A's live points.  */
3137 ALLOCNO_HARD_REGNO (a) = hard_regno;
3138 for (r = ALLOCNO_LIVE_RANGES (a); r != NULL; r = r->next)
3139 for (k = r->start; k <= r->finish; k++)
3140 IOR_HARD_REG_SET (used_hard_regs[k],
3141 ira_reg_mode_hard_regset[hard_regno][mode]);
3145 ira_free (sorted_allocnos);
3146 ira_free (used_hard_regs);
3147 ira_free (allocno_priorities);
3148 if (internal_flag_ira_verbose > 1 && ira_dump_file != NULL)
3149 ira_print_disposition (ira_dump_file);
3154 /* Entry function doing coloring. */
3159 ira_allocno_iterator ai;
3161 /* Setup updated costs. */
3162 FOR_EACH_ALLOCNO (a, ai)
3164 ALLOCNO_UPDATED_MEMORY_COST (a) = ALLOCNO_MEMORY_COST (a);
3165 ALLOCNO_UPDATED_COVER_CLASS_COST (a) = ALLOCNO_COVER_CLASS_COST (a);