+2008-11-14  Vladimir Makarov  <vmakarov@redhat.com>
+
+ PR rtl-optimization/37397
+ * ira-int.h (struct ira_allocno): New member bad_spill_p.
+ (ALLOCNO_BAD_SPILL_P): New macro.
+
+ * ira-color.c (push_allocnos_to_stack): Check ALLOCNO_BAD_SPILL_P.
+
+ * ira-build.c (ira_create_allocno): Initialize
+ ALLOCNO_BAD_SPILL_P.
+ (create_cap_allocno, propagate_allocno_info,
+ remove_unnecessary_allocnos): Set up or update
+ ALLOCNO_BAD_SPILL_P.
+ (update_bad_spill_attribute): New function.
+ (ira_build): Call it.
+
+ * ira-costs.c (record_reg_classes): Set up ALLOCNO_BAD_SPILL_P.
+
2008-11-14  Jakub Jelinek  <jakub@redhat.com>

	PR tree-optimization/38104
ALLOCNO_SOMEWHERE_RENAMED_P (a) = false;
ALLOCNO_CHILD_RENAMED_P (a) = false;
ALLOCNO_DONT_REASSIGN_P (a) = false;
+ ALLOCNO_BAD_SPILL_P (a) = false;
ALLOCNO_IN_GRAPH_P (a) = false;
ALLOCNO_ASSIGNED_P (a) = false;
ALLOCNO_MAY_BE_SPILLED_P (a) = false;
ira_allocate_and_copy_costs
(&ALLOCNO_CONFLICT_HARD_REG_COSTS (cap), cover_class,
ALLOCNO_CONFLICT_HARD_REG_COSTS (a));
+ ALLOCNO_BAD_SPILL_P (cap) = ALLOCNO_BAD_SPILL_P (a);
ALLOCNO_NREFS (cap) = ALLOCNO_NREFS (a);
ALLOCNO_FREQ (cap) = ALLOCNO_FREQ (a);
ALLOCNO_CALL_FREQ (cap) = ALLOCNO_CALL_FREQ (a);
&& bitmap_bit_p (ALLOCNO_LOOP_TREE_NODE (a)->border_allocnos,
ALLOCNO_NUM (a)))
{
+ if (! ALLOCNO_BAD_SPILL_P (a))
+ ALLOCNO_BAD_SPILL_P (parent_a) = false;
ALLOCNO_NREFS (parent_a) += ALLOCNO_NREFS (a);
ALLOCNO_FREQ (parent_a) += ALLOCNO_FREQ (a);
ALLOCNO_CALL_FREQ (parent_a) += ALLOCNO_CALL_FREQ (a);
+= ALLOCNO_CALLS_CROSSED_NUM (a);
ALLOCNO_EXCESS_PRESSURE_POINTS_NUM (parent_a)
+= ALLOCNO_EXCESS_PRESSURE_POINTS_NUM (a);
+ if (! ALLOCNO_BAD_SPILL_P (a))
+ ALLOCNO_BAD_SPILL_P (parent_a) = false;
#ifdef STACK_REGS
if (ALLOCNO_TOTAL_NO_STACK_REG_P (a))
ALLOCNO_TOTAL_NO_STACK_REG_P (parent_a) = true;
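Taken together, the propagate_allocno_info and remove_unnecessary_allocnos hunks above give the flag what amounts to AND semantics up the loop tree: a parent allocno keeps bad_spill_p only while every subregion allocno merged into it has the flag too. A minimal sketch of the merge rule, with a hypothetical node type standing in for ira_allocno_t:

#include <stdbool.h>

struct node { bool bad_spill_p; };

/* Merge a subregion allocno's flag into its parent: a single child
   for which spilling makes sense clears the parent's flag too.  */
static void
merge_bad_spill_p (struct node *parent, const struct node *child)
{
  if (!child->bad_spill_p)
    parent->bad_spill_p = false;
}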
\f
+/* At this point a true value of the allocno attribute bad_spill_p
+   means that there is an insn where the allocno occurs and where the
+   allocno cannot be used as memory.  This function updates the
+   attribute so that afterwards it can be true only for allocnos
+   which cannot be used as memory in an insn and in whose live ranges
+   no other allocnos die.  Spilling an allocno for which the flag
+   stays true will not improve the code, because it will not make
+   other allocnos colorable, and additional reloads for the
+   corresponding pseudo will be generated in the reload pass for each
+   insn in which it occurs.
+
+   This is a trick mentioned in the classic article of Chaitin et al.
+   which is frequently omitted in other implementations of register
+   allocation based on graph coloring.  */
+static void
+update_bad_spill_attribute (void)
+{
+ int i;
+ ira_allocno_t a;
+ ira_allocno_iterator ai;
+ allocno_live_range_t r;
+ enum reg_class cover_class;
+ bitmap_head dead_points[N_REG_CLASSES];
+
+ for (i = 0; i < ira_reg_class_cover_size; i++)
+ {
+ cover_class = ira_reg_class_cover[i];
+ bitmap_initialize (&dead_points[cover_class], &reg_obstack);
+ }
+ FOR_EACH_ALLOCNO (a, ai)
+ {
+ cover_class = ALLOCNO_COVER_CLASS (a);
+ if (cover_class == NO_REGS)
+ continue;
+ for (r = ALLOCNO_LIVE_RANGES (a); r != NULL; r = r->next)
+ bitmap_set_bit (&dead_points[cover_class], r->finish);
+ }
+ FOR_EACH_ALLOCNO (a, ai)
+ {
+ cover_class = ALLOCNO_COVER_CLASS (a);
+ if (cover_class == NO_REGS)
+ continue;
+ if (! ALLOCNO_BAD_SPILL_P (a))
+ continue;
+ for (r = ALLOCNO_LIVE_RANGES (a); r != NULL; r = r->next)
+ {
+ for (i = r->start + 1; i < r->finish; i++)
+ if (bitmap_bit_p (&dead_points[cover_class], i))
+ break;
+ if (i < r->finish)
+ break;
+ }
+ if (r != NULL)
+ ALLOCNO_BAD_SPILL_P (a) = false;
+ }
+ for (i = 0; i < ira_reg_class_cover_size; i++)
+ {
+ cover_class = ira_reg_class_cover[i];
+ bitmap_clear (&dead_points[cover_class]);
+ }
+}
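For readers outside IRA, here is a self-contained sketch of the same dead-points scan, with plain arrays standing in for IRA's allocno, live-range, and bitmap types; every name in it is illustrative, not part of GCC:

#include <stdbool.h>
#include <stdio.h>

#define MAX_POINT 64

struct pseudo
{
  int start, finish;   /* one live range per pseudo, for simplicity */
  bool bad_spill_p;    /* set when some insn disallows memory for it */
};

static void
update_bad_spill (struct pseudo *p, int n)
{
  bool dead_point[MAX_POINT] = { false };
  int i, j;

  /* Pass 1: record the point where each pseudo dies.  */
  for (i = 0; i < n; i++)
    dead_point[p[i].finish] = true;

  /* Pass 2: if another pseudo dies strictly inside the range,
     spilling might actually help coloring, so clear the flag.  */
  for (i = 0; i < n; i++)
    {
      if (!p[i].bad_spill_p)
        continue;
      for (j = p[i].start + 1; j < p[i].finish; j++)
        if (dead_point[j])
          {
            p[i].bad_spill_p = false;
            break;
          }
    }
}

int
main (void)
{
  struct pseudo p[] = { { 0, 10, true },    /* another pseudo dies inside */
                        { 2, 5, true },     /* no interior deaths */
                        { 12, 20, true } }; /* no interior deaths */
  update_bad_spill (p, 3);
  printf ("%d %d %d\n", p[0].bad_spill_p, p[1].bad_spill_p,
          p[2].bad_spill_p);                /* prints: 0 1 1 */
  return 0;
}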
+
+\f
+
/* Set up minimal and maximal live range points for allocnos. */
static void
setup_min_max_allocno_live_range_point (void)
ira_create_allocno_live_ranges ();
remove_unnecessary_regions ();
ira_compress_allocno_live_ranges ();
+ update_bad_spill_attribute ();
loops_p = more_one_region_p ();
if (loops_p)
{
* ira_reg_class_nregs[ALLOCNO_COVER_CLASS
(i_allocno)]
[ALLOCNO_MODE (i_allocno)] + 1));
- if (allocno == NULL || allocno_pri > i_allocno_pri
+ if (allocno == NULL
+ || (! ALLOCNO_BAD_SPILL_P (i_allocno)
+ && ALLOCNO_BAD_SPILL_P (allocno))
+ || allocno_pri > i_allocno_pri
|| (allocno_pri == i_allocno_pri
&& (allocno_cost > i_allocno_cost
|| (allocno_cost == i_allocno_cost
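Read in isolation, the new clause means a candidate that is not marked bad_spill_p wins over one that is, before the existing priority and cost tie-breakers are consulted. A hypothetical condensed form of the whole condition (the struct and all names are illustrative):

#include <stdbool.h>
#include <stddef.h>

struct cand { bool bad_spill_p; int pri; int cost; };

/* Return true when CAND should replace BEST as the next allocno to
   push for potential spilling; lower priority and lower cost mean
   cheaper to spill.  */
static bool
better_spill_candidate_p (const struct cand *cand, const struct cand *best)
{
  return (best == NULL
          || (!cand->bad_spill_p && best->bad_spill_p)
          || best->pri > cand->pri
          || (best->pri == cand->pri && best->cost > cand->cost));
}

As written, the clauses are OR'ed rather than compared lexicographically, so a candidate with a strictly lower priority still wins even when it carries bad_spill_p; the flag only redirects the choice when the priority test alone would keep the current candidate.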
int alt;
int i, j, k;
rtx set;
+ int insn_allows_mem[MAX_RECOG_OPERANDS];
+
+ for (i = 0; i < n_ops; i++)
+ insn_allows_mem[i] = 0;
/* Process each alternative, each time minimizing an operand's cost
with the cost for each operand in that alternative. */
j = p[0] - '0';
classes[i] = classes[j];
allows_mem[i] = allows_mem[j];
+ if (allows_mem[i])
+ insn_allows_mem[i] = 1;
if (! REG_P (op) || REGNO (op) < FIRST_PSEUDO_REGISTER)
{
+ (recog_data.operand_type[i] != OP_OUT
? ira_memory_move_cost[mode][classes[i]][1] : 0)
- allows_mem[i]) * frequency;
+
/* If we have assigned a class to this allocno in our
first pass, add a cost to this alternative
corresponding to what we would add if this allocno
/* It doesn't seem worth distinguishing between
offsettable and non-offsettable addresses
here. */
- allows_mem[i] = 1;
+ insn_allows_mem[i] = allows_mem[i] = 1;
if (MEM_P (op))
win = 1;
break;
|| (CONSTANT_P (op)
&& (! flag_pic || LEGITIMATE_PIC_OPERAND_P (op))))
win = 1;
- allows_mem[i] = 1;
+ insn_allows_mem[i] = allows_mem[i] = 1;
case 'r':
classes[i] = ira_reg_class_union[classes[i]][GENERAL_REGS];
break;
if (EXTRA_MEMORY_CONSTRAINT (c, p))
{
/* Every MEM can be reloaded to fit. */
- allows_mem[i] = 1;
+ insn_allows_mem[i] = allows_mem[i] = 1;
if (MEM_P (op))
win = 1;
}
}
}
+ for (i = 0; i < n_ops; i++)
+ {
+ ira_allocno_t a;
+ rtx op = ops[i];
+
+ if (! REG_P (op) || REGNO (op) < FIRST_PSEUDO_REGISTER)
+ continue;
+ a = ira_curr_regno_allocno_map[REGNO (op)];
+ if (! ALLOCNO_BAD_SPILL_P (a) && insn_allows_mem[i] == 0)
+ ALLOCNO_BAD_SPILL_P (a) = true;
+ }
+
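The record_reg_classes change follows a two-pass shape: while the constraint alternatives are scanned, insn_allows_mem[i] accumulates whether any alternative lets operand i live in memory, and a final pass flags the pseudos behind operands that no alternative can take from memory. A standalone sketch of that shape, with hypothetical names throughout:

#include <stdbool.h>

#define MAX_OPS 8

/* ALT_ALLOWS_MEM[alt][op] would be filled in while the constraint
   string of each alternative is parsed.  */
static void
mark_bad_spill_operands (int n_alts, int n_ops,
                         bool alt_allows_mem[][MAX_OPS],
                         bool bad_spill_p[])
{
  bool insn_allows_mem[MAX_OPS] = { false };

  for (int alt = 0; alt < n_alts; alt++)
    for (int op = 0; op < n_ops; op++)
      if (alt_allows_mem[alt][op])
        insn_allows_mem[op] = true;   /* some alternative takes memory */

  for (int op = 0; op < n_ops; op++)
    if (!insn_allows_mem[op])
      bad_spill_p[op] = true;         /* spilling would force a reload
                                         at every occurrence */
}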
/* If this insn is a single set copying operand 1 to operand 0 and
one operand is an allocno with the other a hard reg or an allocno
that prefers a hard register that is in its own register class
if (REGNO (x) < FIRST_PSEUDO_REGISTER)
break;
+ ALLOCNO_BAD_SPILL_P (ira_curr_regno_allocno_map[REGNO (x)]) = true;
pp = COSTS_OF_ALLOCNO (allocno_costs,
ALLOCNO_NUM (ira_curr_regno_allocno_map
[REGNO (x)]));
region and all its subregions recursively. */
unsigned int no_stack_reg_p : 1, total_no_stack_reg_p : 1;
#endif
+  /* TRUE value means that there is no point in spilling the allocno
+     during coloring because the spill will only result in additional
+     reloads in the reload pass.  */
+ unsigned int bad_spill_p : 1;
  /* TRUE value means that the allocno has not yet been removed from
     the conflict graph during coloring.  */
unsigned int in_graph_p : 1;
#define ALLOCNO_NO_STACK_REG_P(A) ((A)->no_stack_reg_p)
#define ALLOCNO_TOTAL_NO_STACK_REG_P(A) ((A)->total_no_stack_reg_p)
#endif
+#define ALLOCNO_BAD_SPILL_P(A) ((A)->bad_spill_p)
#define ALLOCNO_IN_GRAPH_P(A) ((A)->in_graph_p)
#define ALLOCNO_ASSIGNED_P(A) ((A)->assigned_p)
#define ALLOCNO_MAY_BE_SPILLED_P(A) ((A)->may_be_spilled_p)