From: Vladimir Makarov Date: Tue, 13 Mar 2018 20:42:49 +0000 (+0000) Subject: re PR target/83712 ("Unable to find a register to spill" when compiling for thumb1) X-Git-Tag: upstream/12.2.0~32893 X-Git-Url: http://review.tizen.org/git/?a=commitdiff_plain;h=6027ea4cd7b1e955cf7cd61e9178a89d89e3dbe2;p=platform%2Fupstream%2Fgcc.git re PR target/83712 ("Unable to find a register to spill" when compiling for thumb1) 2018-03-13 Vladimir Makarov PR target/83712 * lra-assigns.c (find_all_spills_for): Ignore uninteresting pseudos. (assign_by_spills): Return a flag of reload assignment failure. Do not process the reload assignment failures. Do not spill other reload pseudos if they has the same reg class. Update n if necessary. (lra_assign): Add a return arg. Set up from the result of assign_by_spills call. (find_reload_regno_insns, lra_split_hard_reg_for): New functions. * lra-constraints.c (split_reg): Add a new arg. Use it instead of usage_insns if it is not NULL. (spill_hard_reg_in_range): New function. (split_if_necessary, inherit_in_ebb): Pass a new arg to split_reg. * lra-int.h (spill_hard_reg_in_range, lra_split_hard_reg_for): New function prototypes. (lra_assign): Change prototype. * lra.c (lra): Add code to deal with fails by splitting hard reg live ranges. From-SVN: r258504 --- diff --git a/gcc/ChangeLog b/gcc/ChangeLog index d4d61a0..5eaafcf 100644 --- a/gcc/ChangeLog +++ b/gcc/ChangeLog @@ -1,3 +1,25 @@ +2018-03-13 Vladimir Makarov + + PR target/83712 + * lra-assigns.c (find_all_spills_for): Ignore uninteresting + pseudos. + (assign_by_spills): Return a flag of reload assignment failure. + Do not process the reload assignment failures. Do not spill other + reload pseudos if they has the same reg class. Update n if + necessary. + (lra_assign): Add a return arg. Set up from the result of + assign_by_spills call. + (find_reload_regno_insns, lra_split_hard_reg_for): New functions. + * lra-constraints.c (split_reg): Add a new arg. Use it instead of + usage_insns if it is not NULL. + (spill_hard_reg_in_range): New function. + (split_if_necessary, inherit_in_ebb): Pass a new arg to split_reg. + * lra-int.h (spill_hard_reg_in_range, lra_split_hard_reg_for): New + function prototypes. + (lra_assign): Change prototype. + * lra.c (lra): Add code to deal with fails by splitting hard reg + live ranges. + 2018-03-01 Palmer Dabbelt * config/riscv/riscv.opt (mrelax): New option. diff --git a/gcc/lra-assigns.c b/gcc/lra-assigns.c index 2bfd091..6d16037 100644 --- a/gcc/lra-assigns.c +++ b/gcc/lra-assigns.c @@ -1339,24 +1339,33 @@ find_all_spills_for (int regno) r2 = r2->start_next) { if (live_pseudos_reg_renumber[r2->regno] >= 0 - && rclass_intersect_p[regno_allocno_class_array[r2->regno]]) + && ! sparseset_bit_p (live_range_hard_reg_pseudos, r2->regno) + && rclass_intersect_p[regno_allocno_class_array[r2->regno]] + && ((int) r2->regno < lra_constraint_new_regno_start + || bitmap_bit_p (&lra_inheritance_pseudos, r2->regno) + || bitmap_bit_p (&lra_split_regs, r2->regno) + || bitmap_bit_p (&lra_optional_reload_pseudos, r2->regno) + /* There is no sense to consider another reload + pseudo if it has the same class. */ + || regno_allocno_class_array[r2->regno] != rclass)) sparseset_set_bit (live_range_hard_reg_pseudos, r2->regno); } } } } -/* Assign hard registers to reload pseudos and other pseudos. */ -static void +/* Assign hard registers to reload pseudos and other pseudos. Return + true if we was not able to assign hard registers to all reload + pseudos. 
*/ +static bool assign_by_spills (void) { - int i, n, nfails, iter, regno, hard_regno, cost; + int i, n, nfails, iter, regno, regno2, hard_regno, cost; rtx restore_rtx; - rtx_insn *insn; bitmap_head changed_insns, do_not_assign_nonreload_pseudos; unsigned int u, conflict_regno; bitmap_iterator bi; - bool reload_p; + bool reload_p, fails_p = false; int max_regno = max_reg_num (); for (n = 0, i = lra_constraint_new_regno_start; i < max_regno; i++) @@ -1399,8 +1408,13 @@ assign_by_spills (void) hard_regno = spill_for (regno, &all_spilled_pseudos, iter == 1); if (hard_regno < 0) { - if (reload_p) + if (reload_p) { + /* Put unassigned reload pseudo first in the + array. */ + regno2 = sorted_pseudos[nfails]; sorted_pseudos[nfails++] = regno; + sorted_pseudos[i] = regno2; + } } else { @@ -1415,61 +1429,9 @@ assign_by_spills (void) bitmap_set_bit (&changed_pseudo_bitmap, regno); } } - if (nfails == 0) - break; - if (iter > 0) + if (nfails == 0 || iter > 0) { - /* We did not assign hard regs to reload pseudos after two iterations. - Either it's an asm and something is wrong with the constraints, or - we have run out of spill registers; error out in either case. */ - bool asm_p = false; - bitmap_head failed_reload_insns; - - bitmap_initialize (&failed_reload_insns, ®_obstack); - for (i = 0; i < nfails; i++) - { - regno = sorted_pseudos[i]; - bitmap_ior_into (&failed_reload_insns, - &lra_reg_info[regno].insn_bitmap); - /* Assign an arbitrary hard register of regno class to - avoid further trouble with this insn. */ - bitmap_clear_bit (&all_spilled_pseudos, regno); - assign_hard_regno - (ira_class_hard_regs[regno_allocno_class_array[regno]][0], - regno); - } - EXECUTE_IF_SET_IN_BITMAP (&failed_reload_insns, 0, u, bi) - { - insn = lra_insn_recog_data[u]->insn; - if (asm_noperands (PATTERN (insn)) >= 0) - { - asm_p = true; - error_for_asm (insn, - "% operand has impossible constraints"); - /* Avoid further trouble with this insn. - For asm goto, instead of fixing up all the edges - just clear the template and clear input operands - (asm goto doesn't have any output operands). */ - if (JUMP_P (insn)) - { - rtx asm_op = extract_asm_operands (PATTERN (insn)); - ASM_OPERANDS_TEMPLATE (asm_op) = ggc_strdup (""); - ASM_OPERANDS_INPUT_VEC (asm_op) = rtvec_alloc (0); - ASM_OPERANDS_INPUT_CONSTRAINT_VEC (asm_op) = rtvec_alloc (0); - lra_update_insn_regno_info (insn); - } - else - { - PATTERN (insn) = gen_rtx_USE (VOIDmode, const0_rtx); - lra_set_insn_deleted (insn); - } - } - else if (!asm_p) - { - error ("unable to find a register to spill"); - fatal_insn ("this is the insn:", insn); - } - } + fails_p = nfails != 0; break; } /* This is a very rare event. We can not assign a hard register @@ -1518,7 +1480,8 @@ assign_by_spills (void) update_lives (conflict_regno, true); lra_setup_reg_renumber (conflict_regno, -1, false); } - n = nfails; + if (n < nfails) + n = nfails; } improve_inheritance (&changed_pseudo_bitmap); bitmap_clear (&non_reload_pseudos); @@ -1604,9 +1567,9 @@ assign_by_spills (void) bitmap_clear (&best_spill_pseudos_bitmap); bitmap_clear (&spill_pseudos_bitmap); bitmap_clear (&insn_conflict_pseudos); + return fails_p; } - /* Entry function to assign hard registers to new reload pseudos starting with LRA_CONSTRAINT_NEW_REGNO_START (by possible spilling of old pseudos) and possibly to the old pseudos. The function adds @@ -1615,9 +1578,10 @@ assign_by_spills (void) changed allocation. Return true if we did not spill any non-reload and non-inheritance - pseudos. */ + pseudos. 
Set up FAILS_P if we failed to assign hard registers to + all reload pseudos. */ bool -lra_assign (void) +lra_assign (bool &fails_p) { int i; unsigned int u; @@ -1661,7 +1625,7 @@ lra_assign (void) /* Setup insns to process on the next constraint pass. */ bitmap_initialize (&changed_pseudo_bitmap, ®_obstack); init_live_reload_and_inheritance_pseudos (); - assign_by_spills (); + fails_p = assign_by_spills (); finish_live_reload_and_inheritance_pseudos (); bitmap_ior_into (&changed_pseudo_bitmap, &all_spilled_pseudos); no_spills_p = true; @@ -1707,3 +1671,139 @@ lra_assign (void) return no_spills_p; } +/* Find start and finish insns for reload pseudo REGNO. Return true + if we managed to find the expected insns. Return false, + otherwise. */ +static bool +find_reload_regno_insns (int regno, rtx_insn * &start, rtx_insn * &finish) +{ + unsigned int uid; + bitmap_iterator bi; + int n = 0; + rtx_insn *prev_insn, *next_insn; + rtx_insn *start_insn = NULL, *first_insn = NULL, *second_insn = NULL; + + EXECUTE_IF_SET_IN_BITMAP (&lra_reg_info[regno].insn_bitmap, 0, uid, bi) + { + if (start_insn == NULL) + start_insn = lra_insn_recog_data[uid]->insn; + n++; + } + /* For reload pseudo we should have at most 3 insns referring for it: + input/output reload insns and the original insn. */ + if (n > 3) + return false; + if (n > 1) + { + for (prev_insn = PREV_INSN (start_insn), + next_insn = NEXT_INSN (start_insn); + n != 1 && (prev_insn != NULL || next_insn != NULL); ) + { + if (prev_insn != NULL && first_insn == NULL) + { + if (! bitmap_bit_p (&lra_reg_info[regno].insn_bitmap, + INSN_UID (prev_insn))) + prev_insn = PREV_INSN (prev_insn); + else + { + first_insn = prev_insn; + n--; + } + } + if (next_insn != NULL && second_insn == NULL) + { + if (! bitmap_bit_p (&lra_reg_info[regno].insn_bitmap, + INSN_UID (next_insn))) + next_insn = NEXT_INSN (next_insn); + else + { + second_insn = next_insn; + n--; + } + } + } + if (n > 1) + return false; + } + start = first_insn != NULL ? first_insn : start_insn; + finish = second_insn != NULL ? second_insn : start_insn; + return true; +} + +/* Process reload pseudos which did not get a hard reg, split a hard + reg live range in live range of a reload pseudo, and then return + TRUE. If we did not split a hard reg live range, report an error, + and return FALSE. */ +bool +lra_split_hard_reg_for (void) +{ + int i, regno, n; + rtx_insn *insn, *first, *last; + unsigned int u; + bitmap_iterator bi; + int max_regno = max_reg_num (); + /* We did not assign hard regs to reload pseudos after two + iterations. Either it's an asm and something is wrong with the + constraints, or we have run out of spill registers; error out in + either case. */ + bool asm_p = false; + bitmap_head failed_reload_insns; + + if (lra_dump_file != NULL) + fprintf (lra_dump_file, + "\n****** Splitting a hard reg after assignment #%d: ******\n\n", + lra_assignment_iter); + for (n = 0, i = lra_constraint_new_regno_start; i < max_regno; i++) + if (reg_renumber[i] < 0 && lra_reg_info[i].nrefs != 0 + && regno_allocno_class_array[i] != NO_REGS + && ! bitmap_bit_p (&non_reload_pseudos, i)) + { + sorted_pseudos[n++] = i; + if (! 
find_reload_regno_insns (i, first, last)) + continue; + if (spill_hard_reg_in_range (i, regno_allocno_class_array[i], + first, last)) + return true; + } + bitmap_initialize (&failed_reload_insns, ®_obstack); + for (i = 0; i < n; i++) + { + regno = sorted_pseudos[i]; + bitmap_ior_into (&failed_reload_insns, + &lra_reg_info[regno].insn_bitmap); + lra_setup_reg_renumber (regno, ira_class_hard_regs[regno_allocno_class_array[regno]][0], false); + } + EXECUTE_IF_SET_IN_BITMAP (&failed_reload_insns, 0, u, bi) + { + insn = lra_insn_recog_data[u]->insn; + if (asm_noperands (PATTERN (insn)) >= 0) + { + asm_p = true; + error_for_asm (insn, + "% operand has impossible constraints"); + /* Avoid further trouble with this insn. + For asm goto, instead of fixing up all the edges + just clear the template and clear input operands + (asm goto doesn't have any output operands). */ + if (JUMP_P (insn)) + { + rtx asm_op = extract_asm_operands (PATTERN (insn)); + ASM_OPERANDS_TEMPLATE (asm_op) = ggc_strdup (""); + ASM_OPERANDS_INPUT_VEC (asm_op) = rtvec_alloc (0); + ASM_OPERANDS_INPUT_CONSTRAINT_VEC (asm_op) = rtvec_alloc (0); + lra_update_insn_regno_info (insn); + } + else + { + PATTERN (insn) = gen_rtx_USE (VOIDmode, const0_rtx); + lra_set_insn_deleted (insn); + } + } + else if (!asm_p) + { + error ("unable to find a register to spill"); + fatal_insn ("this is the insn:", insn); + } + } + return false; +} diff --git a/gcc/lra-constraints.c b/gcc/lra-constraints.c index ae55c86..118b65f 100644 --- a/gcc/lra-constraints.c +++ b/gcc/lra-constraints.c @@ -5458,7 +5458,8 @@ lra_copy_reg_equiv (unsigned int new_regno, unsigned int original_regno) /* Do split transformations for insn INSN, which defines or uses ORIGINAL_REGNO. NEXT_USAGE_INSNS specifies which instruction in the EBB next uses ORIGINAL_REGNO; it has the same form as the - "insns" field of usage_insns. + "insns" field of usage_insns. If TO is not NULL, we don't use + usage_insns, we put restore insns after TO insn. The transformations look like: @@ -5480,7 +5481,7 @@ lra_copy_reg_equiv (unsigned int new_regno, unsigned int original_regno) transformation. 
*/ static bool split_reg (bool before_p, int original_regno, rtx_insn *insn, - rtx next_usage_insns) + rtx next_usage_insns, rtx_insn *to) { enum reg_class rclass; rtx original_reg; @@ -5613,29 +5614,37 @@ split_reg (bool before_p, int original_regno, rtx_insn *insn, if (!HARD_REGISTER_NUM_P (original_regno) && mode == PSEUDO_REGNO_MODE (original_regno)) lra_copy_reg_equiv (new_regno, original_regno); - after_p = usage_insns[original_regno].after_p; lra_reg_info[new_regno].restore_rtx = regno_reg_rtx[original_regno]; bitmap_set_bit (&check_only_regs, new_regno); bitmap_set_bit (&check_only_regs, original_regno); bitmap_set_bit (&lra_split_regs, new_regno); - for (;;) + if (to != NULL) { - if (GET_CODE (next_usage_insns) != INSN_LIST) - { - usage_insn = next_usage_insns; - break; - } - usage_insn = XEXP (next_usage_insns, 0); - lra_assert (DEBUG_INSN_P (usage_insn)); - next_usage_insns = XEXP (next_usage_insns, 1); - lra_substitute_pseudo (&usage_insn, original_regno, new_reg, false, - true); - lra_update_insn_regno_info (as_a (usage_insn)); - if (lra_dump_file != NULL) + usage_insn = to; + after_p = TRUE; + } + else + { + after_p = usage_insns[original_regno].after_p; + for (;;) { - fprintf (lra_dump_file, " Split reuse change %d->%d:\n", - original_regno, new_regno); - dump_insn_slim (lra_dump_file, as_a (usage_insn)); + if (GET_CODE (next_usage_insns) != INSN_LIST) + { + usage_insn = next_usage_insns; + break; + } + usage_insn = XEXP (next_usage_insns, 0); + lra_assert (DEBUG_INSN_P (usage_insn)); + next_usage_insns = XEXP (next_usage_insns, 1); + lra_substitute_pseudo (&usage_insn, original_regno, new_reg, false, + true); + lra_update_insn_regno_info (as_a (usage_insn)); + if (lra_dump_file != NULL) + { + fprintf (lra_dump_file, " Split reuse change %d->%d:\n", + original_regno, new_regno); + dump_insn_slim (lra_dump_file, as_a (usage_insn)); + } } } lra_assert (NOTE_P (usage_insn) || NONDEBUG_INSN_P (usage_insn)); @@ -5662,6 +5671,35 @@ split_reg (bool before_p, int original_regno, rtx_insn *insn, return true; } +/* Split a hard reg for reload pseudo REGNO having RCLASS and living + in the range [FROM, TO]. Return true if did a split. Otherwise, + return false. */ +bool +spill_hard_reg_in_range (int regno, enum reg_class rclass, rtx_insn *from, rtx_insn *to) +{ + int i, hard_regno; + int rclass_size; + rtx_insn *insn; + + lra_assert (from != NULL && to != NULL); + rclass_size = ira_class_hard_regs_num[rclass]; + for (i = 0; i < rclass_size; i++) + { + hard_regno = ira_class_hard_regs[rclass][i]; + if (! TEST_HARD_REG_BIT (lra_reg_info[regno].conflict_hard_regs, hard_regno)) + continue; + for (insn = from; insn != NEXT_INSN (to); insn = NEXT_INSN (insn)) + if (bitmap_bit_p (&lra_reg_info[hard_regno].insn_bitmap, + INSN_UID (insn))) + break; + if (insn != NEXT_INSN (to)) + continue; + if (split_reg (TRUE, hard_regno, from, NULL, to)) + return true; + } + return false; +} + /* Recognize that we need a split transformation for insn INSN, which defines or uses REGNO in its insn biggest MODE (we use it only if REGNO is a hard register). 
POTENTIAL_RELOAD_HARD_REGS contains @@ -5689,7 +5727,7 @@ split_if_necessary (int regno, machine_mode mode, || (GET_CODE (next_usage_insns) == INSN_LIST && (INSN_UID (XEXP (next_usage_insns, 0)) < max_uid))) && need_for_split_p (potential_reload_hard_regs, regno + i) - && split_reg (before_p, regno + i, insn, next_usage_insns)) + && split_reg (before_p, regno + i, insn, next_usage_insns, NULL)) res = true; return res; } @@ -6467,7 +6505,7 @@ inherit_in_ebb (rtx_insn *head, rtx_insn *tail) head_p = false; } if (split_reg (false, j, bb_note (curr_bb), - next_usage_insns)) + next_usage_insns, NULL)) change_p = true; } usage_insns[j].check = 0; diff --git a/gcc/lra-int.h b/gcc/lra-int.h index 509e29e..86e103b 100644 --- a/gcc/lra-int.h +++ b/gcc/lra-int.h @@ -356,6 +356,7 @@ extern bool lra_constrain_insn (rtx_insn *); extern bool lra_constraints (bool); extern void lra_constraints_init (void); extern void lra_constraints_finish (void); +extern bool spill_hard_reg_in_range (int, enum reg_class, rtx_insn *, rtx_insn *); extern void lra_inheritance (void); extern bool lra_undo_inheritance (void); @@ -389,8 +390,8 @@ extern void lra_setup_reload_pseudo_preferenced_hard_reg (int, int, int); extern int lra_assignment_iter; extern int lra_assignment_iter_after_spill; extern void lra_setup_reg_renumber (int, int, bool); -extern bool lra_assign (void); - +extern bool lra_assign (bool &); +extern bool lra_split_hard_reg_for (void); /* lra-coalesce.c: */ diff --git a/gcc/lra.c b/gcc/lra.c index 0a25114..b410b90 100644 --- a/gcc/lra.c +++ b/gcc/lra.c @@ -2461,38 +2461,54 @@ lra (FILE *f) } if (live_p) lra_clear_live_ranges (); - /* We need live ranges for lra_assign -- so build them. But - don't remove dead insns or change global live info as we - can undo inheritance transformations after inheritance - pseudo assigning. */ - lra_create_live_ranges (true, false); - live_p = true; - /* If we don't spill non-reload and non-inheritance pseudos, - there is no sense to run memory-memory move coalescing. - If inheritance pseudos were spilled, the memory-memory - moves involving them will be removed by pass undoing - inheritance. */ - if (lra_simple_p) - lra_assign (); - else + bool fails_p; + do { - bool spill_p = !lra_assign (); - - if (lra_undo_inheritance ()) - live_p = false; - if (spill_p) + /* We need live ranges for lra_assign -- so build them. + But don't remove dead insns or change global live + info as we can undo inheritance transformations after + inheritance pseudo assigning. */ + lra_create_live_ranges (true, false); + live_p = true; + /* If we don't spill non-reload and non-inheritance + pseudos, there is no sense to run memory-memory move + coalescing. If inheritance pseudos were spilled, the + memory-memory moves involving them will be removed by + pass undoing inheritance. */ + if (lra_simple_p) + lra_assign (fails_p); + else { - if (! live_p) + bool spill_p = !lra_assign (fails_p); + + if (lra_undo_inheritance ()) + live_p = false; + if (spill_p && ! fails_p) { - lra_create_live_ranges (true, true); - live_p = true; + if (! live_p) + { + lra_create_live_ranges (true, true); + live_p = true; + } + if (lra_coalesce ()) + live_p = false; } - if (lra_coalesce ()) - live_p = false; + if (! live_p) + lra_clear_live_ranges (); + } + if (fails_p) + { + /* It is a very rare case. It is the last hope to + split a hard regno live range for a reload + pseudo. */ + if (live_p) + lra_clear_live_ranges (); + live_p = false; + if (! lra_split_hard_reg_for ()) + break; } - if (! 
live_p) - lra_clear_live_ranges (); } + while (fails_p); } /* Don't clear optional reloads bitmap until all constraints are satisfied as we need to differ them from regular reloads. */