+ /* Don't validate address for LRA. LRA can make the address valid + by itself in the most efficient way. */
/* It is a special insn like USE or CLOBBER. We should