/* Optimize jump instructions, for GNU compiler.
- Copyright (C) 1987-2014 Free Software Foundation, Inc.
+ Copyright (C) 1987-2020 Free Software Foundation, Inc.
This file is part of GCC.
#include "config.h"
#include "system.h"
#include "coretypes.h"
-#include "tm.h"
+#include "backend.h"
+#include "target.h"
#include "rtl.h"
+#include "tree.h"
+#include "cfghooks.h"
+#include "tree-pass.h"
+#include "memmodel.h"
#include "tm_p.h"
-#include "flags.h"
-#include "hard-reg-set.h"
-#include "regs.h"
#include "insn-config.h"
-#include "insn-attr.h"
+#include "regs.h"
+#include "emit-rtl.h"
#include "recog.h"
-#include "function.h"
-#include "basic-block.h"
-#include "expr.h"
-#include "except.h"
-#include "diagnostic-core.h"
-#include "reload.h"
-#include "predict.h"
-#include "tree-pass.h"
-#include "target.h"
+#include "cfgrtl.h"
#include "rtl-iter.h"
/* Optimize jump y; x: ... y: jumpif... x?
static void init_label_info (rtx_insn *);
static void mark_all_labels (rtx_insn *);
-static void mark_jump_label_1 (rtx, rtx, bool, bool);
-static void mark_jump_label_asm (rtx, rtx);
-static void redirect_exp_1 (rtx *, rtx, rtx, rtx);
-static int invert_exp_1 (rtx, rtx);
+static void mark_jump_label_1 (rtx, rtx_insn *, bool, bool);
+static void mark_jump_label_asm (rtx, rtx_insn *);
+static void redirect_exp_1 (rtx *, rtx, rtx, rtx_insn *);
+static int invert_exp_1 (rtx, rtx_insn *);
\f
/* Worker for rebuild_jump_labels and rebuild_jump_labels_chain. */
static void
rebuild_jump_labels_1 (rtx_insn *f, bool count_forced)
{
- rtx_expr_list *insn;
-
timevar_push (TV_REBUILD_JUMP);
init_label_info (f);
mark_all_labels (f);
count doesn't drop to zero. */
if (count_forced)
- for (insn = forced_labels; insn; insn = insn->next ())
- if (LABEL_P (insn->element ()))
- LABEL_NUSES (insn->element ())++;
+ {
+ rtx_insn *insn;
+ unsigned int i;
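+ /* forced_labels is a vec of label insns here (possibly null);
+ FOR_EACH_VEC_SAFE_ELT copes with the null-vector case. */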
+ FOR_EACH_VEC_SAFE_ELT (forced_labels, i, insn)
+ if (LABEL_P (insn))
+ LABEL_NUSES (insn)++;
+ }
timevar_pop (TV_REBUILD_JUMP);
}
{
if (BARRIER_P (insn))
{
- rtx_insn *prev = prev_nonnote_insn (insn);
+ rtx_insn *prev = prev_nonnote_nondebug_insn (insn);
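+ /* Unlike prev_nonnote_insn, the _nondebug walker also skips debug
+ insns, so their presence cannot change where the barrier ends up
+ (presumably this matters for -fcompare-debug stability). */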
if (!prev)
continue;
- if (CALL_P (prev))
- {
- /* Make sure we do not split a call and its corresponding
- CALL_ARG_LOCATION note. */
- rtx_insn *next = NEXT_INSN (prev);
-
- if (NOTE_P (next)
- && NOTE_KIND (next) == NOTE_INSN_CALL_ARG_LOCATION)
- prev = next;
- }
-
if (BARRIER_P (prev))
delete_insn (insn);
else if (prev != PREV_INSN (insn))
- reorder_insns_nobb (insn, insn, prev);
+ {
+ basic_block bb = BLOCK_FOR_INSN (prev);
+ rtx_insn *end = PREV_INSN (insn);
+ reorder_insns_nobb (insn, insn, prev);
+ if (bb)
+ {
+ /* If the backend called compute_bb_for_insn in machine reorg
+ and didn't call free_bb_for_insn afterwards, preserve basic
+ block boundaries. Move the end of the basic block to PREV since
+ it is followed by a barrier now, and clear BLOCK_FOR_INSN
+ on the following notes.
+ ??? Maybe the proper solution for the targets that have
+ cfg around after machine reorg is not to run cleanup_barriers
+ pass at all. */
+ BB_END (bb) = prev;
+ do
+ {
+ prev = NEXT_INSN (prev);
+ if (prev != insn && BLOCK_FOR_INSN (prev) == bb)
+ BLOCK_FOR_INSN (prev) = NULL;
+ }
+ while (prev != end);
+ }
+ }
}
}
return 0;
CODE_LABEL in the LABEL_REF of the "set". We can
conveniently use it for the marker function, which
requires a LABEL_REF wrapping. */
- gcc_assert (XEXP (label_note, 0) == XEXP (SET_SRC (label_set), 0));
+ gcc_assert (XEXP (label_note, 0) == label_ref_label (SET_SRC (label_set)));
mark_jump_label_1 (label_set, jump_insn, false, true);
handled by other optimizers using better algorithms. */
FOR_BB_INSNS (bb, insn)
{
- gcc_assert (! INSN_DELETED_P (insn));
+ gcc_assert (! insn->deleted ());
if (NONDEBUG_INSN_P (insn))
mark_jump_label (PATTERN (insn), insn, 0);
}
rtx_insn *prev_nonjump_insn = NULL;
for (insn = f; insn; insn = NEXT_INSN (insn))
{
- if (INSN_DELETED_P (insn))
+ if (insn->deleted ())
;
else if (LABEL_P (insn))
prev_nonjump_insn = NULL;
to help this function avoid overhead in these cases. */
enum rtx_code
reversed_comparison_code_parts (enum rtx_code code, const_rtx arg0,
- const_rtx arg1, const_rtx insn)
+ const_rtx arg1, const rtx_insn *insn)
{
- enum machine_mode mode;
+ machine_mode mode;
/* If this is not actually a comparison, we can't reverse it. */
if (GET_RTX_CLASS (code) != RTX_COMPARE
machine description to do tricks. */
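+ /* REVERSE_CONDITION always has a definition here: defaults.h is
+ assumed to supply reverse_condition (CODE) as the fallback when
+ the target does not define its own. */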
if (GET_MODE_CLASS (mode) == MODE_CC
&& REVERSIBLE_CC_MODE (mode))
- {
-#ifdef REVERSE_CONDITION
- return REVERSE_CONDITION (code, mode);
-#else
- return reverse_condition (code);
-#endif
- }
+ return REVERSE_CONDITION (code, mode);
/* Try a few special cases based on the comparison code. */
switch (code)
if (GET_MODE_CLASS (mode) == MODE_CC || CC0_P (arg0))
{
- const_rtx prev;
/* Try to search for the comparison to determine the real mode.
This code is expensive, but with sane machine description it
will be never used, since REVERSIBLE_CC_MODE will return true
- /* These CONST_CAST's are okay because prev_nonnote_insn just
- returns its argument and we assign it to a const_rtx
- variable. */
+ /* The const_cast is okay because prev_nonnote_insn does not
+ modify its argument and the loop below only reads the insns
+ it visits. */
- for (prev = prev_nonnote_insn (CONST_CAST_RTX (insn));
+ for (rtx_insn *prev = prev_nonnote_insn (const_cast<rtx_insn *> (insn));
prev != 0 && !LABEL_P (prev);
- prev = prev_nonnote_insn (CONST_CAST_RTX (prev)))
+ prev = prev_nonnote_insn (prev))
{
const_rtx set = set_of (arg0, prev);
if (set && GET_CODE (set) == SET
/* A wrapper around the previous function to take COMPARISON as rtx
expression. This simplifies many callers. */
enum rtx_code
-reversed_comparison_code (const_rtx comparison, const_rtx insn)
+reversed_comparison_code (const_rtx comparison, const rtx_insn *insn)
{
if (!COMPARISON_P (comparison))
return UNKNOWN;
/* Return comparison with reversed code of EXP.
Return NULL_RTX in case we fail to do the reversal. */
rtx
-reversed_comparison (const_rtx exp, enum machine_mode mode)
+reversed_comparison (const_rtx exp, machine_mode mode)
{
- enum rtx_code reversed_code = reversed_comparison_code (exp, NULL_RTX);
+ enum rtx_code reversed_code = reversed_comparison_code (exp, NULL);
if (reversed_code == UNKNOWN)
return NULL_RTX;
else
/* Return 1 if INSN is an unconditional jump and nothing else. */
int
-simplejump_p (const_rtx insn)
+simplejump_p (const rtx_insn *insn)
{
return (JUMP_P (insn)
&& GET_CODE (PATTERN (insn)) == SET
branch and compare insns. Use any_condjump_p instead whenever possible. */
int
-condjump_p (const_rtx insn)
+condjump_p (const rtx_insn *insn)
{
const_rtx x = PATTERN (insn);
branch and compare insns. Use any_condjump_p instead whenever possible. */
int
-condjump_in_parallel_p (const_rtx insn)
+condjump_in_parallel_p (const rtx_insn *insn)
{
const_rtx x = PATTERN (insn);
/* Return set of PC, otherwise NULL. */
rtx
-pc_set (const_rtx insn)
+pc_set (const rtx_insn *insn)
{
rtx pat;
if (!JUMP_P (insn))
possibly bundled inside a PARALLEL. */
int
-any_uncondjump_p (const_rtx insn)
+any_uncondjump_p (const rtx_insn *insn)
{
const_rtx x = pc_set (insn);
if (!x)
Note that unlike condjump_p it returns false for unconditional jumps. */
int
-any_condjump_p (const_rtx insn)
+any_condjump_p (const rtx_insn *insn)
{
const_rtx x = pc_set (insn);
enum rtx_code a, b;
/* Return the label of a conditional jump. */
rtx
-condjump_label (const_rtx insn)
+condjump_label (const rtx_insn *insn)
{
rtx x = pc_set (insn);
/* Return TRUE if INSN is a return jump. */
int
-returnjump_p (rtx insn)
+returnjump_p (const rtx_insn *insn)
{
if (JUMP_P (insn))
{
/* Return true if INSN is a (possibly conditional) return insn. */
int
-eh_returnjump_p (rtx insn)
+eh_returnjump_p (rtx_insn *insn)
{
if (JUMP_P (insn))
{
nothing more. */
int
-onlyjump_p (const_rtx insn)
+onlyjump_p (const rtx_insn *insn)
{
rtx set;
/* Return true iff INSN is a jump and its JUMP_LABEL is a label, not
NULL or a return. */
bool
-jump_to_label_p (rtx insn)
+jump_to_label_p (const rtx_insn *insn)
{
return (JUMP_P (insn)
&& JUMP_LABEL (insn) != NULL && !ANY_RETURN_P (JUMP_LABEL (insn)));
}
-#ifdef HAVE_cc0
-
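+ /* These helpers are compiled unconditionally now; HAVE_cc0 is
+ assumed to be derived from the md file, so on non-cc0 targets
+ the cc0 paths are simply dead code. */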
/* Return nonzero if X is an RTX that only sets the condition codes
and has no side effects. */
}
return 0;
}
-#endif
\f
/* Find all CODE_LABELs referred to in X, and increment their use
counts. If INSN is a JUMP_INSN and there is at least one
that loop-optimization is done with. */
void
-mark_jump_label (rtx x, rtx insn, int in_mem)
+mark_jump_label (rtx x, rtx_insn *insn, int in_mem)
{
rtx asmop = extract_asm_operands (x);
if (asmop)
note. */
static void
-mark_jump_label_1 (rtx x, rtx insn, bool in_mem, bool is_target)
+mark_jump_label_1 (rtx x, rtx_insn *insn, bool in_mem, bool is_target)
{
RTX_CODE code = GET_CODE (x);
int i;
case LABEL_REF:
{
- rtx label = XEXP (x, 0);
+ rtx_insn *label = label_ref_label (x);
/* Ignore remaining references to unreachable labels that
have been deleted. */
if (LABEL_REF_NONLOCAL_P (x))
break;
- XEXP (x, 0) = label;
- if (! insn || ! INSN_DELETED_P (insn))
+ set_label_ref_label (x, label);
+ if (! insn || ! insn->deleted ())
++LABEL_NUSES (label);
if (insn)
ADDR_DIFF_VEC. Don't set the JUMP_LABEL of a vector. */
case ADDR_VEC:
case ADDR_DIFF_VEC:
- if (! INSN_DELETED_P (insn))
+ if (! insn->deleted ())
{
int eltnum = code == ADDR_DIFF_VEC ? 1 : 0;
for (i = 0; i < XVECLEN (x, eltnum); i++)
- mark_jump_label_1 (XVECEXP (x, eltnum, i), NULL_RTX, in_mem,
+ mark_jump_label_1 (XVECEXP (x, eltnum, i), NULL, in_mem,
is_target);
}
return;
need to be considered targets. */
static void
-mark_jump_label_asm (rtx asmop, rtx insn)
+mark_jump_label_asm (rtx asmop, rtx_insn *insn)
{
int i;
subsequent cfg_cleanup pass to delete unreachable code if needed. */
rtx_insn *
-delete_related_insns (rtx insn)
+delete_related_insns (rtx uncast_insn)
{
+ rtx_insn *insn = as_a <rtx_insn *> (uncast_insn);
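+ /* as_a asserts (in checking builds) that UNCAST_INSN really is an
+ insn before the downcast. */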
int was_code_label = (LABEL_P (insn));
rtx note;
rtx_insn *next = NEXT_INSN (insn), *prev = PREV_INSN (insn);
- while (next && INSN_DELETED_P (next))
+ while (next && next->deleted ())
next = NEXT_INSN (next);
/* This insn is already deleted => return first following nondeleted. */
- if (INSN_DELETED_P (insn))
+ if (insn->deleted ())
return next;
delete_insn (insn);
if (next != 0 && BARRIER_P (next))
delete_insn (next);
- /* If this is a call, then we have to remove the var tracking note
- for the call arguments. */
-
- if (CALL_P (insn)
- || (NONJUMP_INSN_P (insn)
- && GET_CODE (PATTERN (insn)) == SEQUENCE
- && CALL_P (XVECEXP (PATTERN (insn), 0, 0))))
- {
- rtx p;
-
- for (p = next && INSN_DELETED_P (next) ? NEXT_INSN (next) : next;
- p && NOTE_P (p);
- p = NEXT_INSN (p))
- if (NOTE_KIND (p) == NOTE_INSN_CALL_ARG_LOCATION)
- {
- remove_insn (p);
- break;
- }
- }
-
/* If deleting a jump, decrement the count of the label,
and delete the label if it is now unused. */
/* Likewise if we're deleting a dispatch table. */
- if (JUMP_TABLE_DATA_P (insn))
+ if (rtx_jump_table_data *table = dyn_cast <rtx_jump_table_data *> (insn))
{
- rtx pat = PATTERN (insn);
- int i, diff_vec_p = GET_CODE (pat) == ADDR_DIFF_VEC;
- int len = XVECLEN (pat, diff_vec_p);
+ rtvec labels = table->get_labels ();
+ int i;
+ int len = GET_NUM_ELEM (labels);
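+ /* get_labels returns the label rtvec whether the table is an
+ ADDR_VEC (operand 0) or an ADDR_DIFF_VEC (operand 1). */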
for (i = 0; i < len; i++)
- if (LABEL_NUSES (XEXP (XVECEXP (pat, diff_vec_p, i), 0)) == 0)
- delete_related_insns (XEXP (XVECEXP (pat, diff_vec_p, i), 0));
- while (next && INSN_DELETED_P (next))
+ if (LABEL_NUSES (XEXP (RTVEC_ELT (labels, i), 0)) == 0)
+ delete_related_insns (XEXP (RTVEC_ELT (labels, i), 0));
+ while (next && next->deleted ())
next = NEXT_INSN (next);
return next;
}
if (LABEL_NUSES (XEXP (note, 0)) == 0)
delete_related_insns (XEXP (note, 0));
- while (prev && (INSN_DELETED_P (prev) || NOTE_P (prev)))
+ while (prev && (prev->deleted () || NOTE_P (prev)))
prev = PREV_INSN (prev);
/* If INSN was a label and a dispatch table follows it,
if (code == NOTE)
next = NEXT_INSN (next);
/* Keep going past other deleted labels to delete what follows. */
- else if (code == CODE_LABEL && INSN_DELETED_P (next))
+ else if (code == CODE_LABEL && next->deleted ())
next = NEXT_INSN (next);
/* Keep the (use (insn))s created by dbr_schedule, which needs
them in order to track liveness relative to a previous
but I see no clean and sure alternative way
to find the first insn after INSN that is not now deleted.
I hope this works. */
- while (next && INSN_DELETED_P (next))
+ while (next && next->deleted ())
next = NEXT_INSN (next);
return next;
}
if (!NOTE_P (insn))
{
- INSN_DELETED_P (insn) = 1;
+ insn->set_deleted ();
/* Patch this insn out of the chain. */
/* We don't do this all at once, because we
NLABEL as a return. Accrue modifications into the change group. */
static void
-redirect_exp_1 (rtx *loc, rtx olabel, rtx nlabel, rtx insn)
+redirect_exp_1 (rtx *loc, rtx olabel, rtx nlabel, rtx_insn *insn)
{
rtx x = *loc;
RTX_CODE code = GET_CODE (x);
int i;
const char *fmt;
- if ((code == LABEL_REF && XEXP (x, 0) == olabel)
+ if ((code == LABEL_REF && label_ref_label (x) == olabel)
|| x == olabel)
{
x = redirect_target (nlabel);
if (GET_CODE (x) == LABEL_REF && loc == &PATTERN (insn))
- x = gen_rtx_SET (VOIDmode, pc_rtx, x);
+ x = gen_rtx_SET (pc_rtx, x);
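+ /* gen_rtx_SET is now a macro that supplies VOIDmode itself. */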
validate_change (insn, loc, x, 1);
return;
}
if (code == SET && SET_DEST (x) == pc_rtx
&& ANY_RETURN_P (nlabel)
&& GET_CODE (SET_SRC (x)) == LABEL_REF
- && XEXP (SET_SRC (x), 0) == olabel)
+ && label_ref_label (SET_SRC (x)) == olabel)
{
validate_change (insn, loc, nlabel, 1);
return;
not see how to do that. */
int
-redirect_jump_1 (rtx jump, rtx nlabel)
+redirect_jump_1 (rtx_insn *jump, rtx nlabel)
{
int ochanges = num_validated_changes ();
rtx *loc, asmop;
(this can only occur when trying to produce return insns). */
int
-redirect_jump (rtx jump, rtx nlabel, int delete_unused)
+redirect_jump (rtx_jump_insn *jump, rtx nlabel, int delete_unused)
{
- rtx olabel = JUMP_LABEL (jump);
+ rtx olabel = jump->jump_label ();
if (!nlabel)
{
If DELETE_UNUSED is positive, delete related insn to OLABEL if its ref
count has dropped to zero. */
void
-redirect_jump_2 (rtx jump, rtx olabel, rtx nlabel, int delete_unused,
+redirect_jump_2 (rtx_jump_insn *jump, rtx olabel, rtx nlabel, int delete_unused,
int invert)
{
rtx note;
/* Invert the jump condition X contained in jump insn INSN. Accrue the
modifications into the change group. Return nonzero for success. */
static int
-invert_exp_1 (rtx x, rtx insn)
+invert_exp_1 (rtx x, rtx_insn *insn)
{
RTX_CODE code = GET_CODE (x);
inversion and redirection. */
int
-invert_jump_1 (rtx jump, rtx nlabel)
+invert_jump_1 (rtx_jump_insn *jump, rtx nlabel)
{
rtx x = pc_set (jump);
int ochanges;
NLABEL instead of where it jumps now. Return true if successful. */
int
-invert_jump (rtx jump, rtx nlabel, int delete_unused)
+invert_jump (rtx_jump_insn *jump, rtx nlabel, int delete_unused)
{
rtx olabel = JUMP_LABEL (jump);
&& REG_P (SUBREG_REG (y)))))
{
int reg_x = -1, reg_y = -1;
- int byte_x = 0, byte_y = 0;
+ poly_int64 byte_x = 0, byte_y = 0;
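+ /* The offsets are poly_int64s so they can carry run-time-variable
+ sizes (e.g. SVE modes); known_eq below requires them to be equal
+ for all run-time values. */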
struct subreg_info info;
if (GET_MODE (x) != GET_MODE (y))
reg_y = reg_renumber[reg_y];
}
- return reg_x >= 0 && reg_x == reg_y && byte_x == byte_y;
+ return reg_x >= 0 && reg_x == reg_y && known_eq (byte_x, byte_y);
}
/* Now we have disposed of all the cases
case LABEL_REF:
/* We can't assume nonlocal labels have their following insns yet. */
if (LABEL_REF_NONLOCAL_P (x) || LABEL_REF_NONLOCAL_P (y))
- return XEXP (x, 0) == XEXP (y, 0);
+ return label_ref_label (x) == label_ref_label (y);
/* Two label-refs are equivalent if they point at labels
in the same position in the instruction stream. */
- return (next_real_insn (XEXP (x, 0))
- == next_real_insn (XEXP (y, 0)));
+ else
+ {
+ rtx_insn *xi = next_nonnote_nondebug_insn (label_ref_label (x));
+ rtx_insn *yi = next_nonnote_nondebug_insn (label_ref_label (y));
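+ /* next_nonnote_nondebug_insn does not skip labels the way
+ next_real_insn did, so skip them by hand; excluding debug
+ insns keeps the answer independent of debug info. */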
+ while (xi && LABEL_P (xi))
+ xi = next_nonnote_nondebug_insn (xi);
+ while (yi && LABEL_P (yi))
+ yi = next_nonnote_nondebug_insn (yi);
+ return xi == yi;
+ }
case SYMBOL_REF:
return XSTR (x, 0) == XSTR (y, 0);
}
break;
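+ /* 'p' is the rtl format code for poly_uint16 operands such as
+ SUBREG_BYTE; maybe_ne tests whether the two values might
+ differ for some run-time value. */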
+ case 'p':
+ if (maybe_ne (SUBREG_BYTE (x), SUBREG_BYTE (y)))
+ return 0;
+ break;
+
case 't':
if (XTREE (x, i) != XTREE (y, i))
return 0;