/* Definitions of target machine for GNU compiler.
- Copyright (C) 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008,
- 2009, 2010, 2011
- Free Software Foundation, Inc.
+ Copyright (C) 1999-2013 Free Software Foundation, Inc.
Contributed by James E. Wilson <wilson@cygnus.com> and
David Mosberger <davidm@hpl.hp.com>.
#include "target-def.h"
#include "common/common-target.h"
#include "tm_p.h"
-#include "hashtab.h"
+#include "hash-table.h"
#include "langhooks.h"
#include "gimple.h"
+#include "gimplify.h"
#include "intl.h"
#include "df.h"
#include "debug.h"
static ds_t ia64_get_insn_checked_ds (rtx);
static bool ia64_skip_rtx_p (const_rtx);
static int ia64_speculate_insn (rtx, ds_t, rtx *);
-static bool ia64_needs_block_p (int);
+static bool ia64_needs_block_p (ds_t);
static rtx ia64_gen_spec_check (rtx, rtx, ds_t);
static int ia64_spec_check_p (rtx);
static int ia64_spec_check_src_p (rtx);
static void free_bundle_state (struct bundle_state *);
static void initiate_bundle_states (void);
static void finish_bundle_states (void);
-static unsigned bundle_state_hash (const void *);
-static int bundle_state_eq_p (const void *, const void *);
static int insert_bundle_state (struct bundle_state *);
static void initiate_bundle_state_table (void);
static void finish_bundle_state_table (void);
#undef TARGET_TRAMPOLINE_INIT
#define TARGET_TRAMPOLINE_INIT ia64_trampoline_init
+#undef TARGET_CAN_USE_DOLOOP_P
+#define TARGET_CAN_USE_DOLOOP_P can_use_doloop_if_innermost
#undef TARGET_INVALID_WITHIN_DOLOOP
#define TARGET_INVALID_WITHIN_DOLOOP hook_constcharptr_const_rtx_null
&& reg_overlap_mentioned_p (operands[0], operands[1]))
{
rtx base = XEXP (operands[1], 0);
+ rtx first_write = gen_rtx_REG (DImode, REGNO (operands[0]));
while (GET_CODE (base) != REG)
base = XEXP (base, 0);
if (REGNO (base) == REGNO (operands[0]))
- reversed = true;
- dead = true;
+ {
+ reversed = true;
+ first_write = gen_rtx_REG (DImode, REGNO (operands[0]) + 1);
+ }
+
+ if (GET_CODE (operands[0]) == REG
+ && reg_overlap_mentioned_p (first_write, operands[1]))
+ dead = true;
}
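+
+ /* For instance (illustrative register numbers): if operands[0] is the
+ pair r14/r15 and operands[1] is a MEM addressed through r14, writing
+ the low half first would clobber the base register, so the halves are
+ moved in reverse order and r15 is written first. If the half that is
+ written first still overlaps the address, that first write kills the
+ address register, which is what DEAD records. */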
/* Another reason to do the moves in reversed order is if the first
element of the target register pair is also the second element of
else if (TARGET_HPUX && GET_MODE (*op0) == TFmode)
{
enum qfcmp_magic {
- QCMP_INV = 1, /* Raise FP_INVALID on SNaN as a side effect. */
+ QCMP_INV = 1, /* Raise FP_INVALID on NaNs as a side effect. */
QCMP_UNORD = 2,
QCMP_EQ = 4,
QCMP_LT = 8,
switch (code)
{
/* 1 = equal, 0 = not equal. Equality operators do
- not raise FP_INVALID when given an SNaN operand. */
+ not raise FP_INVALID when given a NaN operand. */
case EQ: magic = QCMP_EQ; ncode = NE; break;
case NE: magic = QCMP_EQ; ncode = EQ; break;
/* isunordered() from C99. */
case UNORDERED: magic = QCMP_UNORD; ncode = NE; break;
case ORDERED: magic = QCMP_UNORD; ncode = EQ; break;
/* Relational operators raise FP_INVALID when given
- an SNaN operand. */
+ a NaN operand. */
case LT: magic = QCMP_LT |QCMP_INV; ncode = NE; break;
case LE: magic = QCMP_LT|QCMP_EQ|QCMP_INV; ncode = NE; break;
case GT: magic = QCMP_GT |QCMP_INV; ncode = NE; break;
case GE: magic = QCMP_GT|QCMP_EQ|QCMP_INV; ncode = NE; break;
- /* FUTURE: Implement UNEQ, UNLT, UNLE, UNGT, UNGE, LTGT.
- Expanders for buneq etc. weuld have to be added to ia64.md
- for this to be useful. */
+ /* Unordered relational operators do not raise FP_INVALID
+ when given a NaN operand. */
+ case UNLT: magic = QCMP_LT |QCMP_UNORD; ncode = NE; break;
+ case UNLE: magic = QCMP_LT|QCMP_EQ|QCMP_UNORD; ncode = NE; break;
+ case UNGT: magic = QCMP_GT |QCMP_UNORD; ncode = NE; break;
+ case UNGE: magic = QCMP_GT|QCMP_EQ|QCMP_UNORD; ncode = NE; break;
+ /* Not supported. */
+ case UNEQ:
+ case LTGT:
default: gcc_unreachable ();
}
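+
+ /* The magic bits simply OR together: e.g. LE above uses
+ QCMP_LT | QCMP_EQ | QCMP_INV = 8 | 4 | 1 = 13, while UNLE uses
+ QCMP_LT | QCMP_EQ | QCMP_UNORD = 8 | 4 | 2 = 14, presumably so the
+ comparison also succeeds on unordered operands without raising
+ FP_INVALID. */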
/* We always use the 16-byte scratch area provided by the caller, but
if we are a leaf function, there's no one to which we need to provide
- a scratch area. */
- if (crtl->is_leaf)
+ a scratch area. However, if the function allocates dynamic stack space,
+ the dynamic offset is computed early and contains STACK_POINTER_OFFSET,
+ so we need to cope. */
+ if (crtl->is_leaf && !cfun->calls_alloca)
total_size = MAX (0, total_size - 16);
current_frame_info.total_size = total_size;
switch (to)
{
case HARD_FRAME_POINTER_REGNUM:
- if (crtl->is_leaf)
- offset = -current_frame_info.total_size;
- else
- offset = -(current_frame_info.total_size
- - crtl->outgoing_args_size - 16);
+ offset = -current_frame_info.total_size;
+ if (!crtl->is_leaf || cfun->calls_alloca)
+ offset += 16 + crtl->outgoing_args_size;
break;
case STACK_POINTER_REGNUM:
- if (crtl->is_leaf)
- offset = 0;
- else
- offset = 16 + crtl->outgoing_args_size;
+ offset = 0;
+ if (!crtl->is_leaf || cfun->calls_alloca)
+ offset += 16 + crtl->outgoing_args_size;
break;
default:
#define BACKING_STORE_SIZE(N) ((N) > 0 ? ((N) + (N)/63 + 1) * 8 : 0)
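+/* For illustration: this allows 8 bytes per stacked register, plus one
+   extra doubleword per 63 registers (presumably covering the RSE's
+   periodic NaT collection slots) and one more for safety.  E.g. for
+   N = 96 the result is (96 + 96/63 + 1) * 8 = (96 + 1 + 1) * 8
+   = 784 bytes.  */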
/* Emit code to probe a range of stack addresses from FIRST to FIRST+SIZE,
- inclusive. These are offsets from the current stack pointer. SOL is the
- size of local registers. ??? This clobbers r2 and r3. */
+ inclusive. These are offsets from the current stack pointer. BS_SIZE
+ is the size of the backing store. ??? This clobbers r2 and r3. */
static void
-ia64_emit_probe_stack_range (HOST_WIDE_INT first, HOST_WIDE_INT size, int sol)
+ia64_emit_probe_stack_range (HOST_WIDE_INT first, HOST_WIDE_INT size,
+ int bs_size)
{
- /* On the IA-64 there is a second stack in memory, namely the Backing Store
- of the Register Stack Engine. We also need to probe it after checking
- that the 2 stacks don't overlap. */
- const int bs_size = BACKING_STORE_SIZE (sol);
rtx r2 = gen_rtx_REG (Pmode, GR_REG (2));
rtx r3 = gen_rtx_REG (Pmode, GR_REG (3));
-
- /* Detect collision of the 2 stacks if necessary. */
- if (bs_size > 0 || size > 0)
- {
- rtx p6 = gen_rtx_REG (BImode, PR_REG (6));
-
- emit_insn (gen_bsp_value (r3));
- emit_move_insn (r2, GEN_INT (-(first + size)));
-
- /* Compare current value of BSP and SP registers. */
- emit_insn (gen_rtx_SET (VOIDmode, p6,
- gen_rtx_fmt_ee (LTU, BImode,
- r3, stack_pointer_rtx)));
-
- /* Compute the address of the probe for the Backing Store (which grows
- towards higher addresses). We probe only at the first offset of
- the next page because some OS (eg Linux/ia64) only extend the
- backing store when this specific address is hit (but generate a SEGV
- on other address). Page size is the worst case (4KB). The reserve
- size is at least 4096 - (96 + 2) * 8 = 3312 bytes, which is enough.
- Also compute the address of the last probe for the memory stack
- (which grows towards lower addresses). */
- emit_insn (gen_rtx_SET (VOIDmode, r3, plus_constant (Pmode, r3, 4095)));
- emit_insn (gen_rtx_SET (VOIDmode, r2,
- gen_rtx_PLUS (Pmode, stack_pointer_rtx, r2)));
-
- /* Compare them and raise SEGV if the former has topped the latter. */
- emit_insn (gen_rtx_COND_EXEC (VOIDmode,
- gen_rtx_fmt_ee (NE, VOIDmode, p6,
- const0_rtx),
- gen_rtx_SET (VOIDmode, p6,
- gen_rtx_fmt_ee (GEU, BImode,
- r3, r2))));
- emit_insn (gen_rtx_SET (VOIDmode,
- gen_rtx_ZERO_EXTRACT (DImode, r3, GEN_INT (12),
- const0_rtx),
- const0_rtx));
- emit_insn (gen_rtx_COND_EXEC (VOIDmode,
- gen_rtx_fmt_ee (NE, VOIDmode, p6,
- const0_rtx),
- gen_rtx_TRAP_IF (VOIDmode, const1_rtx,
- GEN_INT (11))));
- }
+ rtx p6 = gen_rtx_REG (BImode, PR_REG (6));
+
+ /* On the IA-64 there is a second stack in memory, namely the Backing Store
+ of the Register Stack Engine. We also need to probe it after checking
+ that the 2 stacks don't overlap. */
+ emit_insn (gen_bsp_value (r3));
+ emit_move_insn (r2, GEN_INT (-(first + size)));
+
+ /* Compare current value of BSP and SP registers. */
+ emit_insn (gen_rtx_SET (VOIDmode, p6,
+ gen_rtx_fmt_ee (LTU, BImode,
+ r3, stack_pointer_rtx)));
+
+ /* Compute the address of the probe for the Backing Store (which grows
+ towards higher addresses). We probe only at the first offset of
+ the next page because some OSes (e.g. Linux/ia64) only extend the
+ backing store when this specific address is hit (but generate a SEGV
+ on other addresses). Page size is the worst case (4KB). The reserve
+ size is at least 4096 - (96 + 2) * 8 = 3312 bytes, which is enough.
+ Also compute the address of the last probe for the memory stack
+ (which grows towards lower addresses). */
+ emit_insn (gen_rtx_SET (VOIDmode, r3, plus_constant (Pmode, r3, 4095)));
+ emit_insn (gen_rtx_SET (VOIDmode, r2,
+ gen_rtx_PLUS (Pmode, stack_pointer_rtx, r2)));
+
+ /* Compare them and raise SEGV if the former has topped the latter. */
+ emit_insn (gen_rtx_COND_EXEC (VOIDmode,
+ gen_rtx_fmt_ee (NE, VOIDmode, p6, const0_rtx),
+ gen_rtx_SET (VOIDmode, p6,
+ gen_rtx_fmt_ee (GEU, BImode,
+ r3, r2))));
+ emit_insn (gen_rtx_SET (VOIDmode,
+ gen_rtx_ZERO_EXTRACT (DImode, r3, GEN_INT (12),
+ const0_rtx),
+ const0_rtx));
+ emit_insn (gen_rtx_COND_EXEC (VOIDmode,
+ gen_rtx_fmt_ee (NE, VOIDmode, p6, const0_rtx),
+ gen_rtx_TRAP_IF (VOIDmode, const1_rtx,
+ GEN_INT (11))));
/* Probe the Backing Store if necessary. */
if (bs_size > 0)
current_function_static_stack_size = current_frame_info.total_size;
if (flag_stack_check == STATIC_BUILTIN_STACK_CHECK)
- ia64_emit_probe_stack_range (STACK_CHECK_PROTECT,
- current_frame_info.total_size,
- current_frame_info.n_input_regs
- + current_frame_info.n_local_regs);
+ {
+ HOST_WIDE_INT size = current_frame_info.total_size;
+ int bs_size = BACKING_STORE_SIZE (current_frame_info.n_input_regs
+ + current_frame_info.n_local_regs);
+
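+    /* In other words: a leaf function that doesn't call alloca only
+       probes the part of its memory frame beyond the protection area,
+       or just the backing store when the memory frame alone stays
+       within it; any other function probes the whole frame plus the
+       backing store.  */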
+ if (crtl->is_leaf && !cfun->calls_alloca)
+ {
+ if (size > PROBE_INTERVAL && size > STACK_CHECK_PROTECT)
+ ia64_emit_probe_stack_range (STACK_CHECK_PROTECT,
+ size - STACK_CHECK_PROTECT,
+ bs_size);
+ else if (size + bs_size > STACK_CHECK_PROTECT)
+ ia64_emit_probe_stack_range (STACK_CHECK_PROTECT, 0, bs_size);
+ }
+ else if (size + bs_size > 0)
+ ia64_emit_probe_stack_range (STACK_CHECK_PROTECT, size, bs_size);
+ }
if (dump_file)
{
case UNGE:
str = "nlt";
break;
+ case UNEQ:
+ case LTGT:
+ gcc_unreachable ();
default:
str = GET_RTX_NAME (GET_CODE (x));
break;
x = find_reg_note (current_output_insn, REG_BR_PROB, 0);
if (x)
{
- int pred_val = INTVAL (XEXP (x, 0));
+ int pred_val = XINT (x, 0);
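+ /* REG_BR_PROB notes now carry the probability directly in the rtx's
+ integer field (hence XINT) instead of as a CONST_INT operand. */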
/* Guess top and bottom 10% statically predicted. */
if (pred_val < REG_BR_PROB_BASE / 50
else
which = ".sptk";
}
- else if (GET_CODE (current_output_insn) == CALL_INSN)
+ else if (CALL_P (current_output_insn))
which = ".sptk";
else
which = ".dptk";
static int
ia64_unspec_may_trap_p (const_rtx x, unsigned flags)
{
- if (GET_CODE (x) == UNSPEC)
+ switch (XINT (x, 1))
{
- switch (XINT (x, 1))
- {
- case UNSPEC_LDA:
- case UNSPEC_LDS:
- case UNSPEC_LDSA:
- case UNSPEC_LDCCLR:
- case UNSPEC_CHKACLR:
- case UNSPEC_CHKS:
- /* These unspecs are just wrappers. */
- return may_trap_p_1 (XVECEXP (x, 0, 0), flags);
- }
+ case UNSPEC_LDA:
+ case UNSPEC_LDS:
+ case UNSPEC_LDSA:
+ case UNSPEC_LDCCLR:
+ case UNSPEC_CHKACLR:
+ case UNSPEC_CHKS:
+ /* These unspecs are just wrappers. */
+ return may_trap_p_1 (XVECEXP (x, 0, 0), flags);
}
return default_unspec_may_trap_p (x, flags);
{
unsigned int i;
cl_deferred_option *opt;
- VEC(cl_deferred_option,heap) *vec
- = (VEC(cl_deferred_option,heap) *) ia64_deferred_options;
+ vec<cl_deferred_option> *v
+ = (vec<cl_deferred_option> *) ia64_deferred_options;
- FOR_EACH_VEC_ELT (cl_deferred_option, vec, i, opt)
- {
- switch (opt->opt_index)
- {
- case OPT_mfixed_range_:
- fix_range (opt->arg);
- break;
+ if (v)
+ FOR_EACH_VEC_ELT (*v, i, opt)
+ {
+ switch (opt->opt_index)
+ {
+ case OPT_mfixed_range_:
+ fix_range (opt->arg);
+ break;
- default:
- gcc_unreachable ();
- }
- }
+ default:
+ gcc_unreachable ();
+ }
+ }
if (TARGET_AUTO_PIC)
target_flags |= MASK_CONST_GP;
memset (rws_insn, 0, sizeof (rws_insn));
/* Don't bundle a call following another call. */
- if ((pat = prev_active_insn (insn))
- && GET_CODE (pat) == CALL_INSN)
+ if ((pat = prev_active_insn (insn)) && CALL_P (pat))
{
need_barrier = 1;
break;
flags.is_branch = 1;
/* Don't bundle a jump following a call. */
- if ((pat = prev_active_insn (insn))
- && GET_CODE (pat) == CALL_INSN)
+ if ((pat = prev_active_insn (insn)) && CALL_P (pat))
{
need_barrier = 1;
break;
for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
{
- if (GET_CODE (insn) == CODE_LABEL)
+ if (LABEL_P (insn))
{
if (insns_since_last_label)
last_label = insn;
insns_since_last_label = 0;
}
- else if (GET_CODE (insn) == NOTE
+ else if (NOTE_P (insn)
&& NOTE_KIND (insn) == NOTE_INSN_BASIC_BLOCK)
{
if (insns_since_last_label)
last_label = insn;
insns_since_last_label = 0;
}
- else if (GET_CODE (insn) == INSN
+ else if (NONJUMP_INSN_P (insn)
&& GET_CODE (PATTERN (insn)) == UNSPEC_VOLATILE
&& XINT (PATTERN (insn), 1) == UNSPECV_INSN_GROUP_BARRIER)
{
for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
{
- if (GET_CODE (insn) == BARRIER)
+ if (BARRIER_P (insn))
{
rtx last = prev_active_insn (insn);
if (! last)
continue;
- if (GET_CODE (last) == JUMP_INSN
- && GET_CODE (PATTERN (last)) == ADDR_DIFF_VEC)
+ if (JUMP_TABLE_DATA_P (last))
last = prev_active_insn (last);
if (recog_memoized (last) != CODE_FOR_insn_group_barrier)
emit_insn_after (gen_insn_group_barrier (GEN_INT (3)), last);
int needed = group_barrier_needed (insn);
gcc_assert (!needed);
- if (GET_CODE (insn) == CALL_INSN)
+ if (CALL_P (insn))
init_insn_group_barriers ();
stops_p [INSN_UID (insn)] = stop_before_p;
stop_before_p = 0;
&& last_scheduled_insn
&& scheduled_good_insn (last_scheduled_insn))))
|| (last_scheduled_insn
- && (GET_CODE (last_scheduled_insn) == CALL_INSN
+ && (CALL_P (last_scheduled_insn)
|| unknown_for_bundling_p (last_scheduled_insn))))
{
init_insn_group_barriers ();
state_transition (curr_state, dfa_stop_insn);
if (TARGET_EARLY_STOP_BITS)
*sort_p = (last_scheduled_insn == NULL_RTX
- || GET_CODE (last_scheduled_insn) != CALL_INSN);
+ || ! CALL_P (last_scheduled_insn));
else
*sort_p = 0;
return 1;
return !(mflag_sched_spec_control_ldc && mflag_sched_spec_ldc);
}
-/* Generate (or regenerate, if (MUTATE_P)) recovery check for INSN.
- If (LABEL != 0 || MUTATE_P), generate branchy recovery check.
- Otherwise, generate a simple check. */
+/* Generate (or regenerate) a recovery check for INSN. */
static rtx
ia64_gen_spec_check (rtx insn, rtx label, ds_t ds)
{
}
}
-/* Hash table of the bundle states. The key is dfa_state and insn_num
- of the bundle states. */
+/* Hashtable helpers. */
-static htab_t bundle_state_table;
+struct bundle_state_hasher : typed_noop_remove <bundle_state>
+{
+ typedef bundle_state value_type;
+ typedef bundle_state compare_type;
+ static inline hashval_t hash (const value_type *);
+ static inline bool equal (const value_type *, const compare_type *);
+};
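+
+/* Note that typed_noop_remove means the table never frees its entries;
+   bundle states are allocated and released separately (see
+   free_bundle_state), just as with the old htab_create call that passed
+   a null delete function.  */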
/* The function returns hash of BUNDLE_STATE. */
-static unsigned
-bundle_state_hash (const void *bundle_state)
+inline hashval_t
+bundle_state_hasher::hash (const value_type *state)
{
- const struct bundle_state *const state
- = (const struct bundle_state *) bundle_state;
unsigned result, i;
for (result = i = 0; i < dfa_state_size; i++)
-/* The function returns nonzero if the bundle state keys are equal. */
+/* The function returns true if the bundle state keys are equal. */
-static int
-bundle_state_eq_p (const void *bundle_state_1, const void *bundle_state_2)
+inline bool
+bundle_state_hasher::equal (const value_type *state1,
+ const compare_type *state2)
{
- const struct bundle_state *const state1
- = (const struct bundle_state *) bundle_state_1;
- const struct bundle_state *const state2
- = (const struct bundle_state *) bundle_state_2;
-
return (state1->insn_num == state2->insn_num
&& memcmp (state1->dfa_state, state2->dfa_state,
dfa_state_size) == 0);
}
+/* Hash table of the bundle states. The key is dfa_state and insn_num
+ of the bundle states. */
+
+static hash_table <bundle_state_hasher> bundle_state_table;
+
/* The function inserts the BUNDLE_STATE into the hash table. The
function returns nonzero if the bundle has been inserted into the
table. The table contains the best bundle state with given key. */
static int
insert_bundle_state (struct bundle_state *bundle_state)
{
- void **entry_ptr;
+ struct bundle_state **entry_ptr;
- entry_ptr = htab_find_slot (bundle_state_table, bundle_state, INSERT);
+ entry_ptr = bundle_state_table.find_slot (bundle_state, INSERT);
if (*entry_ptr == NULL)
{
bundle_state->next = index_to_bundle_states [bundle_state->insn_num];
index_to_bundle_states [bundle_state->insn_num] = bundle_state;
- *entry_ptr = (void *) bundle_state;
+ *entry_ptr = bundle_state;
return TRUE;
}
- else if (bundle_state->cost < ((struct bundle_state *) *entry_ptr)->cost
- || (bundle_state->cost == ((struct bundle_state *) *entry_ptr)->cost
- && (((struct bundle_state *)*entry_ptr)->accumulated_insns_num
+ else if (bundle_state->cost < (*entry_ptr)->cost
+ || (bundle_state->cost == (*entry_ptr)->cost
+ && ((*entry_ptr)->accumulated_insns_num
> bundle_state->accumulated_insns_num
- || (((struct bundle_state *)
- *entry_ptr)->accumulated_insns_num
+ || ((*entry_ptr)->accumulated_insns_num
== bundle_state->accumulated_insns_num
- && (((struct bundle_state *)
- *entry_ptr)->branch_deviation
+ && ((*entry_ptr)->branch_deviation
> bundle_state->branch_deviation
- || (((struct bundle_state *)
- *entry_ptr)->branch_deviation
+ || ((*entry_ptr)->branch_deviation
== bundle_state->branch_deviation
- && ((struct bundle_state *)
- *entry_ptr)->middle_bundle_stops
+ && (*entry_ptr)->middle_bundle_stops
> bundle_state->middle_bundle_stops))))))
{
struct bundle_state temp;
- temp = *(struct bundle_state *) *entry_ptr;
- *(struct bundle_state *) *entry_ptr = *bundle_state;
- ((struct bundle_state *) *entry_ptr)->next = temp.next;
+ temp = **entry_ptr;
+ **entry_ptr = *bundle_state;
+ (*entry_ptr)->next = temp.next;
*bundle_state = temp;
}
return FALSE;
static void
initiate_bundle_state_table (void)
{
- bundle_state_table = htab_create (50, bundle_state_hash, bundle_state_eq_p,
- (htab_del) 0);
+ bundle_state_table.create (50);
}
/* Finish work with the hash table. */
static void
finish_bundle_state_table (void)
{
- htab_delete (bundle_state_table);
+ bundle_state_table.dispose ();
}
\f
{
do
insn = next_active_insn (insn);
- while (GET_CODE (insn) == INSN
+ while (NONJUMP_INSN_P (insn)
&& get_attr_empty (insn) == EMPTY_YES);
- if (GET_CODE (insn) == CALL_INSN)
+ if (CALL_P (insn))
note = find_reg_note (insn, REG_EH_REGION, NULL_RTX);
else if (note)
{
insn != current_sched_info->next_tail;
insn = NEXT_INSN (insn))
{
- if (GET_CODE (insn) == BARRIER)
+ if (BARRIER_P (insn))
{
rtx last = prev_active_insn (insn);
if (! last)
continue;
- if (GET_CODE (last) == JUMP_INSN
- && GET_CODE (PATTERN (last)) == ADDR_DIFF_VEC)
+ if (JUMP_TABLE_DATA_P (last))
last = prev_active_insn (last);
if (recog_memoized (last) != CODE_FOR_insn_group_barrier)
emit_insn_after (gen_insn_group_barrier (GEN_INT (3)), last);
else if (recog_memoized (insn) >= 0
&& important_for_bundling_p (insn))
seen_good_insn = 1;
- need_barrier_p = (GET_CODE (insn) == CALL_INSN
- || unknown_for_bundling_p (insn));
+ need_barrier_p = (CALL_P (insn) || unknown_for_bundling_p (insn));
}
}
}
rtx head = BB_HEAD (bb);
/* We only need such notes at code labels. */
- if (GET_CODE (head) != CODE_LABEL)
+ if (! LABEL_P (head))
continue;
if (NOTE_INSN_BASIC_BLOCK_P (NEXT_INSN (head)))
head = NEXT_INSN (head);
while (1)
{
- if (GET_CODE (insn) == CALL_INSN
+ if (CALL_P (insn)
&& GET_CODE (PATTERN (insn)) == COND_EXEC
&& find_reg_note (insn, REG_NORETURN, NULL_RTX))
{
if (insn)
{
/* Skip over insns that expand to nothing. */
- while (GET_CODE (insn) == INSN
+ while (NONJUMP_INSN_P (insn)
&& get_attr_empty (insn) == EMPTY_YES)
{
if (GET_CODE (PATTERN (insn)) == UNSPEC_VOLATILE
saw_stop = 1;
insn = prev_active_insn (insn);
}
- if (GET_CODE (insn) == CALL_INSN)
+ if (CALL_P (insn))
{
if (! saw_stop)
emit_insn (gen_insn_group_barrier (GEN_INT (3)));
}
}
- if (GET_CODE (insn) == NOTE || ! RTX_FRAME_RELATED_P (insn))
+ if (NOTE_P (insn) || ! RTX_FRAME_RELATED_P (insn))
return;
/* Look for the ALLOC insn. */
instruction scheduling worth while. Note that use_thunk calls
assemble_start_function and assemble_end_function. */
- insn_locators_alloc ();
emit_all_insn_group_barriers (NULL);
insn = get_insns ();
shorten_branches (insn);