/* Definitions of target machine for GNU compiler.
- Copyright (C) 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008,
- 2009, 2010, 2011
- Free Software Foundation, Inc.
+ Copyright (C) 1999-2013 Free Software Foundation, Inc.
Contributed by James E. Wilson <wilson@cygnus.com> and
David Mosberger <davidm@hpl.hp.com>.
#include "timevar.h"
#include "target.h"
#include "target-def.h"
+#include "common/common-target.h"
#include "tm_p.h"
#include "hashtab.h"
#include "langhooks.h"
-#include "cfglayout.h"
#include "gimple.h"
#include "intl.h"
#include "df.h"
#include "tm-constrs.h"
#include "sel-sched.h"
#include "reload.h"
-#include "dwarf2out.h"
#include "opts.h"
+#include "dumpfile.h"
/* This is used for communication between ASM_OUTPUT_LABEL and
ASM_OUTPUT_LABELREF. */
static rtx get_next_important_insn (rtx, rtx);
static bool important_for_bundling_p (rtx);
+static bool unknown_for_bundling_p (rtx);
static void bundling (FILE *, int, rtx, rtx);
static void ia64_output_mi_thunk (FILE *, tree, HOST_WIDE_INT,
static enum machine_mode ia64_c_mode_for_suffix (char);
static void ia64_trampoline_init (rtx, tree, rtx);
static void ia64_override_options_after_change (void);
+static bool ia64_member_type_forces_blk (const_tree, enum machine_mode);
static tree ia64_builtin_decl (unsigned, bool);
#undef TARGET_RTX_COSTS
#define TARGET_RTX_COSTS ia64_rtx_costs
#undef TARGET_ADDRESS_COST
-#define TARGET_ADDRESS_COST hook_int_rtx_bool_0
+#define TARGET_ADDRESS_COST hook_int_rtx_mode_as_bool_0
#undef TARGET_UNSPEC_MAY_TRAP_P
#define TARGET_UNSPEC_MAY_TRAP_P ia64_unspec_may_trap_p
#undef TARGET_GET_RAW_ARG_MODE
#define TARGET_GET_RAW_ARG_MODE ia64_get_reg_raw_mode
+#undef TARGET_MEMBER_TYPE_FORCES_BLK
+#define TARGET_MEMBER_TYPE_FORCES_BLK ia64_member_type_forces_blk
+
#undef TARGET_GIMPLIFY_VA_ARG_EXPR
#define TARGET_GIMPLIFY_VA_ARG_EXPR ia64_gimplify_va_arg
return NULL_TREE;
}
-/* The section must have global and overlaid attributes. */
-#define SECTION_VMS_OVERLAY SECTION_MACH_DEP
-
/* Part of the low-level implementation of the DEC Ada pragma Common_Object,
   which enables the shared use of variables stored in overlaid linker areas
   corresponding to the use of Fortran COMMON.  */
bool *no_add_attrs)
{
tree decl = *node;
- tree id, val;
- if (! DECL_P (decl))
- abort ();
+ tree id;
+
+ gcc_assert (DECL_P (decl));
DECL_COMMON (decl) = 1;
id = TREE_VALUE (args);
- if (TREE_CODE (id) == IDENTIFIER_NODE)
- val = build_string (IDENTIFIER_LENGTH (id), IDENTIFIER_POINTER (id));
- else if (TREE_CODE (id) == STRING_CST)
- val = id;
- else
+ if (TREE_CODE (id) != IDENTIFIER_NODE && TREE_CODE (id) != STRING_CST)
{
- warning (OPT_Wattributes,
- "%qE attribute requires a string constant argument", name);
+ error ("%qE attribute requires a string constant argument", name);
*no_add_attrs = true;
return NULL_TREE;
}
- DECL_SECTION_NAME (decl) = val;
return NULL_TREE;
}
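+/* For illustration only (the declaration below is hypothetical): the
+   handler above accepts a string (or identifier) argument, as in
+
+     int bar __attribute__((common_object ("FOO")));
+
+   and rejects any other argument form with the error emitted above.  */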
{
tree attr = DECL_ATTRIBUTES (decl);
- /* As common_object attribute set DECL_SECTION_NAME check it before
- looking up the attribute. */
- if (DECL_SECTION_NAME (decl) && attr)
+ if (attr)
attr = lookup_attribute ("common_object", attr);
- else
- attr = NULL_TREE;
-
- if (!attr)
+ if (attr)
{
- /* Code from elfos.h. */
- fprintf (file, "%s", COMMON_ASM_OP);
- assemble_name (file, name);
- fprintf (file, ","HOST_WIDE_INT_PRINT_UNSIGNED",%u\n",
- size, align / BITS_PER_UNIT);
- }
- else
- {
- ASM_OUTPUT_ALIGN (file, floor_log2 (align / BITS_PER_UNIT));
- ASM_OUTPUT_LABEL (file, name);
- ASM_OUTPUT_SKIP (file, size ? size : 1);
- }
-}
+ tree id = TREE_VALUE (TREE_VALUE (attr));
+ const char *name;
-/* Definition of TARGET_ASM_NAMED_SECTION for VMS. */
+ if (TREE_CODE (id) == IDENTIFIER_NODE)
+ name = IDENTIFIER_POINTER (id);
+ else if (TREE_CODE (id) == STRING_CST)
+ name = TREE_STRING_POINTER (id);
+ else
+ abort ();
-void
-ia64_vms_elf_asm_named_section (const char *name, unsigned int flags,
- tree decl)
-{
- if (!(flags & SECTION_VMS_OVERLAY))
- {
- default_elf_asm_named_section (name, flags, decl);
- return;
+ fprintf (file, "\t.vms_common\t\"%s\",", name);
}
- if (flags != (SECTION_VMS_OVERLAY | SECTION_WRITE))
- abort ();
+ else
+ fprintf (file, "%s", COMMON_ASM_OP);
- if (flags & SECTION_DECLARED)
- {
- fprintf (asm_out_file, "\t.section\t%s\n", name);
- return;
- }
+ /* Code from elfos.h. */
+ assemble_name (file, name);
+ fprintf (file, ","HOST_WIDE_INT_PRINT_UNSIGNED",%u",
+ size, align / BITS_PER_UNIT);
- fprintf (asm_out_file, "\t.section\t%s,\"awgO\"\n", name);
+ fputc ('\n', file);
}
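+
+/* For illustration only: for a hypothetical int variable "bar" declared
+   with __attribute__((common_object ("FOO"))), the code above emits
+   roughly
+
+	.vms_common	"FOO",bar,4,4
+
+   while a plain common symbol falls back to the usual COMMON_ASM_OP
+   form from elfos.h.  */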
static void
if (lo != 0)
{
addend = lo;
- src = plus_constant (XEXP (XEXP (src, 0), 0), hi);
+ src = plus_constant (Pmode, XEXP (XEXP (src, 0), 0), hi);
}
}
addend_lo = ((addend & 0x3fff) ^ 0x2000) - 0x2000;
addend_hi = addend - addend_lo;
- op1 = plus_constant (op1, addend_hi);
+ op1 = plus_constant (Pmode, op1, addend_hi);
addend = addend_lo;
tmp = gen_reg_rtx (Pmode);
if (addend_lo != 0)
{
- op1 = plus_constant (sym, addend_hi);
+ op1 = plus_constant (mode, sym, addend_hi);
addend = addend_lo;
}
else
(in, DImode, gen_rtx_POST_INC (Pmode, base), 0);
out[1] = adjust_automodify_address
(in, DImode,
- gen_rtx_POST_MODIFY (Pmode, base, plus_constant (base, -24)),
+ gen_rtx_POST_MODIFY (Pmode, base,
+ plus_constant (Pmode, base, -24)),
8);
break;
&& GET_MODE (SUBREG_REG (in)) == TImode
&& GET_CODE (SUBREG_REG (in)) == REG)
{
- rtx memt = assign_stack_temp (TImode, 16, 0);
+ rtx memt = assign_stack_temp (TImode, 16);
emit_move_insn (memt, SUBREG_REG (in));
return adjust_address (memt, mode, 0);
}
else if (force && GET_CODE (in) == REG)
{
- rtx memx = assign_stack_temp (mode, 16, 0);
+ rtx memx = assign_stack_temp (mode, 16);
emit_move_insn (memx, in);
return memx;
}
memt = adjust_address (in, TImode, 0);
else
{
- memt = assign_stack_temp (TImode, 16, 0);
+ memt = assign_stack_temp (TImode, 16);
memx = adjust_address (memt, mode, 0);
emit_move_insn (memx, in);
}
emit_move_insn (operands[0], t);
}
-/* Emit a signed or unsigned V8QI dot product operation. */
-
-void
-ia64_expand_dot_prod_v8qi (rtx operands[4], bool unsignedp)
-{
- rtx op1, op2, sn1, sn2, l1, l2, h1, h2;
- rtx p1, p2, p3, p4, s1, s2, s3;
-
- op1 = operands[1];
- op2 = operands[2];
- sn1 = ia64_unpack_sign (op1, unsignedp);
- sn2 = ia64_unpack_sign (op2, unsignedp);
-
- l1 = gen_reg_rtx (V4HImode);
- l2 = gen_reg_rtx (V4HImode);
- h1 = gen_reg_rtx (V4HImode);
- h2 = gen_reg_rtx (V4HImode);
- ia64_unpack_assemble (l1, op1, sn1, false);
- ia64_unpack_assemble (l2, op2, sn2, false);
- ia64_unpack_assemble (h1, op1, sn1, true);
- ia64_unpack_assemble (h2, op2, sn2, true);
-
- p1 = gen_reg_rtx (V2SImode);
- p2 = gen_reg_rtx (V2SImode);
- p3 = gen_reg_rtx (V2SImode);
- p4 = gen_reg_rtx (V2SImode);
- emit_insn (gen_pmpy2_even (p1, l1, l2));
- emit_insn (gen_pmpy2_even (p2, h1, h2));
- emit_insn (gen_pmpy2_odd (p3, l1, l2));
- emit_insn (gen_pmpy2_odd (p4, h1, h2));
-
- s1 = gen_reg_rtx (V2SImode);
- s2 = gen_reg_rtx (V2SImode);
- s3 = gen_reg_rtx (V2SImode);
- emit_insn (gen_addv2si3 (s1, p1, p2));
- emit_insn (gen_addv2si3 (s2, p3, p4));
- emit_insn (gen_addv2si3 (s3, s1, operands[3]));
- emit_insn (gen_addv2si3 (operands[0], s2, s3));
-}
-
/* Emit the appropriate sequence for a call. */
void
if (regno >= LOC_REG (0) && regno < LOC_REG (80 - frame_pointer_needed)
&& current_frame_info.n_local_regs < regno - LOC_REG (0) + 1)
current_frame_info.n_local_regs = regno - LOC_REG (0) + 1;
- else if (current_function_is_leaf
+ else if (crtl->is_leaf
&& regno >= GR_REG (1) && regno <= GR_REG (31))
current_frame_info.gr_used_mask |= 1 << regno;
/* If this is a leaf function, first try an otherwise unused
call-clobbered register. */
- if (current_function_is_leaf)
+ if (crtl->is_leaf)
{
for (regno = GR_REG (1); regno <= GR_REG (31); regno++)
if (! df_regs_ever_live_p (regno)
if (cfun->machine->ia64_eh_epilogue_bsp)
mark_reg_gr_used_mask (cfun->machine->ia64_eh_epilogue_bsp, NULL);
+  /* Static stack checking uses r2 and r3 (mask bits 2 and 3).  */
+ if (flag_stack_check == STATIC_BUILTIN_STACK_CHECK)
+ current_frame_info.gr_used_mask |= 0xc;
+
/* Find the size of the register stack frame. We have only 80 local
registers, because we reserve 8 for the inputs and 8 for the
outputs. */
}
}
- if (! current_function_is_leaf)
+ if (! crtl->is_leaf)
{
/* Emit a save of BR0 if we call other functions. Do this even
if this function doesn't return, as EH depends on this to be
a scratch area. However, if the function allocates dynamic stack space,
the dynamic offset is computed early and contains STACK_POINTER_OFFSET,
so we need to cope. */
- if (current_function_is_leaf && !cfun->calls_alloca)
+ if (crtl->is_leaf && !cfun->calls_alloca)
total_size = MAX (0, total_size - 16);
current_frame_info.total_size = total_size;
bool
ia64_can_eliminate (const int from ATTRIBUTE_UNUSED, const int to)
{
- return (to == BR_REG (0) ? current_function_is_leaf : true);
+ return (to == BR_REG (0) ? crtl->is_leaf : true);
}
/* Compute the initial difference between the specified pair of registers. */
{
case HARD_FRAME_POINTER_REGNUM:
offset = -current_frame_info.total_size;
- if (!current_function_is_leaf || cfun->calls_alloca)
+ if (!crtl->is_leaf || cfun->calls_alloca)
offset += 16 + crtl->outgoing_args_size;
break;
case STACK_POINTER_REGNUM:
offset = 0;
- if (!current_function_is_leaf || cfun->calls_alloca)
+ if (!crtl->is_leaf || cfun->calls_alloca)
offset += 16 + crtl->outgoing_args_size;
break;
add_reg_note (insn, REG_CFA_OFFSET,
gen_rtx_SET (VOIDmode,
gen_rtx_MEM (GET_MODE (reg),
- plus_constant (base, off)),
+ plus_constant (Pmode,
+ base, off)),
frame_reg));
}
}
return gen_fr_restore (dest, src);
}
+#define PROBE_INTERVAL (1 << STACK_CHECK_PROBE_INTERVAL_EXP)
+
+/* See Table 6.2 of the IA-64 Software Developer Manual, Volume 2. */
+#define BACKING_STORE_SIZE(N) ((N) > 0 ? ((N) + (N)/63 + 1) * 8 : 0)
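+
+/* As a worked example of the formula above: N = 96 stacked registers
+   need 96 spill slots plus 96/63 + 1 = 2 additional slots (one RSE NaT
+   collection per 63 registers, plus one for the partial group), i.e.
+   (96 + 2) * 8 = 784 bytes of backing store; this worst case appears
+   in the reserve-size computation below.  */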
+
+/* Emit code to probe a range of stack addresses from FIRST to FIRST+SIZE,
+   inclusive.  These are offsets from the current stack pointer.  SOL is
+   the size of the local register frame, in registers.  ??? This clobbers
+   r2 and r3.  */
+
+static void
+ia64_emit_probe_stack_range (HOST_WIDE_INT first, HOST_WIDE_INT size, int sol)
+{
+ /* On the IA-64 there is a second stack in memory, namely the Backing Store
+ of the Register Stack Engine. We also need to probe it after checking
+ that the 2 stacks don't overlap. */
+ const int bs_size = BACKING_STORE_SIZE (sol);
+ rtx r2 = gen_rtx_REG (Pmode, GR_REG (2));
+ rtx r3 = gen_rtx_REG (Pmode, GR_REG (3));
+
+ /* Detect collision of the 2 stacks if necessary. */
+ if (bs_size > 0 || size > 0)
+ {
+ rtx p6 = gen_rtx_REG (BImode, PR_REG (6));
+
+ emit_insn (gen_bsp_value (r3));
+ emit_move_insn (r2, GEN_INT (-(first + size)));
+
+ /* Compare current value of BSP and SP registers. */
+ emit_insn (gen_rtx_SET (VOIDmode, p6,
+ gen_rtx_fmt_ee (LTU, BImode,
+ r3, stack_pointer_rtx)));
+
+      /* Compute the address of the probe for the Backing Store (which grows
+         towards higher addresses).  We probe only at the first offset of
+         the next page because some OSes (e.g. Linux/ia64) only extend the
+         backing store when this specific address is hit (but generate a
+         SEGV on other addresses).  Page size is the worst case (4KB).  The
+         reserve size is at least 4096 - (96 + 2) * 8 = 3312 bytes, which
+         is enough.  Also compute the address of the last probe for the
+         memory stack (which grows towards lower addresses).  */
+ emit_insn (gen_rtx_SET (VOIDmode, r3, plus_constant (Pmode, r3, 4095)));
+ emit_insn (gen_rtx_SET (VOIDmode, r2,
+ gen_rtx_PLUS (Pmode, stack_pointer_rtx, r2)));
+
+ /* Compare them and raise SEGV if the former has topped the latter. */
+ emit_insn (gen_rtx_COND_EXEC (VOIDmode,
+ gen_rtx_fmt_ee (NE, VOIDmode, p6,
+ const0_rtx),
+ gen_rtx_SET (VOIDmode, p6,
+ gen_rtx_fmt_ee (GEU, BImode,
+ r3, r2))));
+ emit_insn (gen_rtx_SET (VOIDmode,
+ gen_rtx_ZERO_EXTRACT (DImode, r3, GEN_INT (12),
+ const0_rtx),
+ const0_rtx));
+ emit_insn (gen_rtx_COND_EXEC (VOIDmode,
+ gen_rtx_fmt_ee (NE, VOIDmode, p6,
+ const0_rtx),
+ gen_rtx_TRAP_IF (VOIDmode, const1_rtx,
+ GEN_INT (11))));
+ }
+
+ /* Probe the Backing Store if necessary. */
+ if (bs_size > 0)
+ emit_stack_probe (r3);
+
+ /* Probe the memory stack if necessary. */
+ if (size == 0)
+ ;
+
+ /* See if we have a constant small number of probes to generate. If so,
+ that's the easy case. */
+ else if (size <= PROBE_INTERVAL)
+ emit_stack_probe (r2);
+
+  /* The run-time loop is made up of 8 insns in the generic case while this
+     compile-time loop is made up of 5+2*(n-2) insns for n intervals.  */
+ else if (size <= 4 * PROBE_INTERVAL)
+ {
+ HOST_WIDE_INT i;
+
+ emit_move_insn (r2, GEN_INT (-(first + PROBE_INTERVAL)));
+ emit_insn (gen_rtx_SET (VOIDmode, r2,
+ gen_rtx_PLUS (Pmode, stack_pointer_rtx, r2)));
+ emit_stack_probe (r2);
+
+ /* Probe at FIRST + N * PROBE_INTERVAL for values of N from 2 until
+ it exceeds SIZE. If only two probes are needed, this will not
+ generate any code. Then probe at FIRST + SIZE. */
+ for (i = 2 * PROBE_INTERVAL; i < size; i += PROBE_INTERVAL)
+ {
+ emit_insn (gen_rtx_SET (VOIDmode, r2,
+ plus_constant (Pmode, r2, -PROBE_INTERVAL)));
+ emit_stack_probe (r2);
+ }
+
+ emit_insn (gen_rtx_SET (VOIDmode, r2,
+ plus_constant (Pmode, r2,
+ (i - PROBE_INTERVAL) - size)));
+ emit_stack_probe (r2);
+ }
+
+ /* Otherwise, do the same as above, but in a loop. Note that we must be
+ extra careful with variables wrapping around because we might be at
+ the very top (or the very bottom) of the address space and we have
+ to be able to handle this case properly; in particular, we use an
+ equality test for the loop condition. */
+ else
+ {
+ HOST_WIDE_INT rounded_size;
+
+ emit_move_insn (r2, GEN_INT (-first));
+
+
+ /* Step 1: round SIZE to the previous multiple of the interval. */
+
+ rounded_size = size & -PROBE_INTERVAL;
+
+
+ /* Step 2: compute initial and final value of the loop counter. */
+
+ /* TEST_ADDR = SP + FIRST. */
+ emit_insn (gen_rtx_SET (VOIDmode, r2,
+ gen_rtx_PLUS (Pmode, stack_pointer_rtx, r2)));
+
+ /* LAST_ADDR = SP + FIRST + ROUNDED_SIZE. */
+ if (rounded_size > (1 << 21))
+ {
+ emit_move_insn (r3, GEN_INT (-rounded_size));
+ emit_insn (gen_rtx_SET (VOIDmode, r3, gen_rtx_PLUS (Pmode, r2, r3)));
+ }
+ else
+ emit_insn (gen_rtx_SET (VOIDmode, r3,
+ gen_rtx_PLUS (Pmode, r2,
+ GEN_INT (-rounded_size))));
+
+
+ /* Step 3: the loop
+
+ while (TEST_ADDR != LAST_ADDR)
+ {
+ TEST_ADDR = TEST_ADDR + PROBE_INTERVAL
+ probe at TEST_ADDR
+ }
+
+     probes at FIRST + N * PROBE_INTERVAL for values of N from 1
+     until N * PROBE_INTERVAL is equal to ROUNDED_SIZE.  */
+
+ emit_insn (gen_probe_stack_range (r2, r2, r3));
+
+
+ /* Step 4: probe at FIRST + SIZE if we cannot assert at compile-time
+ that SIZE is equal to ROUNDED_SIZE. */
+
+      /* Probe at FIRST + SIZE.  */
+ if (size != rounded_size)
+ {
+ emit_insn (gen_rtx_SET (VOIDmode, r2,
+ plus_constant (Pmode, r2,
+ rounded_size - size)));
+ emit_stack_probe (r2);
+ }
+ }
+
+ /* Make sure nothing is scheduled before we are done. */
+ emit_insn (gen_blockage ());
+}
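+
+/* A worked example (values illustrative only): with the default 4KB
+   PROBE_INTERVAL, FIRST = 16384 and SIZE = 3 * PROBE_INTERVAL, the
+   unrolled case above probes the memory stack at sp - 20480 and
+   sp - 24576, then at sp - (FIRST + SIZE) = sp - 28672.  */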
+
+/* Probe a range of stack addresses from REG1 to REG2 inclusive. These are
+ absolute addresses. */
+
+const char *
+output_probe_stack_range (rtx reg1, rtx reg2)
+{
+ static int labelno = 0;
+ char loop_lab[32], end_lab[32];
+ rtx xops[3];
+
+ ASM_GENERATE_INTERNAL_LABEL (loop_lab, "LPSRL", labelno);
+ ASM_GENERATE_INTERNAL_LABEL (end_lab, "LPSRE", labelno++);
+
+ ASM_OUTPUT_INTERNAL_LABEL (asm_out_file, loop_lab);
+
+ /* Jump to END_LAB if TEST_ADDR == LAST_ADDR. */
+ xops[0] = reg1;
+ xops[1] = reg2;
+ xops[2] = gen_rtx_REG (BImode, PR_REG (6));
+ output_asm_insn ("cmp.eq %2, %I2 = %0, %1", xops);
+ fprintf (asm_out_file, "\t(%s) br.cond.dpnt ", reg_names [REGNO (xops[2])]);
+ assemble_name_raw (asm_out_file, end_lab);
+ fputc ('\n', asm_out_file);
+
+ /* TEST_ADDR = TEST_ADDR + PROBE_INTERVAL. */
+ xops[1] = GEN_INT (-PROBE_INTERVAL);
+ output_asm_insn ("addl %0 = %1, %0", xops);
+ fputs ("\t;;\n", asm_out_file);
+
+ /* Probe at TEST_ADDR and branch. */
+ output_asm_insn ("probe.w.fault %0, 0", xops);
+ fprintf (asm_out_file, "\tbr ");
+ assemble_name_raw (asm_out_file, loop_lab);
+ fputc ('\n', asm_out_file);
+
+ ASM_OUTPUT_INTERNAL_LABEL (asm_out_file, end_lab);
+
+ return "";
+}
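+
+/* For reference, with a 4KB probe interval the loop emitted above looks
+   roughly like this (label numbers are arbitrary):
+
+	.LPSRL0:
+	cmp.eq p6, p7 = r2, r3
+	(p6) br.cond.dpnt .LPSRE0
+	addl r2 = -4096, r2
+	;;
+	probe.w.fault r2, 0
+	br .LPSRL0
+	.LPSRE0:  */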
+
/* Called after register allocation to add any instructions needed for the
prologue. Using a prologue insn is favored compared to putting all of the
instructions in output_function_prologue(), since it allows the scheduler
Also any insns generated here should have RTX_FRAME_RELATED_P(insn) = 1
so that the debug info generation code can handle them properly.
- The register save area is layed out like so:
+ The register save area is laid out like so:
cfa+16
[ varargs spill area ]
[ fr register spill area ]
if (flag_stack_usage_info)
current_function_static_stack_size = current_frame_info.total_size;
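+
+  /* With -fstack-check, probe the stack for the whole static frame
+     beyond the protection area.  The last argument is SOL, the size of
+     the local register frame, which the backing-store probe needs.  */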
+ if (flag_stack_check == STATIC_BUILTIN_STACK_CHECK)
+ ia64_emit_probe_stack_range (STACK_CHECK_PROTECT,
+ current_frame_info.total_size,
+ current_frame_info.n_input_regs
+ + current_frame_info.n_local_regs);
+
if (dump_file)
{
fprintf (dump_file, "ia64 frame related registers "
ia64_start_function (FILE *file, const char *fnname,
tree decl ATTRIBUTE_UNUSED)
{
-#if VMS_DEBUGGING_INFO
- if (vms_debug_main
- && debug_info_level > DINFO_LEVEL_NONE
- && strncmp (vms_debug_main, fnname, strlen (vms_debug_main)) == 0)
- {
- targetm.asm_out.globalize_label (asm_out_file, VMS_DEBUG_MAIN_POINTER);
- ASM_OUTPUT_DEF (asm_out_file, VMS_DEBUG_MAIN_POINTER, fnname);
- dwarf2out_vms_debug_main_pointer ();
- vms_debug_main = 0;
- }
+#if TARGET_ABI_OPEN_VMS
+ vms_start_function (fnname);
#endif
fputs ("\t.proc ", file);
emit_insn (gen_adddi3 (addr_reg, addr_reg, eight));
m_tramp = adjust_automodify_address (m_tramp, VOIDmode, NULL, 8);
- emit_move_insn (m_tramp, force_reg (Pmode, plus_constant (addr, 16)));
+ emit_move_insn (m_tramp, force_reg (Pmode, plus_constant (Pmode, addr, 16)));
emit_insn (gen_adddi3 (addr_reg, addr_reg, eight));
m_tramp = adjust_automodify_address (m_tramp, VOIDmode, NULL, 8);
if (cum->words + offset >= MAX_ARGUMENT_SLOTS)
return 0;
+  /* On OpenVMS, an argument is passed either in Rn or in Fn.  */
+ if (TARGET_ABI_OPEN_VMS)
+ {
+ if (FLOAT_MODE_P (mode))
+ return gen_rtx_REG (mode, FR_ARG_FIRST + cum->words);
+ else
+ return gen_rtx_REG (mode, basereg + cum->words);
+ }
+
/* Check for and handle homogeneous FP aggregates. */
if (type)
hfa_mode = hfa_element_mode (type, 0);
return gen_rtx_PARALLEL (mode, gen_rtvec_v (i, loc));
}
- /* On OpenVMS variable argument is either in Rn or Fn. */
- else if (TARGET_ABI_OPEN_VMS && named == 0)
- {
- if (FLOAT_MODE_P (mode))
- return gen_rtx_REG (mode, FR_ARG_FIRST + cum->words);
- else
- return gen_rtx_REG (mode, basereg + cum->words);
- }
-
/* Integral and aggregates go in general registers. If we have run out of
FR registers, then FP values must also go in general registers. This can
happen when we have a SFmode HFA. */
cum->atypes[cum->words] = ia64_arg_type (mode);
cum->words += words + offset;
+  /* On OpenVMS, an argument is passed either in Rn or in Fn.  */
+ if (TARGET_ABI_OPEN_VMS)
+ {
+ cum->int_regs = cum->words;
+ cum->fp_regs = cum->words;
+ return;
+ }
+
/* Check for and handle homogeneous FP aggregates. */
if (type)
hfa_mode = hfa_element_mode (type, 0);
cum->fp_regs = fp_regs;
}
- /* On OpenVMS variable argument is either in Rn or Fn. */
- else if (TARGET_ABI_OPEN_VMS && named == 0)
- {
- cum->int_regs = cum->words;
- cum->fp_regs = cum->words;
- }
-
/* Integral and aggregates go in general registers. So do TFmode FP values.
If we have run out of FR registers, then other FP values must also go in
general registers. This can happen when we have a SFmode HFA. */
static int
ia64_unspec_may_trap_p (const_rtx x, unsigned flags)
{
- if (GET_CODE (x) == UNSPEC)
+ switch (XINT (x, 1))
{
- switch (XINT (x, 1))
- {
- case UNSPEC_LDA:
- case UNSPEC_LDS:
- case UNSPEC_LDSA:
- case UNSPEC_LDCCLR:
- case UNSPEC_CHKACLR:
- case UNSPEC_CHKS:
- /* These unspecs are just wrappers. */
- return may_trap_p_1 (XVECEXP (x, 0, 0), flags);
- }
+ case UNSPEC_LDA:
+ case UNSPEC_LDS:
+ case UNSPEC_LDSA:
+ case UNSPEC_LDCCLR:
+ case UNSPEC_CHKACLR:
+ case UNSPEC_CHKS:
+ /* These unspecs are just wrappers. */
+ return may_trap_p_1 (XVECEXP (x, 0, 0), flags);
}
return default_unspec_may_trap_p (x, flags);
{
unsigned int i;
cl_deferred_option *opt;
- VEC(cl_deferred_option,heap) *vec
- = (VEC(cl_deferred_option,heap) *) ia64_deferred_options;
+ vec<cl_deferred_option> *v
+ = (vec<cl_deferred_option> *) ia64_deferred_options;
- FOR_EACH_VEC_ELT (cl_deferred_option, vec, i, opt)
- {
- switch (opt->opt_index)
- {
- case OPT_mfixed_range_:
- fix_range (opt->arg);
- break;
+ if (v)
+ FOR_EACH_VEC_ELT (*v, i, opt)
+ {
+ switch (opt->opt_index)
+ {
+ case OPT_mfixed_range_:
+ fix_range (opt->arg);
+ break;
- default:
- gcc_unreachable ();
- }
- }
+ default:
+ gcc_unreachable ();
+ }
+ }
if (TARGET_AUTO_PIC)
target_flags |= MASK_CONST_GP;
return 1;
case UNSPECV_SET_BSP:
+ case UNSPECV_PROBE_STACK_RANGE:
need_barrier = 1;
break;
case UNSPECV_PSAC_NORMAL:
return 0;
+ case UNSPECV_PROBE_STACK_ADDRESS:
+ need_barrier = rtx_needs_barrier (XVECEXP (x, 0, 0), flags, pred);
+ break;
+
default:
gcc_unreachable ();
}
gcc_unreachable ();
}
- if (first_instruction && INSN_P (insn)
- && ia64_safe_itanium_class (insn) != ITANIUM_CLASS_IGNORE
- && GET_CODE (PATTERN (insn)) != USE
- && GET_CODE (PATTERN (insn)) != CLOBBER)
+ if (first_instruction && important_for_bundling_p (insn))
{
need_barrier = 0;
first_instruction = 0;
&& scheduled_good_insn (last_scheduled_insn))))
|| (last_scheduled_insn
&& (GET_CODE (last_scheduled_insn) == CALL_INSN
- || GET_CODE (PATTERN (last_scheduled_insn)) == ASM_INPUT
- || asm_noperands (PATTERN (last_scheduled_insn)) >= 0)))
+ || unknown_for_bundling_p (last_scheduled_insn))))
{
init_insn_group_barriers ();
if (last_scheduled_insn)
{
- if (GET_CODE (PATTERN (last_scheduled_insn)) == ASM_INPUT
- || asm_noperands (PATTERN (last_scheduled_insn)) >= 0)
+ if (unknown_for_bundling_p (last_scheduled_insn))
state_reset (curr_state);
else
{
if (!try_issue_insn (curr_state, insn))
return;
curr_state->accumulated_insns_num++;
- gcc_assert (GET_CODE (PATTERN (insn)) != ASM_INPUT
- && asm_noperands (PATTERN (insn)) < 0);
+ gcc_assert (!unknown_for_bundling_p (insn));
if (ia64_safe_type (insn) == TYPE_L)
curr_state->accumulated_insns_num++;
if (!try_issue_insn (curr_state, insn))
return;
curr_state->accumulated_insns_num++;
- if (GET_CODE (PATTERN (insn)) == ASM_INPUT
- || asm_noperands (PATTERN (insn)) >= 0)
+ if (unknown_for_bundling_p (insn))
{
/* Finish bundle containing asm insn. */
curr_state->after_nops_num
}
/* True when INSN is important for bundling. */
+
static bool
important_for_bundling_p (rtx insn)
{
return NULL_RTX;
}
+/* True when INSN is unknown, but important, for bundling. */
+
+static bool
+unknown_for_bundling_p (rtx insn)
+{
+ return (INSN_P (insn)
+ && ia64_safe_itanium_class (insn) == ITANIUM_CLASS_UNKNOWN
+ && GET_CODE (PATTERN (insn)) != USE
+ && GET_CODE (PATTERN (insn)) != CLOBBER);
+}
+
/* Add a bundle selector TEMPLATE0 before INSN. */
static void
insn != tail;
insn = NEXT_INSN (insn))
if (INSN_P (insn)
- && (ia64_safe_itanium_class (insn) == ITANIUM_CLASS_IGNORE
- || GET_CODE (PATTERN (insn)) == USE
- || GET_CODE (PATTERN (insn)) == CLOBBER)
+ && !important_for_bundling_p (insn)
&& GET_MODE (insn) == TImode)
{
PUT_MODE (insn, VOIDmode);
for (next_insn = NEXT_INSN (insn);
next_insn != tail;
next_insn = NEXT_INSN (next_insn))
- if (INSN_P (next_insn)
- && ia64_safe_itanium_class (next_insn) != ITANIUM_CLASS_IGNORE
- && GET_CODE (PATTERN (next_insn)) != USE
- && GET_CODE (PATTERN (next_insn)) != CLOBBER
+ if (important_for_bundling_p (next_insn)
&& INSN_CODE (next_insn) != CODE_FOR_insn_group_barrier)
{
PUT_MODE (next_insn, TImode);
insn != NULL_RTX;
insn = next_insn)
{
- gcc_assert (INSN_P (insn)
- && ia64_safe_itanium_class (insn) != ITANIUM_CLASS_IGNORE
- && GET_CODE (PATTERN (insn)) != USE
- && GET_CODE (PATTERN (insn)) != CLOBBER);
+ gcc_assert (important_for_bundling_p (insn));
type = ia64_safe_type (insn);
next_insn = get_next_important_insn (NEXT_INSN (insn), tail);
insn_num++;
only_bundle_end_p
= (next_insn != NULL_RTX
&& INSN_CODE (insn) == CODE_FOR_insn_group_barrier
- && ia64_safe_type (next_insn) == TYPE_UNKNOWN);
+ && unknown_for_bundling_p (next_insn));
/* We may fill up the current bundle if it is the cycle end
without a group barrier. */
bundle_end_p
curr_state = curr_state->originator)
{
insn = curr_state->insn;
- asm_p = (GET_CODE (PATTERN (insn)) == ASM_INPUT
- || asm_noperands (PATTERN (insn)) >= 0);
+ asm_p = unknown_for_bundling_p (insn);
insn_num++;
if (verbose >= 2 && dump)
{
/* Move the position backward in the window. Group barrier has
no slot. Asm insn takes all bundle. */
if (INSN_CODE (insn) != CODE_FOR_insn_group_barrier
- && GET_CODE (PATTERN (insn)) != ASM_INPUT
- && asm_noperands (PATTERN (insn)) < 0)
+ && !unknown_for_bundling_p (insn))
pos--;
/* Long insn takes 2 slots. */
if (ia64_safe_type (insn) == TYPE_L)
gcc_assert (pos >= 0);
if (pos % 3 == 0
&& INSN_CODE (insn) != CODE_FOR_insn_group_barrier
- && GET_CODE (PATTERN (insn)) != ASM_INPUT
- && asm_noperands (PATTERN (insn)) < 0)
+ && !unknown_for_bundling_p (insn))
{
/* The current insn is at the bundle start: emit the
template. */
if (recog_memoized (insn) == CODE_FOR_insn_group_barrier
&& !start_bundle && !end_bundle
&& next_insn
- && GET_CODE (PATTERN (next_insn)) != ASM_INPUT
- && asm_noperands (PATTERN (next_insn)) < 0)
+ && !unknown_for_bundling_p (next_insn))
num--;
start_bundle = false;
&& important_for_bundling_p (insn))
seen_good_insn = 1;
need_barrier_p = (GET_CODE (insn) == CALL_INSN
- || GET_CODE (PATTERN (insn)) == ASM_INPUT
- || asm_noperands (PATTERN (insn)) >= 0);
+ || unknown_for_bundling_p (insn));
}
}
}
|| strncmp (name, ".gnu.linkonce.sb.", 17) == 0)
flags = SECTION_SMALL;
-#if TARGET_ABI_OPEN_VMS
- if (decl && DECL_ATTRIBUTES (decl)
- && lookup_attribute ("common_object", DECL_ATTRIBUTES (decl)))
- flags |= SECTION_VMS_OVERLAY;
-#endif
-
flags |= default_section_type_flags (decl, name, reloc);
return flags;
}
instruction scheduling worth while. Note that use_thunk calls
assemble_start_function and assemble_end_function. */
- insn_locators_alloc ();
emit_all_insn_group_barriers (NULL);
insn = get_insns ();
shorten_branches (insn);
return default_get_reg_raw_mode(regno);
}
+/* Implement TARGET_MEMBER_TYPE_FORCES_BLK: on HP-UX, a TFmode member
+   forces the enclosing aggregate into BLKmode.  ??? Might not be needed
+   anymore.  */
+
+bool
+ia64_member_type_forces_blk (const_tree, enum machine_mode mode)
+{
+ return TARGET_HPUX && mode == TFmode;
+}
+
/* Always default to .text section until HP-UX linker is fixed. */
ATTRIBUTE_UNUSED static section *