/* Subroutines for insn-output.c for SPARC.
- Copyright (C) 1987-2013 Free Software Foundation, Inc.
+ Copyright (C) 1987-2014 Free Software Foundation, Inc.
Contributed by Michael Tiemann (tiemann@cygnus.com)
64-bit SPARC-V9 support by Michael Tiemann, Jim Wilson, and Doug Evans,
at Cygnus Support.
#include "coretypes.h"
#include "tm.h"
#include "tree.h"
+#include "stringpool.h"
+#include "stor-layout.h"
+#include "calls.h"
+#include "varasm.h"
#include "rtl.h"
#include "regs.h"
#include "hard-reg-set.h"
#include "output.h"
#include "insn-attr.h"
#include "flags.h"
+#include "hashtab.h"
+#include "hash-set.h"
+#include "vec.h"
+#include "machmode.h"
+#include "input.h"
#include "function.h"
#include "except.h"
#include "expr.h"
#include "target.h"
#include "target-def.h"
#include "common/common-target.h"
+#include "hash-table.h"
+#include "basic-block.h"
+#include "tree-ssa-alias.h"
+#include "internal-fn.h"
+#include "gimple-fold.h"
+#include "tree-eh.h"
+#include "gimple-expr.h"
+#include "is-a.h"
#include "gimple.h"
+#include "gimplify.h"
#include "langhooks.h"
#include "reload.h"
#include "params.h"
#include "opts.h"
#include "tree-pass.h"
#include "context.h"
+#include "wide-int.h"
+#include "builtins.h"
+#include "rtl-iter.h"
/* Processor costs */
rtx frame_base_reg;
HOST_WIDE_INT frame_base_offset;
- /* Some local-dynamic TLS symbol name. */
- const char *some_ld_name;
-
/* Number of global or FP registers to be saved (as 4-byte quantities). */
int n_global_fp_regs;
static int function_arg_slotno (const CUMULATIVE_ARGS *, enum machine_mode,
const_tree, bool, bool, int *, int *);
-static int supersparc_adjust_cost (rtx, rtx, rtx, int);
-static int hypersparc_adjust_cost (rtx, rtx, rtx, int);
+static int supersparc_adjust_cost (rtx_insn *, rtx, rtx_insn *, int);
+static int hypersparc_adjust_cost (rtx_insn *, rtx, rtx_insn *, int);
static void sparc_emit_set_const32 (rtx, rtx);
static void sparc_emit_set_const64 (rtx, rtx);
static rtx sparc_builtin_saveregs (void);
static int epilogue_renumber (rtx *, int);
static bool sparc_assemble_integer (rtx, unsigned int, int);
-static int set_extends (rtx);
+static int set_extends (rtx_insn *);
static void sparc_asm_function_prologue (FILE *, HOST_WIDE_INT);
static void sparc_asm_function_epilogue (FILE *, HOST_WIDE_INT);
#ifdef TARGET_SOLARIS
static void sparc_solaris_elf_asm_named_section (const char *, unsigned int,
tree) ATTRIBUTE_UNUSED;
#endif
-static int sparc_adjust_cost (rtx, rtx, rtx, int);
+static int sparc_adjust_cost (rtx_insn *, rtx, rtx_insn *, int);
static int sparc_issue_rate (void);
static void sparc_sched_init (FILE *, int, int);
static int sparc_use_sched_lookahead (void);
static bool sparc_function_ok_for_sibcall (tree, tree);
static void sparc_init_libfuncs (void);
static void sparc_init_builtins (void);
+static void sparc_fpu_init_builtins (void);
static void sparc_vis_init_builtins (void);
+static tree sparc_builtin_decl (unsigned, bool);
static rtx sparc_expand_builtin (tree, rtx, rtx, enum machine_mode, int);
static tree sparc_fold_builtin (tree, int, tree *, bool);
-static int sparc_vis_mul8x16 (int, int);
-static void sparc_handle_vis_mul8x16 (tree *, int, tree, tree, tree);
static void sparc_output_mi_thunk (FILE *, tree, HOST_WIDE_INT,
HOST_WIDE_INT, tree);
static bool sparc_can_output_mi_thunk (const_tree, HOST_WIDE_INT,
static bool sparc_cannot_force_const_mem (enum machine_mode, rtx);
static rtx sparc_tls_get_addr (void);
static rtx sparc_tls_got (void);
-static const char *get_some_local_dynamic_name (void);
-static int get_some_local_dynamic_name_1 (rtx *, void *);
static int sparc_register_move_cost (enum machine_mode,
reg_class_t, reg_class_t);
static bool sparc_rtx_costs (rtx, int, int, int, int *, bool);
enum machine_mode,
secondary_reload_info *);
static enum machine_mode sparc_cstore_mode (enum insn_code icode);
+static void sparc_atomic_assign_expand_fenv (tree *, tree *, tree *);
\f
#ifdef SUBTARGET_ATTRIBUTE_TABLE
/* Table of valid machine attributes. */
#undef TARGET_INIT_LIBFUNCS
#define TARGET_INIT_LIBFUNCS sparc_init_libfuncs
-#undef TARGET_INIT_BUILTINS
-#define TARGET_INIT_BUILTINS sparc_init_builtins
#undef TARGET_LEGITIMIZE_ADDRESS
#define TARGET_LEGITIMIZE_ADDRESS sparc_legitimize_address
#undef TARGET_MODE_DEPENDENT_ADDRESS_P
#define TARGET_MODE_DEPENDENT_ADDRESS_P sparc_mode_dependent_address_p
+#undef TARGET_INIT_BUILTINS
+#define TARGET_INIT_BUILTINS sparc_init_builtins
+#undef TARGET_BUILTIN_DECL
+#define TARGET_BUILTIN_DECL sparc_builtin_decl
#undef TARGET_EXPAND_BUILTIN
#define TARGET_EXPAND_BUILTIN sparc_expand_builtin
#undef TARGET_FOLD_BUILTIN
#undef TARGET_CSTORE_MODE
#define TARGET_CSTORE_MODE sparc_cstore_mode
+#undef TARGET_ATOMIC_ASSIGN_EXPAND_FENV
+#define TARGET_ATOMIC_ASSIGN_EXPAND_FENV sparc_atomic_assign_expand_fenv
+
struct gcc_target targetm = TARGET_INITIALIZER;
/* Return the memory reference contained in X if any, zero otherwise. */
pass runs as late as possible. The pass is inserted in the pass pipeline
at the end of sparc_option_override. */
-static bool
-sparc_gate_work_around_errata (void)
-{
- /* The only errata we handle are those of the AT697F and UT699. */
- return sparc_fix_at697f != 0 || sparc_fix_ut699 != 0;
-}
-
static unsigned int
sparc_do_work_around_errata (void)
{
- rtx insn, next;
+ rtx_insn *insn, *next;
/* Force all instructions to be split into their final form. */
split_all_insns_noflow ();
rtx set;
/* Look into the instruction in a delay slot. */
- if (NONJUMP_INSN_P (insn) && GET_CODE (PATTERN (insn)) == SEQUENCE)
- insn = XVECEXP (PATTERN (insn), 0, 1);
+ if (NONJUMP_INSN_P (insn))
+ if (rtx_sequence *seq = dyn_cast <rtx_sequence *> (PATTERN (insn)))
+ insn = seq->insn (1);
/* Look for a single-word load into an odd-numbered FP register. */
if (sparc_fix_at697f
&& REGNO (SET_DEST (set)) % 2 != 0)
{
/* The wrong dependency is on the enclosing double register. */
- unsigned int x = REGNO (SET_DEST (set)) - 1;
+ const unsigned int x = REGNO (SET_DEST (set)) - 1;
unsigned int src1, src2, dest;
int code;
- /* If the insn has a delay slot, then it cannot be problematic. */
next = next_active_insn (insn);
if (!next)
break;
- if (NONJUMP_INSN_P (next) && GET_CODE (PATTERN (next)) == SEQUENCE)
+ /* If the insn is a branch, then it cannot be problematic. */
+ if (!NONJUMP_INSN_P (next) || GET_CODE (PATTERN (next)) == SEQUENCE)
continue;
extract_insn (next);
dependency on the first single-cycle load. */
rtx x = SET_DEST (set);
- /* If the insn has a delay slot, then it cannot be problematic. */
next = next_active_insn (insn);
if (!next)
break;
- if (NONJUMP_INSN_P (next) && GET_CODE (PATTERN (next)) == SEQUENCE)
+ /* If the insn is a branch, then it cannot be problematic. */
+ if (!NONJUMP_INSN_P (next) || GET_CODE (PATTERN (next)) == SEQUENCE)
continue;
/* Look for a second memory access to/from an integer register. */
insert_nop = true;
/* STD is *not* affected. */
- else if ((mem = mem_ref (dest)) != NULL_RTX
- && GET_MODE_SIZE (GET_MODE (mem)) <= 4
- && (src == const0_rtx
+ else if (MEM_P (dest)
+ && GET_MODE_SIZE (GET_MODE (dest)) <= 4
+ && (src == CONST0_RTX (GET_MODE (dest))
|| (REG_P (src)
&& REGNO (src) < 32
&& REGNO (src) != REGNO (x)))
- && !reg_mentioned_p (x, XEXP (mem, 0)))
+ && !reg_mentioned_p (x, XEXP (dest, 0)))
insert_nop = true;
}
}
+ /* Look for a single-word load/operation into an FP register. */
+ else if (sparc_fix_ut699
+ && NONJUMP_INSN_P (insn)
+ && (set = single_set (insn)) != NULL_RTX
+ && GET_MODE_SIZE (GET_MODE (SET_SRC (set))) == 4
+ && REG_P (SET_DEST (set))
+ && REGNO (SET_DEST (set)) > 31)
+ {
+ /* Number of instructions in the problematic window. */
+ const int n_insns = 4;
+ /* The problematic combination is with the sibling FP register. */
+ const unsigned int x = REGNO (SET_DEST (set));
+ const unsigned int y = x ^ 1;
+ rtx_insn *after;
+ int i;
+
+ next = next_active_insn (insn);
+ if (!next)
+ break;
+ /* If the insn is a branch, then it cannot be problematic. */
+ if (!NONJUMP_INSN_P (next) || GET_CODE (PATTERN (next)) == SEQUENCE)
+ continue;
+
+ /* Look for a second load/operation into the sibling FP register. */
+ if (!((set = single_set (next)) != NULL_RTX
+ && GET_MODE_SIZE (GET_MODE (SET_SRC (set))) == 4
+ && REG_P (SET_DEST (set))
+ && REGNO (SET_DEST (set)) == y))
+ continue;
+
+ /* Look for a (possible) store from the FP register in the next N
+ instructions, but bail out if it is again modified or if there
+ is a store from the sibling FP register before this store. */
+ for (after = next, i = 0; i < n_insns; i++)
+ {
+ bool branch_p;
+
+ after = next_active_insn (after);
+ if (!after)
+ break;
+
+ /* This is a branch with an empty delay slot. */
+ if (!NONJUMP_INSN_P (after))
+ {
+ if (++i == n_insns)
+ break;
+ branch_p = true;
+ after = NULL;
+ }
+ /* This is a branch with a filled delay slot. */
+ else if (rtx_sequence *seq =
+ dyn_cast <rtx_sequence *> (PATTERN (after)))
+ {
+ if (++i == n_insns)
+ break;
+ branch_p = true;
+ after = seq->insn (1);
+ }
+ /* This is a regular instruction. */
+ else
+ branch_p = false;
+
+ if (after && (set = single_set (after)) != NULL_RTX)
+ {
+ const rtx src = SET_SRC (set);
+ const rtx dest = SET_DEST (set);
+ const unsigned int size = GET_MODE_SIZE (GET_MODE (dest));
+
+ /* If the FP register is again modified before the store,
+ then the store isn't affected. */
+ if (REG_P (dest)
+ && (REGNO (dest) == x
+ || (REGNO (dest) == y && size == 8)))
+ break;
+
+ if (MEM_P (dest) && REG_P (src))
+ {
+ /* If there is a store from the sibling FP register
+ before the store, then the store is not affected. */
+ if (REGNO (src) == y || (REGNO (src) == x && size == 8))
+ break;
+
+ /* Otherwise, the store is affected. */
+ if (REGNO (src) == x && size == 4)
+ {
+ insert_nop = true;
+ break;
+ }
+ }
+ }
+
+ /* If we have a branch in the first M instructions, then we
+ cannot see the (M+2)th instruction so we play safe. */
+ if (branch_p && i <= (n_insns - 2))
+ {
+ insert_nop = true;
+ break;
+ }
+ }
+ }
+
else
next = NEXT_INSN (insn);
RTL_PASS, /* type */
"errata", /* name */
OPTGROUP_NONE, /* optinfo_flags */
- true, /* has_gate */
- true, /* has_execute */
TV_MACH_DEP, /* tv_id */
0, /* properties_required */
0, /* properties_provided */
0, /* properties_destroyed */
0, /* todo_flags_start */
- TODO_verify_rtl_sharing, /* todo_flags_finish */
+ 0, /* todo_flags_finish */
};
class pass_work_around_errata : public rtl_opt_pass
{}
/* opt_pass methods: */
- bool gate () { return sparc_gate_work_around_errata (); }
- unsigned int execute () { return sparc_do_work_around_errata (); }
+ virtual bool gate (function *)
+ {
+ /* The only errata we handle are those of the AT697F and UT699. */
+ return sparc_fix_at697f != 0 || sparc_fix_ut699 != 0;
+ }
+
+ virtual unsigned int execute (function *)
+ {
+ return sparc_do_work_around_errata ();
+ }
}; // class pass_work_around_errata
nop into its delay slot. */
int
-empty_delay_slot (rtx insn)
+empty_delay_slot (rtx_insn *insn)
{
rtx seq;
/* Return nonzero if TRIAL can go into the call delay slot. */
int
-tls_call_delay (rtx trial)
+eligible_for_call_delay (rtx_insn *trial)
{
rtx pat;
+ if (get_attr_in_branch_delay (trial) == IN_BRANCH_DELAY_FALSE)
+ return 0;
+
/* Binutils allows
call __tls_get_addr, %tgd_call (foo)
add %l7, %o0, %o0, %tgd_add (foo)
/* If we have the 'return' instruction, anything that does not use
local or output registers and can go into a delay slot wins. */
- else if (return_p
- && TARGET_V9
- && !epilogue_renumber (&pat, 1)
- && get_attr_in_uncond_branch_delay (trial)
- == IN_UNCOND_BRANCH_DELAY_TRUE)
+ else if (return_p && TARGET_V9 && !epilogue_renumber (&pat, 1))
return 1;
/* The 'restore src1,src2,dest' pattern for SImode. */
/* Return nonzero if TRIAL can go into the function return's delay slot. */
int
-eligible_for_return_delay (rtx trial)
+eligible_for_return_delay (rtx_insn *trial)
{
int regno;
rtx pat;
- if (! NONJUMP_INSN_P (trial))
- return 0;
-
- if (get_attr_length (trial) != 1)
- return 0;
-
/* If the function uses __builtin_eh_return, the eh_return machinery
occupies the delay slot. */
if (crtl->calls_eh_return)
return 0;
+ if (get_attr_in_branch_delay (trial) == IN_BRANCH_DELAY_FALSE)
+ return 0;
+
/* In the case of a leaf or flat function, anything can go into the slot. */
if (sparc_leaf_function_p || TARGET_FLAT)
- return
- get_attr_in_uncond_branch_delay (trial) == IN_UNCOND_BRANCH_DELAY_TRUE;
+ return 1;
+
+ if (!NONJUMP_INSN_P (trial))
+ return 0;
pat = PATTERN (trial);
if (GET_CODE (pat) == PARALLEL)
if (regno >= 8 && regno < 24)
return 0;
}
- return !epilogue_renumber (&pat, 1)
- && (get_attr_in_uncond_branch_delay (trial)
- == IN_UNCOND_BRANCH_DELAY_TRUE);
+ return !epilogue_renumber (&pat, 1);
}
if (GET_CODE (pat) != SET)
instruction, it can probably go in. But restore will not work
with FP_REGS. */
if (! SPARC_INT_REG_P (regno))
- return (TARGET_V9
- && !epilogue_renumber (&pat, 1)
- && get_attr_in_uncond_branch_delay (trial)
- == IN_UNCOND_BRANCH_DELAY_TRUE);
+ return TARGET_V9 && !epilogue_renumber (&pat, 1);
return eligible_for_restore_insn (trial, true);
}
/* Return nonzero if TRIAL can go into the sibling call's delay slot. */
int
-eligible_for_sibcall_delay (rtx trial)
+eligible_for_sibcall_delay (rtx_insn *trial)
{
rtx pat;
- if (! NONJUMP_INSN_P (trial) || GET_CODE (PATTERN (trial)) != SET)
+ if (get_attr_in_branch_delay (trial) == IN_BRANCH_DELAY_FALSE)
return 0;
- if (get_attr_length (trial) != 1)
+ if (!NONJUMP_INSN_P (trial))
return 0;
pat = PATTERN (trial);
return 1;
}
+ if (GET_CODE (pat) != SET)
+ return 0;
+
/* Otherwise, only operations which can be done in tandem with
a `restore' insn can go into the delay slot. */
if (GET_CODE (SET_DEST (pat)) != REG
static rtx
sparc_legitimize_tls_address (rtx addr)
{
- rtx temp1, temp2, temp3, ret, o0, got, insn;
+ rtx temp1, temp2, temp3, ret, o0, got;
+ rtx_insn *insn;
gcc_assert (can_create_pseudo_p ());
|| (GET_CODE (orig) == LABEL_REF && !can_use_mov_pic_label_ref (orig)))
{
rtx pic_ref, address;
- rtx insn;
+ rtx_insn *insn;
if (reg == 0)
{
void
sparc_emit_call_insn (rtx pat, rtx addr)
{
- rtx insn;
+ rtx_insn *insn;
insn = emit_call_insn (pat);
for (i = 0; i < NUM_MACHINE_MODES; i++)
{
- switch (GET_MODE_CLASS (i))
+ enum machine_mode m = (enum machine_mode) i;
+ unsigned int size = GET_MODE_SIZE (m);
+
+ switch (GET_MODE_CLASS (m))
{
case MODE_INT:
case MODE_PARTIAL_INT:
case MODE_COMPLEX_INT:
- if (GET_MODE_SIZE (i) < 4)
+ if (size < 4)
sparc_mode_class[i] = 1 << (int) H_MODE;
- else if (GET_MODE_SIZE (i) == 4)
+ else if (size == 4)
sparc_mode_class[i] = 1 << (int) S_MODE;
- else if (GET_MODE_SIZE (i) == 8)
+ else if (size == 8)
sparc_mode_class[i] = 1 << (int) D_MODE;
- else if (GET_MODE_SIZE (i) == 16)
+ else if (size == 16)
sparc_mode_class[i] = 1 << (int) T_MODE;
- else if (GET_MODE_SIZE (i) == 32)
+ else if (size == 32)
sparc_mode_class[i] = 1 << (int) O_MODE;
else
sparc_mode_class[i] = 0;
break;
case MODE_VECTOR_INT:
- if (GET_MODE_SIZE (i) == 4)
+ if (size == 4)
sparc_mode_class[i] = 1 << (int) SF_MODE;
- else if (GET_MODE_SIZE (i) == 8)
+ else if (size == 8)
sparc_mode_class[i] = 1 << (int) DF_MODE;
else
sparc_mode_class[i] = 0;
break;
case MODE_FLOAT:
case MODE_COMPLEX_FLOAT:
- if (GET_MODE_SIZE (i) == 4)
+ if (size == 4)
sparc_mode_class[i] = 1 << (int) SF_MODE;
- else if (GET_MODE_SIZE (i) == 8)
+ else if (size == 8)
sparc_mode_class[i] = 1 << (int) DF_MODE;
- else if (GET_MODE_SIZE (i) == 16)
+ else if (size == 16)
sparc_mode_class[i] = 1 << (int) TF_MODE;
- else if (GET_MODE_SIZE (i) == 32)
+ else if (size == 32)
sparc_mode_class[i] = 1 << (int) OF_MODE;
else
sparc_mode_class[i] = 0;
break;
case MODE_CC:
- if (i == (int) CCFPmode || i == (int) CCFPEmode)
+ if (m == CCFPmode || m == CCFPEmode)
sparc_mode_class[i] = 1 << (int) CCFP_MODE;
else
sparc_mode_class[i] = 1 << (int) CC_MODE;
sorr_act_t action_true, sorr_act_t action_false)
{
unsigned int i;
- rtx mem, insn;
+ rtx mem;
+ rtx_insn *insn;
if (TARGET_ARCH64 && high <= 32)
{
/* Emit a window_save insn. */
-static rtx
+static rtx_insn *
emit_window_save (rtx increment)
{
- rtx insn = emit_insn (gen_window_save (increment));
+ rtx_insn *insn = emit_insn (gen_window_save (increment));
RTX_FRAME_RELATED_P (insn) = 1;
/* The incoming return address (%o7) is saved in %i7. */
sparc_expand_prologue (void)
{
HOST_WIDE_INT size;
- rtx insn;
+ rtx_insn *insn;
/* Compute a snapshot of crtl->uses_only_leaf_regs. Relying
on the final value of the flag means deferring the prologue/epilogue
if (flag_stack_usage_info)
current_function_static_stack_size = size;
- if (flag_stack_check == STATIC_BUILTIN_STACK_CHECK && size)
- sparc_emit_probe_stack_range (STACK_CHECK_PROTECT, size);
+ if (flag_stack_check == STATIC_BUILTIN_STACK_CHECK)
+ {
+ if (crtl->is_leaf && !cfun->calls_alloca)
+ {
+ if (size > PROBE_INTERVAL && size > STACK_CHECK_PROTECT)
+ sparc_emit_probe_stack_range (STACK_CHECK_PROTECT,
+ size - STACK_CHECK_PROTECT);
+ }
+ else if (size > 0)
+ sparc_emit_probe_stack_range (STACK_CHECK_PROTECT, size);
+ }
if (size == 0)
; /* do nothing. */
sparc_flat_expand_prologue (void)
{
HOST_WIDE_INT size;
- rtx insn;
+ rtx_insn *insn;
sparc_leaf_function_p = optimize > 0 && crtl->is_leaf;
if (flag_stack_usage_info)
current_function_static_stack_size = size;
- if (flag_stack_check == STATIC_BUILTIN_STACK_CHECK && size)
- sparc_emit_probe_stack_range (STACK_CHECK_PROTECT, size);
+ if (flag_stack_check == STATIC_BUILTIN_STACK_CHECK)
+ {
+ if (crtl->is_leaf && !cfun->calls_alloca)
+ {
+ if (size > PROBE_INTERVAL && size > STACK_CHECK_PROTECT)
+ sparc_emit_probe_stack_range (STACK_CHECK_PROTECT,
+ size - STACK_CHECK_PROTECT);
+ }
+ else if (size > 0)
+ sparc_emit_probe_stack_range (STACK_CHECK_PROTECT, size);
+ }
if (sparc_save_local_in_regs_p)
emit_save_or_restore_local_in_regs (stack_pointer_rtx, SPARC_STACK_BIAS,
/* Output a return. */
const char *
-output_return (rtx insn)
+output_return (rtx_insn *insn)
{
if (crtl->calls_eh_return)
{
/* Output a sibling call. */
const char *
-output_sibcall (rtx insn, rtx call_operand)
+output_sibcall (rtx_insn *insn, rtx call_operand)
{
rtx operands[1];
if (final_sequence)
{
- rtx delay = NEXT_INSN (insn);
+ rtx_insn *delay = NEXT_INSN (insn);
gcc_assert (delay);
output_restore (PATTERN (delay));
if (integer_zerop (DECL_SIZE (field)))
continue;
- if (host_integerp (bit_position (field), 1))
+ if (tree_fits_uhwi_p (bit_position (field)))
bitpos += int_bit_position (field);
}
if (integer_zerop (DECL_SIZE (field)))
continue;
- if (host_integerp (bit_position (field), 1))
+ if (tree_fits_uhwi_p (bit_position (field)))
bitpos += int_bit_position (field);
}
provided. */
rtx ret_reg = gen_rtx_REG (Pmode, 31);
rtx scratch = gen_reg_rtx (SImode);
- rtx endlab = gen_label_rtx ();
+ rtx_code_label *endlab = gen_label_rtx ();
/* Calculate the return object size */
tree size = TYPE_SIZE_UNIT (TREE_TYPE (fndecl));
DEST is the destination insn (i.e. the label), INSN is the source. */
const char *
-output_ubranch (rtx dest, rtx insn)
+output_ubranch (rtx dest, rtx_insn *insn)
{
static char string[64];
bool v9_form = false;
const char *
output_cbranch (rtx op, rtx dest, int label, int reversed, int annul,
- rtx insn)
+ rtx_insn *insn)
{
static char string[64];
enum rtx_code code = GET_CODE (op);
if (*labelno && insn && (note = find_reg_note (insn, REG_BR_PROB, NULL_RTX)))
{
strcpy (p,
- ((INTVAL (XEXP (note, 0)) >= REG_BR_PROB_BASE / 2) ^ far)
+ ((XINT (note, 0) >= REG_BR_PROB_BASE / 2) ^ far)
? ",pt" : ",pn");
p += 3;
spaces -= 3;
void
sparc_emit_floatunsdi (rtx *operands, enum machine_mode mode)
{
- rtx neglab, donelab, i0, i1, f0, in, out;
+ rtx i0, i1, f0, in, out;
out = operands[0];
in = force_reg (DImode, operands[1]);
- neglab = gen_label_rtx ();
- donelab = gen_label_rtx ();
+ rtx_code_label *neglab = gen_label_rtx ();
+ rtx_code_label *donelab = gen_label_rtx ();
i0 = gen_reg_rtx (DImode);
i1 = gen_reg_rtx (DImode);
f0 = gen_reg_rtx (mode);
void
sparc_emit_fixunsdi (rtx *operands, enum machine_mode mode)
{
- rtx neglab, donelab, i0, i1, f0, in, out, limit;
+ rtx i0, i1, f0, in, out, limit;
out = operands[0];
in = force_reg (mode, operands[1]);
- neglab = gen_label_rtx ();
- donelab = gen_label_rtx ();
+ rtx_code_label *neglab = gen_label_rtx ();
+ rtx_code_label *donelab = gen_label_rtx ();
i0 = gen_reg_rtx (DImode);
i1 = gen_reg_rtx (DImode);
limit = gen_reg_rtx (mode);
and OP is the conditional expression. */
const char *
-output_cbcond (rtx op, rtx dest, rtx insn)
+output_cbcond (rtx op, rtx dest, rtx_insn *insn)
{
enum machine_mode mode = GET_MODE (XEXP (op, 0));
enum rtx_code code = GET_CODE (op);
const char *
output_v9branch (rtx op, rtx dest, int reg, int label, int reversed,
- int annul, rtx insn)
+ int annul, rtx_insn *insn)
{
static char string[64];
enum rtx_code code = GET_CODE (op);
if (insn && (note = find_reg_note (insn, REG_BR_PROB, NULL_RTX)))
{
strcpy (p,
- ((INTVAL (XEXP (note, 0)) >= REG_BR_PROB_BASE / 2) ^ far)
+ ((XINT (note, 0) >= REG_BR_PROB_BASE / 2) ^ far)
? ",pt" : ",pn");
p += 3;
}
return 0;
}
-/* Return 1 if x and y are some kind of REG and they refer to
- different hard registers. This test is guaranteed to be
- run after reload. */
-
-int
-sparc_absnegfloat_split_legitimate (rtx x, rtx y)
-{
- if (GET_CODE (x) != REG)
- return 0;
- if (GET_CODE (y) != REG)
- return 0;
- if (REGNO (x) == REGNO (y))
- return 0;
- return 1;
-}
-
/* Return 1 if REGNO (reg1) is even and REGNO (reg1) == REGNO (reg2) - 1.
This makes them candidates for using ldd and std insns.
return 1;
}
+/* Return the widened memory access made of MEM1 and MEM2 in MODE.
+   The widened access is built on MEM1 at offset 0 and is flagged as
+   non-trapping only if both original accesses are non-trapping.  */
+
+rtx
+widen_mem_for_ldd_peep (rtx mem1, rtx mem2, enum machine_mode mode)
+{
+  rtx x = widen_memory_access (mem1, mode, 0);
+  /* The combined access may trap if either of the two halves may trap.  */
+  MEM_NOTRAP_P (x) = MEM_NOTRAP_P (mem1) && MEM_NOTRAP_P (mem2);
+  return x;
+}
+
/* Return 1 if reg is a pseudo, or is the first register in
a hard register pair. This makes it suitable for use in
ldd and std insns. */
return;
case '&':
/* Print some local dynamic TLS name. */
- assemble_name (file, get_some_local_dynamic_name ());
+ if (const char *name = get_some_local_dynamic_name ())
+ assemble_name (file, name);
+ else
+ output_operand_lossage ("'%%&' used without any "
+ "local dynamic TLS references");
return;
case 'Y':
/* On UltraSPARC a flush flushes an entire cache line. The trampoline is
aligned on a 16 byte boundary so one flush clears it all. */
- emit_insn (gen_flush (validize_mem (adjust_address (m_tramp, SImode, 0))));
+ emit_insn (gen_flushsi (validize_mem (adjust_address (m_tramp, SImode, 0))));
if (sparc_cpu != PROCESSOR_ULTRASPARC
&& sparc_cpu != PROCESSOR_ULTRASPARC3
&& sparc_cpu != PROCESSOR_NIAGARA
&& sparc_cpu != PROCESSOR_NIAGARA2
&& sparc_cpu != PROCESSOR_NIAGARA3
&& sparc_cpu != PROCESSOR_NIAGARA4)
- emit_insn (gen_flush (validize_mem (adjust_address (m_tramp, SImode, 8))));
+ emit_insn (gen_flushsi (validize_mem (adjust_address (m_tramp, SImode, 8))));
/* Call __enable_execute_stack after writing onto the stack to make sure
the stack address is accessible. */
a dependency LINK or INSN on DEP_INSN. COST is the current cost. */
static int
-supersparc_adjust_cost (rtx insn, rtx link, rtx dep_insn, int cost)
+supersparc_adjust_cost (rtx_insn *insn, rtx link, rtx_insn *dep_insn, int cost)
{
enum attr_type insn_type;
}
static int
-hypersparc_adjust_cost (rtx insn, rtx link, rtx dep_insn, int cost)
+hypersparc_adjust_cost (rtx_insn *insn, rtx link, rtx_insn *dep_insn, int cost)
{
enum attr_type insn_type, dep_type;
rtx pat = PATTERN(insn);
}
static int
-sparc_adjust_cost(rtx insn, rtx link, rtx dep, int cost)
+sparc_adjust_cost(rtx_insn *insn, rtx link, rtx_insn *dep, int cost)
{
switch (sparc_cpu)
{
}
static int
-set_extends (rtx insn)
+set_extends (rtx_insn *insn)
{
register rtx pat = PATTERN (insn);
unknown. Return 1 if the high bits are zero, -1 if the register is
sign extended. */
int
-sparc_check_64 (rtx x, rtx insn)
+sparc_check_64 (rtx x, rtx_insn *insn)
{
/* If a register is set only once it is safe to ignore insns this
code does not know how to handle. The loop will either recognize
OPERANDS are its operands and OPCODE is the mnemonic to be used. */
const char *
-output_v8plus_shift (rtx insn, rtx *operands, const char *opcode)
+output_v8plus_shift (rtx_insn *insn, rtx *operands, const char *opcode)
{
static char asm_code[60];
}
}
\f
-static tree def_builtin(const char *name, int code, tree type)
+/* SPARC builtins.  These enumerators index the sparc_builtins and
+   sparc_builtins_icode tables below.  */
+enum sparc_builtins
+{
+  /* FPU builtins.  */
+  SPARC_BUILTIN_LDFSR,
+  SPARC_BUILTIN_STFSR,
+
+  /* VIS 1.0 builtins.  */
+  SPARC_BUILTIN_FPACK16,
+  SPARC_BUILTIN_FPACK32,
+  SPARC_BUILTIN_FPACKFIX,
+  SPARC_BUILTIN_FEXPAND,
+  SPARC_BUILTIN_FPMERGE,
+  SPARC_BUILTIN_FMUL8X16,
+  SPARC_BUILTIN_FMUL8X16AU,
+  SPARC_BUILTIN_FMUL8X16AL,
+  SPARC_BUILTIN_FMUL8SUX16,
+  SPARC_BUILTIN_FMUL8ULX16,
+  SPARC_BUILTIN_FMULD8SUX16,
+  SPARC_BUILTIN_FMULD8ULX16,
+  SPARC_BUILTIN_FALIGNDATAV4HI,
+  SPARC_BUILTIN_FALIGNDATAV8QI,
+  SPARC_BUILTIN_FALIGNDATAV2SI,
+  SPARC_BUILTIN_FALIGNDATADI,
+  SPARC_BUILTIN_WRGSR,
+  SPARC_BUILTIN_RDGSR,
+  SPARC_BUILTIN_ALIGNADDR,
+  SPARC_BUILTIN_ALIGNADDRL,
+  SPARC_BUILTIN_PDIST,
+  SPARC_BUILTIN_EDGE8,
+  SPARC_BUILTIN_EDGE8L,
+  SPARC_BUILTIN_EDGE16,
+  SPARC_BUILTIN_EDGE16L,
+  SPARC_BUILTIN_EDGE32,
+  SPARC_BUILTIN_EDGE32L,
+  SPARC_BUILTIN_FCMPLE16,
+  SPARC_BUILTIN_FCMPLE32,
+  SPARC_BUILTIN_FCMPNE16,
+  SPARC_BUILTIN_FCMPNE32,
+  SPARC_BUILTIN_FCMPGT16,
+  SPARC_BUILTIN_FCMPGT32,
+  SPARC_BUILTIN_FCMPEQ16,
+  SPARC_BUILTIN_FCMPEQ32,
+  SPARC_BUILTIN_FPADD16,
+  SPARC_BUILTIN_FPADD16S,
+  SPARC_BUILTIN_FPADD32,
+  SPARC_BUILTIN_FPADD32S,
+  SPARC_BUILTIN_FPSUB16,
+  SPARC_BUILTIN_FPSUB16S,
+  SPARC_BUILTIN_FPSUB32,
+  SPARC_BUILTIN_FPSUB32S,
+  SPARC_BUILTIN_ARRAY8,
+  SPARC_BUILTIN_ARRAY16,
+  SPARC_BUILTIN_ARRAY32,
+
+  /* VIS 2.0 builtins.  */
+  SPARC_BUILTIN_EDGE8N,
+  SPARC_BUILTIN_EDGE8LN,
+  SPARC_BUILTIN_EDGE16N,
+  SPARC_BUILTIN_EDGE16LN,
+  SPARC_BUILTIN_EDGE32N,
+  SPARC_BUILTIN_EDGE32LN,
+  SPARC_BUILTIN_BMASK,
+  SPARC_BUILTIN_BSHUFFLEV4HI,
+  SPARC_BUILTIN_BSHUFFLEV8QI,
+  SPARC_BUILTIN_BSHUFFLEV2SI,
+  SPARC_BUILTIN_BSHUFFLEDI,
+
+  /* VIS 3.0 builtins.  */
+  SPARC_BUILTIN_CMASK8,
+  SPARC_BUILTIN_CMASK16,
+  SPARC_BUILTIN_CMASK32,
+  SPARC_BUILTIN_FCHKSM16,
+  SPARC_BUILTIN_FSLL16,
+  SPARC_BUILTIN_FSLAS16,
+  SPARC_BUILTIN_FSRL16,
+  SPARC_BUILTIN_FSRA16,
+  SPARC_BUILTIN_FSLL32,
+  SPARC_BUILTIN_FSLAS32,
+  SPARC_BUILTIN_FSRL32,
+  SPARC_BUILTIN_FSRA32,
+  SPARC_BUILTIN_PDISTN,
+  SPARC_BUILTIN_FMEAN16,
+  SPARC_BUILTIN_FPADD64,
+  SPARC_BUILTIN_FPSUB64,
+  SPARC_BUILTIN_FPADDS16,
+  SPARC_BUILTIN_FPADDS16S,
+  SPARC_BUILTIN_FPSUBS16,
+  SPARC_BUILTIN_FPSUBS16S,
+  SPARC_BUILTIN_FPADDS32,
+  SPARC_BUILTIN_FPADDS32S,
+  SPARC_BUILTIN_FPSUBS32,
+  SPARC_BUILTIN_FPSUBS32S,
+  SPARC_BUILTIN_FUCMPLE8,
+  SPARC_BUILTIN_FUCMPNE8,
+  SPARC_BUILTIN_FUCMPGT8,
+  SPARC_BUILTIN_FUCMPEQ8,
+  SPARC_BUILTIN_FHADDS,
+  SPARC_BUILTIN_FHADDD,
+  SPARC_BUILTIN_FHSUBS,
+  SPARC_BUILTIN_FHSUBD,
+  SPARC_BUILTIN_FNHADDS,
+  SPARC_BUILTIN_FNHADDD,
+  SPARC_BUILTIN_UMULXHI,
+  SPARC_BUILTIN_XMULX,
+  SPARC_BUILTIN_XMULXHI,
+
+  /* Sentinel: total number of SPARC builtins.  Keep this last.  */
+  SPARC_BUILTIN_MAX
+};
+
+/* Builtin function decls, indexed by sparc_builtins code (GC-rooted).  */
+static GTY (()) tree sparc_builtins[(int) SPARC_BUILTIN_MAX];
+/* Insn code used to expand each builtin, indexed by sparc_builtins code.  */
+static enum insn_code sparc_builtins_icode[(int) SPARC_BUILTIN_MAX];
+
+/* Add a SPARC builtin function with NAME, ICODE, CODE and TYPE.  Return the
+   function decl or NULL_TREE if the builtin was not added.  */
+
+static tree
+def_builtin (const char *name, enum insn_code icode, enum sparc_builtins code,
+	     tree type)
{
-  return add_builtin_function(name, code, type, BUILT_IN_MD, NULL,
-			      NULL_TREE);
+  tree t
+    = add_builtin_function (name, type, code, BUILT_IN_MD, NULL, NULL_TREE);
+
+  /* Record the decl and its matching insn code, indexed by CODE, so they
+     can be retrieved later.  */
+  if (t)
+    {
+      sparc_builtins[code] = t;
+      sparc_builtins_icode[code] = icode;
+    }
+
+  return t;
}
-static tree def_builtin_const(const char *name, int code, tree type)
+/* Likewise, but also marks the function as "const". */
+
+static tree
+def_builtin_const (const char *name, enum insn_code icode,
+ enum sparc_builtins code, tree type)
{
- tree t = def_builtin(name, code, type);
+ tree t = def_builtin (name, icode, code, type);
if (t)
TREE_READONLY (t) = 1;
static void
sparc_init_builtins (void)
{
+ if (TARGET_FPU)
+ sparc_fpu_init_builtins ();
+
if (TARGET_VIS)
sparc_vis_init_builtins ();
}
-/* Create builtin functions for VIS 1.0 instructions. */
+/* Create builtin functions for FPU instructions.  Both builtins take a
+   pointer to an unsigned int and return void.  */
+
+static void
+sparc_fpu_init_builtins (void)
+{
+  tree ftype
+    = build_function_type_list (void_type_node,
+				build_pointer_type (unsigned_type_node), 0);
+  /* __builtin_load_fsr expands to the ldfsr pattern.  */
+  def_builtin ("__builtin_load_fsr", CODE_FOR_ldfsr,
+	       SPARC_BUILTIN_LDFSR, ftype);
+  /* __builtin_store_fsr expands to the stfsr pattern.  */
+  def_builtin ("__builtin_store_fsr", CODE_FOR_stfsr,
+	       SPARC_BUILTIN_STFSR, ftype);
+}
+
+/* Create builtin functions for VIS instructions. */
static void
sparc_vis_init_builtins (void)
/* Packing and expanding vectors. */
def_builtin ("__builtin_vis_fpack16", CODE_FOR_fpack16_vis,
- v4qi_ftype_v4hi);
+ SPARC_BUILTIN_FPACK16, v4qi_ftype_v4hi);
def_builtin ("__builtin_vis_fpack32", CODE_FOR_fpack32_vis,
- v8qi_ftype_v2si_v8qi);
+ SPARC_BUILTIN_FPACK32, v8qi_ftype_v2si_v8qi);
def_builtin ("__builtin_vis_fpackfix", CODE_FOR_fpackfix_vis,
- v2hi_ftype_v2si);
+ SPARC_BUILTIN_FPACKFIX, v2hi_ftype_v2si);
def_builtin_const ("__builtin_vis_fexpand", CODE_FOR_fexpand_vis,
- v4hi_ftype_v4qi);
+ SPARC_BUILTIN_FEXPAND, v4hi_ftype_v4qi);
def_builtin_const ("__builtin_vis_fpmerge", CODE_FOR_fpmerge_vis,
- v8qi_ftype_v4qi_v4qi);
+ SPARC_BUILTIN_FPMERGE, v8qi_ftype_v4qi_v4qi);
/* Multiplications. */
def_builtin_const ("__builtin_vis_fmul8x16", CODE_FOR_fmul8x16_vis,
- v4hi_ftype_v4qi_v4hi);
+ SPARC_BUILTIN_FMUL8X16, v4hi_ftype_v4qi_v4hi);
def_builtin_const ("__builtin_vis_fmul8x16au", CODE_FOR_fmul8x16au_vis,
- v4hi_ftype_v4qi_v2hi);
+ SPARC_BUILTIN_FMUL8X16AU, v4hi_ftype_v4qi_v2hi);
def_builtin_const ("__builtin_vis_fmul8x16al", CODE_FOR_fmul8x16al_vis,
- v4hi_ftype_v4qi_v2hi);
+ SPARC_BUILTIN_FMUL8X16AL, v4hi_ftype_v4qi_v2hi);
def_builtin_const ("__builtin_vis_fmul8sux16", CODE_FOR_fmul8sux16_vis,
- v4hi_ftype_v8qi_v4hi);
+ SPARC_BUILTIN_FMUL8SUX16, v4hi_ftype_v8qi_v4hi);
def_builtin_const ("__builtin_vis_fmul8ulx16", CODE_FOR_fmul8ulx16_vis,
- v4hi_ftype_v8qi_v4hi);
+ SPARC_BUILTIN_FMUL8ULX16, v4hi_ftype_v8qi_v4hi);
def_builtin_const ("__builtin_vis_fmuld8sux16", CODE_FOR_fmuld8sux16_vis,
- v2si_ftype_v4qi_v2hi);
+ SPARC_BUILTIN_FMULD8SUX16, v2si_ftype_v4qi_v2hi);
def_builtin_const ("__builtin_vis_fmuld8ulx16", CODE_FOR_fmuld8ulx16_vis,
- v2si_ftype_v4qi_v2hi);
+ SPARC_BUILTIN_FMULD8ULX16, v2si_ftype_v4qi_v2hi);
/* Data aligning. */
def_builtin ("__builtin_vis_faligndatav4hi", CODE_FOR_faligndatav4hi_vis,
- v4hi_ftype_v4hi_v4hi);
+ SPARC_BUILTIN_FALIGNDATAV4HI, v4hi_ftype_v4hi_v4hi);
def_builtin ("__builtin_vis_faligndatav8qi", CODE_FOR_faligndatav8qi_vis,
- v8qi_ftype_v8qi_v8qi);
+ SPARC_BUILTIN_FALIGNDATAV8QI, v8qi_ftype_v8qi_v8qi);
def_builtin ("__builtin_vis_faligndatav2si", CODE_FOR_faligndatav2si_vis,
- v2si_ftype_v2si_v2si);
+ SPARC_BUILTIN_FALIGNDATAV2SI, v2si_ftype_v2si_v2si);
def_builtin ("__builtin_vis_faligndatadi", CODE_FOR_faligndatav1di_vis,
- di_ftype_di_di);
+ SPARC_BUILTIN_FALIGNDATADI, di_ftype_di_di);
def_builtin ("__builtin_vis_write_gsr", CODE_FOR_wrgsr_vis,
- void_ftype_di);
+ SPARC_BUILTIN_WRGSR, void_ftype_di);
def_builtin ("__builtin_vis_read_gsr", CODE_FOR_rdgsr_vis,
- di_ftype_void);
+ SPARC_BUILTIN_RDGSR, di_ftype_void);
if (TARGET_ARCH64)
{
def_builtin ("__builtin_vis_alignaddr", CODE_FOR_alignaddrdi_vis,
- ptr_ftype_ptr_di);
+ SPARC_BUILTIN_ALIGNADDR, ptr_ftype_ptr_di);
def_builtin ("__builtin_vis_alignaddrl", CODE_FOR_alignaddrldi_vis,
- ptr_ftype_ptr_di);
+ SPARC_BUILTIN_ALIGNADDRL, ptr_ftype_ptr_di);
}
else
{
def_builtin ("__builtin_vis_alignaddr", CODE_FOR_alignaddrsi_vis,
- ptr_ftype_ptr_si);
+ SPARC_BUILTIN_ALIGNADDR, ptr_ftype_ptr_si);
def_builtin ("__builtin_vis_alignaddrl", CODE_FOR_alignaddrlsi_vis,
- ptr_ftype_ptr_si);
+ SPARC_BUILTIN_ALIGNADDRL, ptr_ftype_ptr_si);
}
/* Pixel distance. */
def_builtin_const ("__builtin_vis_pdist", CODE_FOR_pdist_vis,
- di_ftype_v8qi_v8qi_di);
+ SPARC_BUILTIN_PDIST, di_ftype_v8qi_v8qi_di);
/* Edge handling. */
if (TARGET_ARCH64)
{
def_builtin_const ("__builtin_vis_edge8", CODE_FOR_edge8di_vis,
- di_ftype_ptr_ptr);
+ SPARC_BUILTIN_EDGE8, di_ftype_ptr_ptr);
def_builtin_const ("__builtin_vis_edge8l", CODE_FOR_edge8ldi_vis,
- di_ftype_ptr_ptr);
+ SPARC_BUILTIN_EDGE8L, di_ftype_ptr_ptr);
def_builtin_const ("__builtin_vis_edge16", CODE_FOR_edge16di_vis,
- di_ftype_ptr_ptr);
+ SPARC_BUILTIN_EDGE16, di_ftype_ptr_ptr);
def_builtin_const ("__builtin_vis_edge16l", CODE_FOR_edge16ldi_vis,
- di_ftype_ptr_ptr);
+ SPARC_BUILTIN_EDGE16L, di_ftype_ptr_ptr);
def_builtin_const ("__builtin_vis_edge32", CODE_FOR_edge32di_vis,
- di_ftype_ptr_ptr);
+ SPARC_BUILTIN_EDGE32, di_ftype_ptr_ptr);
def_builtin_const ("__builtin_vis_edge32l", CODE_FOR_edge32ldi_vis,
- di_ftype_ptr_ptr);
- if (TARGET_VIS2)
- {
- def_builtin_const ("__builtin_vis_edge8n", CODE_FOR_edge8ndi_vis,
- di_ftype_ptr_ptr);
- def_builtin_const ("__builtin_vis_edge8ln", CODE_FOR_edge8lndi_vis,
- di_ftype_ptr_ptr);
- def_builtin_const ("__builtin_vis_edge16n", CODE_FOR_edge16ndi_vis,
- di_ftype_ptr_ptr);
- def_builtin_const ("__builtin_vis_edge16ln", CODE_FOR_edge16lndi_vis,
- di_ftype_ptr_ptr);
- def_builtin_const ("__builtin_vis_edge32n", CODE_FOR_edge32ndi_vis,
- di_ftype_ptr_ptr);
- def_builtin_const ("__builtin_vis_edge32ln", CODE_FOR_edge32lndi_vis,
- di_ftype_ptr_ptr);
- }
+ SPARC_BUILTIN_EDGE32L, di_ftype_ptr_ptr);
}
else
{
def_builtin_const ("__builtin_vis_edge8", CODE_FOR_edge8si_vis,
- si_ftype_ptr_ptr);
+ SPARC_BUILTIN_EDGE8, si_ftype_ptr_ptr);
def_builtin_const ("__builtin_vis_edge8l", CODE_FOR_edge8lsi_vis,
- si_ftype_ptr_ptr);
+ SPARC_BUILTIN_EDGE8L, si_ftype_ptr_ptr);
def_builtin_const ("__builtin_vis_edge16", CODE_FOR_edge16si_vis,
- si_ftype_ptr_ptr);
+ SPARC_BUILTIN_EDGE16, si_ftype_ptr_ptr);
def_builtin_const ("__builtin_vis_edge16l", CODE_FOR_edge16lsi_vis,
- si_ftype_ptr_ptr);
+ SPARC_BUILTIN_EDGE16L, si_ftype_ptr_ptr);
def_builtin_const ("__builtin_vis_edge32", CODE_FOR_edge32si_vis,
- si_ftype_ptr_ptr);
+ SPARC_BUILTIN_EDGE32, si_ftype_ptr_ptr);
def_builtin_const ("__builtin_vis_edge32l", CODE_FOR_edge32lsi_vis,
- si_ftype_ptr_ptr);
- if (TARGET_VIS2)
- {
- def_builtin_const ("__builtin_vis_edge8n", CODE_FOR_edge8nsi_vis,
- si_ftype_ptr_ptr);
- def_builtin_const ("__builtin_vis_edge8ln", CODE_FOR_edge8lnsi_vis,
- si_ftype_ptr_ptr);
- def_builtin_const ("__builtin_vis_edge16n", CODE_FOR_edge16nsi_vis,
- si_ftype_ptr_ptr);
- def_builtin_const ("__builtin_vis_edge16ln", CODE_FOR_edge16lnsi_vis,
- si_ftype_ptr_ptr);
- def_builtin_const ("__builtin_vis_edge32n", CODE_FOR_edge32nsi_vis,
- si_ftype_ptr_ptr);
- def_builtin_const ("__builtin_vis_edge32ln", CODE_FOR_edge32lnsi_vis,
- si_ftype_ptr_ptr);
- }
+ SPARC_BUILTIN_EDGE32L, si_ftype_ptr_ptr);
}
/* Pixel compare. */
if (TARGET_ARCH64)
{
def_builtin_const ("__builtin_vis_fcmple16", CODE_FOR_fcmple16di_vis,
- di_ftype_v4hi_v4hi);
+ SPARC_BUILTIN_FCMPLE16, di_ftype_v4hi_v4hi);
def_builtin_const ("__builtin_vis_fcmple32", CODE_FOR_fcmple32di_vis,
- di_ftype_v2si_v2si);
+ SPARC_BUILTIN_FCMPLE32, di_ftype_v2si_v2si);
def_builtin_const ("__builtin_vis_fcmpne16", CODE_FOR_fcmpne16di_vis,
- di_ftype_v4hi_v4hi);
+ SPARC_BUILTIN_FCMPNE16, di_ftype_v4hi_v4hi);
def_builtin_const ("__builtin_vis_fcmpne32", CODE_FOR_fcmpne32di_vis,
- di_ftype_v2si_v2si);
+ SPARC_BUILTIN_FCMPNE32, di_ftype_v2si_v2si);
def_builtin_const ("__builtin_vis_fcmpgt16", CODE_FOR_fcmpgt16di_vis,
- di_ftype_v4hi_v4hi);
+ SPARC_BUILTIN_FCMPGT16, di_ftype_v4hi_v4hi);
def_builtin_const ("__builtin_vis_fcmpgt32", CODE_FOR_fcmpgt32di_vis,
- di_ftype_v2si_v2si);
+ SPARC_BUILTIN_FCMPGT32, di_ftype_v2si_v2si);
def_builtin_const ("__builtin_vis_fcmpeq16", CODE_FOR_fcmpeq16di_vis,
- di_ftype_v4hi_v4hi);
+ SPARC_BUILTIN_FCMPEQ16, di_ftype_v4hi_v4hi);
def_builtin_const ("__builtin_vis_fcmpeq32", CODE_FOR_fcmpeq32di_vis,
- di_ftype_v2si_v2si);
+ SPARC_BUILTIN_FCMPEQ32, di_ftype_v2si_v2si);
}
else
{
def_builtin_const ("__builtin_vis_fcmple16", CODE_FOR_fcmple16si_vis,
- si_ftype_v4hi_v4hi);
+ SPARC_BUILTIN_FCMPLE16, si_ftype_v4hi_v4hi);
def_builtin_const ("__builtin_vis_fcmple32", CODE_FOR_fcmple32si_vis,
- si_ftype_v2si_v2si);
+ SPARC_BUILTIN_FCMPLE32, si_ftype_v2si_v2si);
def_builtin_const ("__builtin_vis_fcmpne16", CODE_FOR_fcmpne16si_vis,
- si_ftype_v4hi_v4hi);
+ SPARC_BUILTIN_FCMPNE16, si_ftype_v4hi_v4hi);
def_builtin_const ("__builtin_vis_fcmpne32", CODE_FOR_fcmpne32si_vis,
- si_ftype_v2si_v2si);
+ SPARC_BUILTIN_FCMPNE32, si_ftype_v2si_v2si);
def_builtin_const ("__builtin_vis_fcmpgt16", CODE_FOR_fcmpgt16si_vis,
- si_ftype_v4hi_v4hi);
+ SPARC_BUILTIN_FCMPGT16, si_ftype_v4hi_v4hi);
def_builtin_const ("__builtin_vis_fcmpgt32", CODE_FOR_fcmpgt32si_vis,
- si_ftype_v2si_v2si);
+ SPARC_BUILTIN_FCMPGT32, si_ftype_v2si_v2si);
def_builtin_const ("__builtin_vis_fcmpeq16", CODE_FOR_fcmpeq16si_vis,
- si_ftype_v4hi_v4hi);
+ SPARC_BUILTIN_FCMPEQ16, si_ftype_v4hi_v4hi);
def_builtin_const ("__builtin_vis_fcmpeq32", CODE_FOR_fcmpeq32si_vis,
- si_ftype_v2si_v2si);
+ SPARC_BUILTIN_FCMPEQ32, si_ftype_v2si_v2si);
}
/* Addition and subtraction. */
def_builtin_const ("__builtin_vis_fpadd16", CODE_FOR_addv4hi3,
- v4hi_ftype_v4hi_v4hi);
+ SPARC_BUILTIN_FPADD16, v4hi_ftype_v4hi_v4hi);
def_builtin_const ("__builtin_vis_fpadd16s", CODE_FOR_addv2hi3,
- v2hi_ftype_v2hi_v2hi);
+ SPARC_BUILTIN_FPADD16S, v2hi_ftype_v2hi_v2hi);
def_builtin_const ("__builtin_vis_fpadd32", CODE_FOR_addv2si3,
- v2si_ftype_v2si_v2si);
+ SPARC_BUILTIN_FPADD32, v2si_ftype_v2si_v2si);
def_builtin_const ("__builtin_vis_fpadd32s", CODE_FOR_addv1si3,
- v1si_ftype_v1si_v1si);
+ SPARC_BUILTIN_FPADD32S, v1si_ftype_v1si_v1si);
def_builtin_const ("__builtin_vis_fpsub16", CODE_FOR_subv4hi3,
- v4hi_ftype_v4hi_v4hi);
+ SPARC_BUILTIN_FPSUB16, v4hi_ftype_v4hi_v4hi);
def_builtin_const ("__builtin_vis_fpsub16s", CODE_FOR_subv2hi3,
- v2hi_ftype_v2hi_v2hi);
+ SPARC_BUILTIN_FPSUB16S, v2hi_ftype_v2hi_v2hi);
def_builtin_const ("__builtin_vis_fpsub32", CODE_FOR_subv2si3,
- v2si_ftype_v2si_v2si);
+ SPARC_BUILTIN_FPSUB32, v2si_ftype_v2si_v2si);
def_builtin_const ("__builtin_vis_fpsub32s", CODE_FOR_subv1si3,
- v1si_ftype_v1si_v1si);
+ SPARC_BUILTIN_FPSUB32S, v1si_ftype_v1si_v1si);
/* Three-dimensional array addressing. */
if (TARGET_ARCH64)
{
def_builtin_const ("__builtin_vis_array8", CODE_FOR_array8di_vis,
- di_ftype_di_di);
+ SPARC_BUILTIN_ARRAY8, di_ftype_di_di);
def_builtin_const ("__builtin_vis_array16", CODE_FOR_array16di_vis,
- di_ftype_di_di);
+ SPARC_BUILTIN_ARRAY16, di_ftype_di_di);
def_builtin_const ("__builtin_vis_array32", CODE_FOR_array32di_vis,
- di_ftype_di_di);
+ SPARC_BUILTIN_ARRAY32, di_ftype_di_di);
}
else
{
def_builtin_const ("__builtin_vis_array8", CODE_FOR_array8si_vis,
- si_ftype_si_si);
+ SPARC_BUILTIN_ARRAY8, si_ftype_si_si);
def_builtin_const ("__builtin_vis_array16", CODE_FOR_array16si_vis,
- si_ftype_si_si);
+ SPARC_BUILTIN_ARRAY16, si_ftype_si_si);
def_builtin_const ("__builtin_vis_array32", CODE_FOR_array32si_vis,
- si_ftype_si_si);
- }
+ SPARC_BUILTIN_ARRAY32, si_ftype_si_si);
+ }
if (TARGET_VIS2)
{
- /* Byte mask and shuffle */
+ /* Edge handling. */
+ if (TARGET_ARCH64)
+ {
+ def_builtin_const ("__builtin_vis_edge8n", CODE_FOR_edge8ndi_vis,
+ SPARC_BUILTIN_EDGE8N, di_ftype_ptr_ptr);
+ def_builtin_const ("__builtin_vis_edge8ln", CODE_FOR_edge8lndi_vis,
+ SPARC_BUILTIN_EDGE8LN, di_ftype_ptr_ptr);
+ def_builtin_const ("__builtin_vis_edge16n", CODE_FOR_edge16ndi_vis,
+ SPARC_BUILTIN_EDGE16N, di_ftype_ptr_ptr);
+ def_builtin_const ("__builtin_vis_edge16ln", CODE_FOR_edge16lndi_vis,
+ SPARC_BUILTIN_EDGE16LN, di_ftype_ptr_ptr);
+ def_builtin_const ("__builtin_vis_edge32n", CODE_FOR_edge32ndi_vis,
+ SPARC_BUILTIN_EDGE32N, di_ftype_ptr_ptr);
+ def_builtin_const ("__builtin_vis_edge32ln", CODE_FOR_edge32lndi_vis,
+ SPARC_BUILTIN_EDGE32LN, di_ftype_ptr_ptr);
+ }
+ else
+ {
+ def_builtin_const ("__builtin_vis_edge8n", CODE_FOR_edge8nsi_vis,
+ SPARC_BUILTIN_EDGE8N, si_ftype_ptr_ptr);
+ def_builtin_const ("__builtin_vis_edge8ln", CODE_FOR_edge8lnsi_vis,
+ SPARC_BUILTIN_EDGE8LN, si_ftype_ptr_ptr);
+ def_builtin_const ("__builtin_vis_edge16n", CODE_FOR_edge16nsi_vis,
+ SPARC_BUILTIN_EDGE16N, si_ftype_ptr_ptr);
+ def_builtin_const ("__builtin_vis_edge16ln", CODE_FOR_edge16lnsi_vis,
+ SPARC_BUILTIN_EDGE16LN, si_ftype_ptr_ptr);
+ def_builtin_const ("__builtin_vis_edge32n", CODE_FOR_edge32nsi_vis,
+ SPARC_BUILTIN_EDGE32N, si_ftype_ptr_ptr);
+ def_builtin_const ("__builtin_vis_edge32ln", CODE_FOR_edge32lnsi_vis,
+ SPARC_BUILTIN_EDGE32LN, si_ftype_ptr_ptr);
+ }
+
+ /* Byte mask and shuffle. */
if (TARGET_ARCH64)
def_builtin ("__builtin_vis_bmask", CODE_FOR_bmaskdi_vis,
- di_ftype_di_di);
+ SPARC_BUILTIN_BMASK, di_ftype_di_di);
else
def_builtin ("__builtin_vis_bmask", CODE_FOR_bmasksi_vis,
- si_ftype_si_si);
+ SPARC_BUILTIN_BMASK, si_ftype_si_si);
def_builtin ("__builtin_vis_bshufflev4hi", CODE_FOR_bshufflev4hi_vis,
- v4hi_ftype_v4hi_v4hi);
+ SPARC_BUILTIN_BSHUFFLEV4HI, v4hi_ftype_v4hi_v4hi);
def_builtin ("__builtin_vis_bshufflev8qi", CODE_FOR_bshufflev8qi_vis,
- v8qi_ftype_v8qi_v8qi);
+ SPARC_BUILTIN_BSHUFFLEV8QI, v8qi_ftype_v8qi_v8qi);
def_builtin ("__builtin_vis_bshufflev2si", CODE_FOR_bshufflev2si_vis,
- v2si_ftype_v2si_v2si);
+ SPARC_BUILTIN_BSHUFFLEV2SI, v2si_ftype_v2si_v2si);
def_builtin ("__builtin_vis_bshuffledi", CODE_FOR_bshufflev1di_vis,
- di_ftype_di_di);
+ SPARC_BUILTIN_BSHUFFLEDI, di_ftype_di_di);
}
if (TARGET_VIS3)
if (TARGET_ARCH64)
{
def_builtin ("__builtin_vis_cmask8", CODE_FOR_cmask8di_vis,
- void_ftype_di);
+ SPARC_BUILTIN_CMASK8, void_ftype_di);
def_builtin ("__builtin_vis_cmask16", CODE_FOR_cmask16di_vis,
- void_ftype_di);
+ SPARC_BUILTIN_CMASK16, void_ftype_di);
def_builtin ("__builtin_vis_cmask32", CODE_FOR_cmask32di_vis,
- void_ftype_di);
+ SPARC_BUILTIN_CMASK32, void_ftype_di);
}
else
{
def_builtin ("__builtin_vis_cmask8", CODE_FOR_cmask8si_vis,
- void_ftype_si);
+ SPARC_BUILTIN_CMASK8, void_ftype_si);
def_builtin ("__builtin_vis_cmask16", CODE_FOR_cmask16si_vis,
- void_ftype_si);
+ SPARC_BUILTIN_CMASK16, void_ftype_si);
def_builtin ("__builtin_vis_cmask32", CODE_FOR_cmask32si_vis,
- void_ftype_si);
+ SPARC_BUILTIN_CMASK32, void_ftype_si);
}
def_builtin_const ("__builtin_vis_fchksm16", CODE_FOR_fchksm16_vis,
- v4hi_ftype_v4hi_v4hi);
+ SPARC_BUILTIN_FCHKSM16, v4hi_ftype_v4hi_v4hi);
def_builtin_const ("__builtin_vis_fsll16", CODE_FOR_vashlv4hi3,
- v4hi_ftype_v4hi_v4hi);
+ SPARC_BUILTIN_FSLL16, v4hi_ftype_v4hi_v4hi);
def_builtin_const ("__builtin_vis_fslas16", CODE_FOR_vssashlv4hi3,
- v4hi_ftype_v4hi_v4hi);
+ SPARC_BUILTIN_FSLAS16, v4hi_ftype_v4hi_v4hi);
def_builtin_const ("__builtin_vis_fsrl16", CODE_FOR_vlshrv4hi3,
- v4hi_ftype_v4hi_v4hi);
+ SPARC_BUILTIN_FSRL16, v4hi_ftype_v4hi_v4hi);
def_builtin_const ("__builtin_vis_fsra16", CODE_FOR_vashrv4hi3,
- v4hi_ftype_v4hi_v4hi);
+ SPARC_BUILTIN_FSRA16, v4hi_ftype_v4hi_v4hi);
def_builtin_const ("__builtin_vis_fsll32", CODE_FOR_vashlv2si3,
- v2si_ftype_v2si_v2si);
+ SPARC_BUILTIN_FSLL32, v2si_ftype_v2si_v2si);
def_builtin_const ("__builtin_vis_fslas32", CODE_FOR_vssashlv2si3,
- v2si_ftype_v2si_v2si);
+ SPARC_BUILTIN_FSLAS32, v2si_ftype_v2si_v2si);
def_builtin_const ("__builtin_vis_fsrl32", CODE_FOR_vlshrv2si3,
- v2si_ftype_v2si_v2si);
+ SPARC_BUILTIN_FSRL32, v2si_ftype_v2si_v2si);
def_builtin_const ("__builtin_vis_fsra32", CODE_FOR_vashrv2si3,
- v2si_ftype_v2si_v2si);
+ SPARC_BUILTIN_FSRA32, v2si_ftype_v2si_v2si);
if (TARGET_ARCH64)
def_builtin_const ("__builtin_vis_pdistn", CODE_FOR_pdistndi_vis,
- di_ftype_v8qi_v8qi);
+ SPARC_BUILTIN_PDISTN, di_ftype_v8qi_v8qi);
else
def_builtin_const ("__builtin_vis_pdistn", CODE_FOR_pdistnsi_vis,
- si_ftype_v8qi_v8qi);
+ SPARC_BUILTIN_PDISTN, si_ftype_v8qi_v8qi);
def_builtin_const ("__builtin_vis_fmean16", CODE_FOR_fmean16_vis,
- v4hi_ftype_v4hi_v4hi);
+ SPARC_BUILTIN_FMEAN16, v4hi_ftype_v4hi_v4hi);
def_builtin_const ("__builtin_vis_fpadd64", CODE_FOR_fpadd64_vis,
- di_ftype_di_di);
+ SPARC_BUILTIN_FPADD64, di_ftype_di_di);
def_builtin_const ("__builtin_vis_fpsub64", CODE_FOR_fpsub64_vis,
- di_ftype_di_di);
+ SPARC_BUILTIN_FPSUB64, di_ftype_di_di);
def_builtin_const ("__builtin_vis_fpadds16", CODE_FOR_ssaddv4hi3,
- v4hi_ftype_v4hi_v4hi);
+ SPARC_BUILTIN_FPADDS16, v4hi_ftype_v4hi_v4hi);
def_builtin_const ("__builtin_vis_fpadds16s", CODE_FOR_ssaddv2hi3,
- v2hi_ftype_v2hi_v2hi);
+ SPARC_BUILTIN_FPADDS16S, v2hi_ftype_v2hi_v2hi);
def_builtin_const ("__builtin_vis_fpsubs16", CODE_FOR_sssubv4hi3,
- v4hi_ftype_v4hi_v4hi);
+ SPARC_BUILTIN_FPSUBS16, v4hi_ftype_v4hi_v4hi);
def_builtin_const ("__builtin_vis_fpsubs16s", CODE_FOR_sssubv2hi3,
- v2hi_ftype_v2hi_v2hi);
+ SPARC_BUILTIN_FPSUBS16S, v2hi_ftype_v2hi_v2hi);
def_builtin_const ("__builtin_vis_fpadds32", CODE_FOR_ssaddv2si3,
- v2si_ftype_v2si_v2si);
+ SPARC_BUILTIN_FPADDS32, v2si_ftype_v2si_v2si);
def_builtin_const ("__builtin_vis_fpadds32s", CODE_FOR_ssaddv1si3,
- v1si_ftype_v1si_v1si);
+ SPARC_BUILTIN_FPADDS32S, v1si_ftype_v1si_v1si);
def_builtin_const ("__builtin_vis_fpsubs32", CODE_FOR_sssubv2si3,
- v2si_ftype_v2si_v2si);
+ SPARC_BUILTIN_FPSUBS32, v2si_ftype_v2si_v2si);
def_builtin_const ("__builtin_vis_fpsubs32s", CODE_FOR_sssubv1si3,
- v1si_ftype_v1si_v1si);
+ SPARC_BUILTIN_FPSUBS32S, v1si_ftype_v1si_v1si);
if (TARGET_ARCH64)
{
def_builtin_const ("__builtin_vis_fucmple8", CODE_FOR_fucmple8di_vis,
- di_ftype_v8qi_v8qi);
+ SPARC_BUILTIN_FUCMPLE8, di_ftype_v8qi_v8qi);
def_builtin_const ("__builtin_vis_fucmpne8", CODE_FOR_fucmpne8di_vis,
- di_ftype_v8qi_v8qi);
+ SPARC_BUILTIN_FUCMPNE8, di_ftype_v8qi_v8qi);
def_builtin_const ("__builtin_vis_fucmpgt8", CODE_FOR_fucmpgt8di_vis,
- di_ftype_v8qi_v8qi);
+ SPARC_BUILTIN_FUCMPGT8, di_ftype_v8qi_v8qi);
def_builtin_const ("__builtin_vis_fucmpeq8", CODE_FOR_fucmpeq8di_vis,
- di_ftype_v8qi_v8qi);
+ SPARC_BUILTIN_FUCMPEQ8, di_ftype_v8qi_v8qi);
}
else
{
def_builtin_const ("__builtin_vis_fucmple8", CODE_FOR_fucmple8si_vis,
- si_ftype_v8qi_v8qi);
+ SPARC_BUILTIN_FUCMPLE8, si_ftype_v8qi_v8qi);
def_builtin_const ("__builtin_vis_fucmpne8", CODE_FOR_fucmpne8si_vis,
- si_ftype_v8qi_v8qi);
+ SPARC_BUILTIN_FUCMPNE8, si_ftype_v8qi_v8qi);
def_builtin_const ("__builtin_vis_fucmpgt8", CODE_FOR_fucmpgt8si_vis,
- si_ftype_v8qi_v8qi);
+ SPARC_BUILTIN_FUCMPGT8, si_ftype_v8qi_v8qi);
def_builtin_const ("__builtin_vis_fucmpeq8", CODE_FOR_fucmpeq8si_vis,
- si_ftype_v8qi_v8qi);
+ SPARC_BUILTIN_FUCMPEQ8, si_ftype_v8qi_v8qi);
}
def_builtin_const ("__builtin_vis_fhadds", CODE_FOR_fhaddsf_vis,
- sf_ftype_sf_sf);
+ SPARC_BUILTIN_FHADDS, sf_ftype_sf_sf);
def_builtin_const ("__builtin_vis_fhaddd", CODE_FOR_fhadddf_vis,
- df_ftype_df_df);
+ SPARC_BUILTIN_FHADDD, df_ftype_df_df);
def_builtin_const ("__builtin_vis_fhsubs", CODE_FOR_fhsubsf_vis,
- sf_ftype_sf_sf);
+ SPARC_BUILTIN_FHSUBS, sf_ftype_sf_sf);
def_builtin_const ("__builtin_vis_fhsubd", CODE_FOR_fhsubdf_vis,
- df_ftype_df_df);
+ SPARC_BUILTIN_FHSUBD, df_ftype_df_df);
def_builtin_const ("__builtin_vis_fnhadds", CODE_FOR_fnhaddsf_vis,
- sf_ftype_sf_sf);
+ SPARC_BUILTIN_FNHADDS, sf_ftype_sf_sf);
def_builtin_const ("__builtin_vis_fnhaddd", CODE_FOR_fnhadddf_vis,
- df_ftype_df_df);
+ SPARC_BUILTIN_FNHADDD, df_ftype_df_df);
def_builtin_const ("__builtin_vis_umulxhi", CODE_FOR_umulxhi_vis,
- di_ftype_di_di);
+ SPARC_BUILTIN_UMULXHI, di_ftype_di_di);
def_builtin_const ("__builtin_vis_xmulx", CODE_FOR_xmulx_vis,
- di_ftype_di_di);
+ SPARC_BUILTIN_XMULX, di_ftype_di_di);
def_builtin_const ("__builtin_vis_xmulxhi", CODE_FOR_xmulxhi_vis,
- di_ftype_di_di);
+ SPARC_BUILTIN_XMULXHI, di_ftype_di_di);
}
}
-/* Handle TARGET_EXPAND_BUILTIN target hook.
- Expand builtin functions for sparc intrinsics. */
+/* Implement TARGET_BUILTIN_DECL hook.
+
+   Return the decl registered for builtin function code CODE, NULL_TREE if
+   the builtin was never registered by def_builtin, or error_mark_node if
+   CODE is out of range.  INITIALIZE_P is ignored.  */
+
+static tree
+sparc_builtin_decl (unsigned code, bool initialize_p ATTRIBUTE_UNUSED)
+{
+  if (code >= SPARC_BUILTIN_MAX)
+    return error_mark_node;
+
+  return sparc_builtins[code];
+}
+
+/* Implement TARGET_EXPAND_BUILTIN hook. */
static rtx
sparc_expand_builtin (tree exp, rtx target,
enum machine_mode tmode ATTRIBUTE_UNUSED,
int ignore ATTRIBUTE_UNUSED)
{
- tree arg;
- call_expr_arg_iterator iter;
tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
- unsigned int icode = DECL_FUNCTION_CODE (fndecl);
- rtx pat, op[4];
+ enum sparc_builtins code = (enum sparc_builtins) DECL_FUNCTION_CODE (fndecl);
+ enum insn_code icode = sparc_builtins_icode[code];
+ bool nonvoid = TREE_TYPE (TREE_TYPE (fndecl)) != void_type_node;
+ call_expr_arg_iterator iter;
int arg_count = 0;
- bool nonvoid;
-
- nonvoid = TREE_TYPE (TREE_TYPE (fndecl)) != void_type_node;
+ rtx pat, op[4];
+ tree arg;
if (nonvoid)
{
else
op[0] = target;
}
+
FOR_EACH_CALL_EXPR_ARG (arg, iter, exp)
{
const struct insn_operand_data *insn_op;
insn_op = &insn_data[icode].operand[idx];
op[arg_count] = expand_normal (arg);
- if (insn_op->mode == V1DImode
- && GET_MODE (op[arg_count]) == DImode)
+ if (code == SPARC_BUILTIN_LDFSR || code == SPARC_BUILTIN_STFSR)
+ {
+ if (!address_operand (op[arg_count], SImode))
+ {
+ op[arg_count] = convert_memory_address (Pmode, op[arg_count]);
+ op[arg_count] = copy_addr_to_reg (op[arg_count]);
+ }
+ op[arg_count] = gen_rtx_MEM (SImode, op[arg_count]);
+ }
+
+ else if (insn_op->mode == V1DImode
+ && GET_MODE (op[arg_count]) == DImode)
op[arg_count] = gen_lowpart (V1DImode, op[arg_count]);
+
else if (insn_op->mode == V1SImode
- && GET_MODE (op[arg_count]) == SImode)
+ && GET_MODE (op[arg_count]) == SImode)
op[arg_count] = gen_lowpart (V1SImode, op[arg_count]);
if (! (*insn_data[icode].operand[idx].predicate) (op[arg_count],
emit_insn (pat);
- if (nonvoid)
- return op[0];
- else
- return const0_rtx;
+ return (nonvoid ? op[0] : const0_rtx);
}
+/* Return the upper 16 bits of the 8x16 multiplication. */
+
static int
sparc_vis_mul8x16 (int e8, int e16)
{
the result into the array N_ELTS, whose elements are of INNER_TYPE. */
static void
-sparc_handle_vis_mul8x16 (tree *n_elts, int fncode, tree inner_type,
- tree cst0, tree cst1)
+sparc_handle_vis_mul8x16 (tree *n_elts, enum sparc_builtins fncode,
+ tree inner_type, tree cst0, tree cst1)
{
unsigned i, num = VECTOR_CST_NELTS (cst0);
int scale;
switch (fncode)
{
- case CODE_FOR_fmul8x16_vis:
+ case SPARC_BUILTIN_FMUL8X16:
for (i = 0; i < num; ++i)
{
int val
}
break;
- case CODE_FOR_fmul8x16au_vis:
+ case SPARC_BUILTIN_FMUL8X16AU:
scale = TREE_INT_CST_LOW (VECTOR_CST_ELT (cst1, 0));
for (i = 0; i < num; ++i)
}
break;
- case CODE_FOR_fmul8x16al_vis:
+ case SPARC_BUILTIN_FMUL8X16AL:
scale = TREE_INT_CST_LOW (VECTOR_CST_ELT (cst1, 1));
for (i = 0; i < num; ++i)
}
}
-/* Handle TARGET_FOLD_BUILTIN target hook.
+/* Implement TARGET_FOLD_BUILTIN hook.
+
Fold builtin functions for SPARC intrinsics. If IGNORE is true the
result of the function call is ignored. NULL_TREE is returned if the
function could not be folded. */
sparc_fold_builtin (tree fndecl, int n_args ATTRIBUTE_UNUSED,
tree *args, bool ignore)
{
- tree arg0, arg1, arg2;
+ enum sparc_builtins code = (enum sparc_builtins) DECL_FUNCTION_CODE (fndecl);
tree rtype = TREE_TYPE (TREE_TYPE (fndecl));
- enum insn_code icode = (enum insn_code) DECL_FUNCTION_CODE (fndecl);
+ tree arg0, arg1, arg2;
if (ignore)
- {
- /* Note that a switch statement instead of the sequence of tests would
- be incorrect as many of the CODE_FOR values could be CODE_FOR_nothing
- and that would yield multiple alternatives with identical values. */
- if (icode == CODE_FOR_alignaddrsi_vis
- || icode == CODE_FOR_alignaddrdi_vis
- || icode == CODE_FOR_wrgsr_vis
- || icode == CODE_FOR_bmasksi_vis
- || icode == CODE_FOR_bmaskdi_vis
- || icode == CODE_FOR_cmask8si_vis
- || icode == CODE_FOR_cmask8di_vis
- || icode == CODE_FOR_cmask16si_vis
- || icode == CODE_FOR_cmask16di_vis
- || icode == CODE_FOR_cmask32si_vis
- || icode == CODE_FOR_cmask32di_vis)
- ;
- else
+ switch (code)
+ {
+ case SPARC_BUILTIN_LDFSR:
+ case SPARC_BUILTIN_STFSR:
+ case SPARC_BUILTIN_ALIGNADDR:
+ case SPARC_BUILTIN_WRGSR:
+ case SPARC_BUILTIN_BMASK:
+ case SPARC_BUILTIN_CMASK8:
+ case SPARC_BUILTIN_CMASK16:
+ case SPARC_BUILTIN_CMASK32:
+ break;
+
+ default:
return build_zero_cst (rtype);
- }
+ }
- switch (icode)
+ switch (code)
{
- case CODE_FOR_fexpand_vis:
+ case SPARC_BUILTIN_FEXPAND:
arg0 = args[0];
STRIP_NOPS (arg0);
}
break;
- case CODE_FOR_fmul8x16_vis:
- case CODE_FOR_fmul8x16au_vis:
- case CODE_FOR_fmul8x16al_vis:
+ case SPARC_BUILTIN_FMUL8X16:
+ case SPARC_BUILTIN_FMUL8X16AU:
+ case SPARC_BUILTIN_FMUL8X16AL:
arg0 = args[0];
arg1 = args[1];
STRIP_NOPS (arg0);
{
tree inner_type = TREE_TYPE (rtype);
tree *n_elts = XALLOCAVEC (tree, VECTOR_CST_NELTS (arg0));
- sparc_handle_vis_mul8x16 (n_elts, icode, inner_type, arg0, arg1);
+ sparc_handle_vis_mul8x16 (n_elts, code, inner_type, arg0, arg1);
return build_vector (rtype, n_elts);
}
break;
- case CODE_FOR_fpmerge_vis:
+ case SPARC_BUILTIN_FPMERGE:
arg0 = args[0];
arg1 = args[1];
STRIP_NOPS (arg0);
}
break;
- case CODE_FOR_pdist_vis:
+ case SPARC_BUILTIN_PDIST:
+ case SPARC_BUILTIN_PDISTN:
arg0 = args[0];
arg1 = args[1];
- arg2 = args[2];
STRIP_NOPS (arg0);
STRIP_NOPS (arg1);
- STRIP_NOPS (arg2);
+ if (code == SPARC_BUILTIN_PDIST)
+ {
+ arg2 = args[2];
+ STRIP_NOPS (arg2);
+ }
+ else
+ arg2 = integer_zero_node;
if (TREE_CODE (arg0) == VECTOR_CST
&& TREE_CODE (arg1) == VECTOR_CST
&& TREE_CODE (arg2) == INTEGER_CST)
{
bool overflow = false;
- double_int result = TREE_INT_CST (arg2);
- double_int tmp;
+ widest_int result = wi::to_widest (arg2);
+ widest_int tmp;
unsigned i;
for (i = 0; i < VECTOR_CST_NELTS (arg0); ++i)
{
- double_int e0 = TREE_INT_CST (VECTOR_CST_ELT (arg0, i));
- double_int e1 = TREE_INT_CST (VECTOR_CST_ELT (arg1, i));
+ tree e0 = VECTOR_CST_ELT (arg0, i);
+ tree e1 = VECTOR_CST_ELT (arg1, i);
bool neg1_ovf, neg2_ovf, add1_ovf, add2_ovf;
- tmp = e1.neg_with_overflow (&neg1_ovf);
- tmp = e0.add_with_sign (tmp, false, &add1_ovf);
- if (tmp.is_negative ())
- tmp = tmp.neg_with_overflow (&neg2_ovf);
-
- result = result.add_with_sign (tmp, false, &add2_ovf);
+ tmp = wi::neg (wi::to_widest (e1), &neg1_ovf);
+ tmp = wi::add (wi::to_widest (e0), tmp, SIGNED, &add1_ovf);
+ if (wi::neg_p (tmp))
+ tmp = wi::neg (tmp, &neg2_ovf);
+ else
+ neg2_ovf = false;
+ result = wi::add (result, tmp, SIGNED, &add2_ovf);
overflow |= neg1_ovf | neg2_ovf | add1_ovf | add2_ovf;
}
gcc_assert (!overflow);
- return build_int_cst_wide (rtype, result.low, result.high);
+ return wide_int_to_tree (rtype, result);
}
default:
HOST_WIDE_INT delta, HOST_WIDE_INT vcall_offset,
tree function)
{
- rtx this_rtx, insn, funexp;
+ rtx this_rtx, funexp;
+ rtx_insn *insn;
unsigned int int_arg_first;
reload_completed = 1;
static struct machine_function *
sparc_init_machine_status (void)
{
-  return ggc_alloc_cleared_machine_function ();
-}
-
-/* Locate some local-dynamic symbol still in use by this function
-   so that we can print its name in local-dynamic base patterns.  */
-
-static const char *
-get_some_local_dynamic_name (void)
-{
-  rtx insn;
-
-  if (cfun->machine->some_ld_name)
-    return cfun->machine->some_ld_name;
-
-  for (insn = get_insns (); insn ; insn = NEXT_INSN (insn))
-    if (INSN_P (insn)
-	&& for_each_rtx (&PATTERN (insn), get_some_local_dynamic_name_1, 0))
-      return cfun->machine->some_ld_name;
-
-  gcc_unreachable ();
-}
-
-static int
-get_some_local_dynamic_name_1 (rtx *px, void *data ATTRIBUTE_UNUSED)
-{
-  rtx x = *px;
-
-  if (x
-      && GET_CODE (x) == SYMBOL_REF
-      && SYMBOL_REF_TLS_MODEL (x) == TLS_MODEL_LOCAL_DYNAMIC)
-    {
-      cfun->machine->some_ld_name = XSTR (x, 0);
-      return 1;
-    }
-
-  return 0;
+  /* NOTE(review): the name suggests the allocation is zero-initialized as
+     well as GC-managed -- confirm against ggc_cleared_alloc's definition.  */
+  return ggc_cleared_alloc<machine_function> ();
}
/* This is called from dwarf2out.c via TARGET_ASM_OUTPUT_DWARF_DTPREL.
rtx newvalue = gen_reg_rtx (SImode);
rtx res = gen_reg_rtx (SImode);
rtx resv = gen_reg_rtx (SImode);
- rtx memsi, val, mask, end_label, loop_label, cc;
+ rtx memsi, val, mask, cc;
emit_insn (gen_rtx_SET (VOIDmode, addr,
gen_rtx_AND (Pmode, addr1, GEN_INT (-4))));
emit_insn (gen_rtx_SET (VOIDmode, newv,
gen_rtx_AND (SImode, newv, mask)));
- end_label = gen_label_rtx ();
- loop_label = gen_label_rtx ();
+ rtx_code_label *end_label = gen_label_rtx ();
+ rtx_code_label *loop_label = gen_label_rtx ();
emit_label (loop_label);
emit_insn (gen_rtx_SET (VOIDmode, oldvalue,
OPERANDS are its operands and OPCODE is the mnemonic to be used. */
const char *
-output_v8plus_mult (rtx insn, rtx *operands, const char *opcode)
+output_v8plus_mult (rtx_insn *insn, rtx *operands, const char *opcode)
{
char mulstr[32];
return true;
}
-static enum machine_mode sparc_cstore_mode (enum insn_code icode ATTRIBUTE_UNUSED)
+/* Implement TARGET_CSTORE_MODE.
+
+   Return DImode under TARGET_ARCH64 and SImode otherwise, independently
+   of ICODE.  */
+
+static enum machine_mode
+sparc_cstore_mode (enum insn_code icode ATTRIBUTE_UNUSED)
{
  return (TARGET_ARCH64 ? DImode : SImode);
}
+/* Return the compound expression made of T1 and T2, i.e. a void-typed
+   COMPOUND_EXPR that evaluates T1 for its side effects and then T2.  */
+
+static inline tree
+compound_expr (tree t1, tree t2)
+{
+  return build2 (COMPOUND_EXPR, void_type_node, t1, t2);
+}
+
+/* Implement TARGET_ATOMIC_ASSIGN_EXPAND_FENV hook.  */
+
+static void
+sparc_atomic_assign_expand_fenv (tree *hold, tree *clear, tree *update)
+{
+  /* Without an FPU the FSR builtins were not registered; leave *HOLD,
+     *CLEAR and *UPDATE untouched.  */
+  if (!TARGET_FPU)
+    return;
+
+  /* Two 5-bit fields of the FSR: accrued exceptions and trap enables.
+     NOTE(review): exact field positions (bits 5-9 and 23-27) are implied by
+     the shifts and names -- confirm against the SPARC architecture manual.  */
+  const unsigned HOST_WIDE_INT accrued_exception_mask = 0x1f << 5;
+  const unsigned HOST_WIDE_INT trap_enable_mask = 0x1f << 23;
+
+  /* We generate the equivalent of feholdexcept (&fenv_var):
+
+       unsigned int fenv_var;
+       __builtin_store_fsr (&fenv_var);
+
+       unsigned int tmp1_var;
+       tmp1_var = fenv_var & ~(accrued_exception_mask | trap_enable_mask);
+
+       __builtin_load_fsr (&tmp1_var);  */
+
+  tree fenv_var = create_tmp_var (unsigned_type_node, NULL);
+  mark_addressable (fenv_var);
+  tree fenv_addr = build_fold_addr_expr (fenv_var);
+  tree stfsr = sparc_builtins[SPARC_BUILTIN_STFSR];
+  tree hold_stfsr = build_call_expr (stfsr, 1, fenv_addr);
+
+  tree tmp1_var = create_tmp_var (unsigned_type_node, NULL);
+  mark_addressable (tmp1_var);
+  tree masked_fenv_var
+    = build2 (BIT_AND_EXPR, unsigned_type_node, fenv_var,
+	      build_int_cst (unsigned_type_node,
+			     ~(accrued_exception_mask | trap_enable_mask)));
+  tree hold_mask
+    = build2 (MODIFY_EXPR, void_type_node, tmp1_var, masked_fenv_var);
+
+  tree tmp1_addr = build_fold_addr_expr (tmp1_var);
+  tree ldfsr = sparc_builtins[SPARC_BUILTIN_LDFSR];
+  tree hold_ldfsr = build_call_expr (ldfsr, 1, tmp1_addr);
+
+  *hold = compound_expr (compound_expr (hold_stfsr, hold_mask), hold_ldfsr);
+
+  /* We reload the value of tmp1_var to clear the exceptions:
+
+       __builtin_load_fsr (&tmp1_var);  */
+
+  *clear = build_call_expr (ldfsr, 1, tmp1_addr);
+
+  /* We generate the equivalent of feupdateenv (&fenv_var):
+
+       unsigned int tmp2_var;
+       __builtin_store_fsr (&tmp2_var);
+
+       __builtin_load_fsr (&fenv_var);
+
+       if (SPARC_LOW_FE_EXCEPT_VALUES)
+         tmp2_var >>= 5;
+       __atomic_feraiseexcept ((int) tmp2_var);  */
+
+  tree tmp2_var = create_tmp_var (unsigned_type_node, NULL);
+  mark_addressable (tmp2_var);
+  /* NOTE(review): 'tmp3_addr' actually holds &tmp2_var; a clearer name
+     would be tmp2_addr.  */
+  tree tmp3_addr = build_fold_addr_expr (tmp2_var);
+  tree update_stfsr = build_call_expr (stfsr, 1, tmp3_addr);
+
+  tree update_ldfsr = build_call_expr (ldfsr, 1, fenv_addr);
+
+  tree atomic_feraiseexcept
+    = builtin_decl_implicit (BUILT_IN_ATOMIC_FERAISEEXCEPT);
+  tree update_call
+    = build_call_expr (atomic_feraiseexcept, 1,
+		       fold_convert (integer_type_node, tmp2_var));
+
+  if (SPARC_LOW_FE_EXCEPT_VALUES)
+    {
+      tree shifted_tmp2_var
+	= build2 (RSHIFT_EXPR, unsigned_type_node, tmp2_var,
+		  build_int_cst (unsigned_type_node, 5));
+      tree update_shift
+	= build2 (MODIFY_EXPR, void_type_node, tmp2_var, shifted_tmp2_var);
+      update_call = compound_expr (update_shift, update_call);
+    }
+
+  *update
+    = compound_expr (compound_expr (update_stfsr, update_ldfsr), update_call);
+}
+
#include "gt-sparc.h"