/* Subroutines used for code generation on IBM RS/6000.
- Copyright (C) 1991, 1993, 1994, 1995, 1996, 1997, 1998, 1999,
- 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011,
- 2012
- Free Software Foundation, Inc.
+ Copyright (C) 1991-2012 Free Software Foundation, Inc.
Contributed by Richard Kenner (kenner@vlsi1.ultra.nyu.edu)
This file is part of GCC.
#include "except.h"
#include "function.h"
#include "output.h"
+#include "dbxout.h"
#include "basic-block.h"
-#include "integrate.h"
#include "diagnostic-core.h"
#include "toplev.h"
#include "ggc.h"
#include "common/common-target.h"
#include "langhooks.h"
#include "reload.h"
-#include "cfglayout.h"
#include "cfgloop.h"
#include "sched-int.h"
#include "gimple.h"
#include "params.h"
#include "tm-constrs.h"
#include "opts.h"
+#include "tree-vectorizer.h"
#if TARGET_XCOFF
#include "xcoffout.h" /* get declarations of xcoff_*_section_name */
#endif
0,
};
-/* Instruction costs on RIOS1 processors. */
-static const
-struct processor_costs rios1_cost = {
- COSTS_N_INSNS (5), /* mulsi */
- COSTS_N_INSNS (4), /* mulsi_const */
- COSTS_N_INSNS (3), /* mulsi_const9 */
- COSTS_N_INSNS (5), /* muldi */
- COSTS_N_INSNS (19), /* divsi */
- COSTS_N_INSNS (19), /* divdi */
- COSTS_N_INSNS (2), /* fp */
- COSTS_N_INSNS (2), /* dmul */
- COSTS_N_INSNS (19), /* sdiv */
- COSTS_N_INSNS (19), /* ddiv */
- 128, /* cache line size */
- 64, /* l1 cache */
- 512, /* l2 cache */
- 0, /* streams */
-};
-
-/* Instruction costs on RIOS2 processors. */
-static const
-struct processor_costs rios2_cost = {
- COSTS_N_INSNS (2), /* mulsi */
- COSTS_N_INSNS (2), /* mulsi_const */
- COSTS_N_INSNS (2), /* mulsi_const9 */
- COSTS_N_INSNS (2), /* muldi */
- COSTS_N_INSNS (13), /* divsi */
- COSTS_N_INSNS (13), /* divdi */
- COSTS_N_INSNS (2), /* fp */
- COSTS_N_INSNS (2), /* dmul */
- COSTS_N_INSNS (17), /* sdiv */
- COSTS_N_INSNS (17), /* ddiv */
- 256, /* cache line size */
- 256, /* l1 cache */
- 1024, /* l2 cache */
- 0, /* streams */
-};
-
/* Instruction costs on RS64A processors. */
static const
struct processor_costs rs64a_cost = {
1, /* prefetch streams /*/
};
+/* Instruction costs on PPCE5500 processors.  */
+static const
+struct processor_costs ppce5500_cost = {
+ COSTS_N_INSNS (5), /* mulsi */
+ COSTS_N_INSNS (5), /* mulsi_const */
+ COSTS_N_INSNS (4), /* mulsi_const9 */
+ COSTS_N_INSNS (5), /* muldi */
+ COSTS_N_INSNS (14), /* divsi */
+ COSTS_N_INSNS (14), /* divdi */
+ COSTS_N_INSNS (7), /* fp */
+ COSTS_N_INSNS (10), /* dmul */
+ COSTS_N_INSNS (36), /* sdiv */
+ COSTS_N_INSNS (66), /* ddiv */
+ 64, /* cache line size */
+ 32, /* l1 cache */
+ 128, /* l2 cache */
+ 1, /* prefetch streams */
+};
+
+/* Instruction costs on PPCE6500 processors.  */
+static const
+struct processor_costs ppce6500_cost = {
+ COSTS_N_INSNS (5), /* mulsi */
+ COSTS_N_INSNS (5), /* mulsi_const */
+ COSTS_N_INSNS (4), /* mulsi_const9 */
+ COSTS_N_INSNS (5), /* muldi */
+ COSTS_N_INSNS (14), /* divsi */
+ COSTS_N_INSNS (14), /* divdi */
+ COSTS_N_INSNS (7), /* fp */
+ COSTS_N_INSNS (10), /* dmul */
+ COSTS_N_INSNS (36), /* sdiv */
+ COSTS_N_INSNS (66), /* ddiv */
+ 64, /* cache line size */
+ 32, /* l1 cache */
+ 128, /* l2 cache */
+ 1, /* prefetch streams */
+};
+
/* Instruction costs on AppliedMicro Titan processors. */
static const
struct processor_costs titan_cost = {
static tree (*rs6000_veclib_handler) (tree, tree, tree);
\f
-static bool rs6000_function_ok_for_sibcall (tree, tree);
-static const char *rs6000_invalid_within_doloop (const_rtx);
-static bool rs6000_legitimate_address_p (enum machine_mode, rtx, bool);
static bool rs6000_debug_legitimate_address_p (enum machine_mode, rtx, bool);
-static rtx rs6000_generate_compare (rtx, enum machine_mode);
static bool spe_func_has_64bit_regs_p (void);
-static rtx gen_frame_mem_offset (enum machine_mode, rtx, int);
-static unsigned rs6000_hash_constant (rtx);
-static unsigned toc_hash_function (const void *);
-static int toc_hash_eq (const void *, const void *);
-static bool reg_offset_addressing_ok_p (enum machine_mode);
-static bool virtual_stack_registers_memory_p (rtx);
-static bool constant_pool_expr_p (rtx);
-static bool legitimate_small_data_p (enum machine_mode, rtx);
-static bool legitimate_lo_sum_address_p (enum machine_mode, rtx, int);
static struct machine_function * rs6000_init_machine_status (void);
-static bool rs6000_assemble_integer (rtx, unsigned int, int);
-#if defined (HAVE_GAS_HIDDEN) && !TARGET_MACHO
-static void rs6000_assemble_visibility (tree, int);
-#endif
static int rs6000_ra_ever_killed (void);
-static bool rs6000_attribute_takes_identifier_p (const_tree);
static tree rs6000_handle_longcall_attribute (tree *, tree, tree, int, bool *);
static tree rs6000_handle_altivec_attribute (tree *, tree, tree, int, bool *);
-static bool rs6000_ms_bitfield_layout_p (const_tree);
static tree rs6000_handle_struct_attribute (tree *, tree, tree, int, bool *);
-static void rs6000_eliminate_indexed_memrefs (rtx operands[2]);
-static const char *rs6000_mangle_type (const_tree);
-static void rs6000_set_default_type_attributes (tree);
-static bool rs6000_reg_live_or_pic_offset_p (int);
static tree rs6000_builtin_vectorized_libmass (tree, tree, tree);
-static tree rs6000_builtin_vectorized_function (tree, tree, tree);
-static bool rs6000_output_addr_const_extra (FILE *, rtx);
-static void rs6000_output_function_prologue (FILE *, HOST_WIDE_INT);
-static void rs6000_output_function_epilogue (FILE *, HOST_WIDE_INT);
-static void rs6000_output_mi_thunk (FILE *, tree, HOST_WIDE_INT, HOST_WIDE_INT,
- tree);
static rtx rs6000_emit_set_long_const (rtx, HOST_WIDE_INT, HOST_WIDE_INT);
-static bool rs6000_return_in_memory (const_tree, const_tree);
-static rtx rs6000_function_value (const_tree, const_tree, bool);
-static void rs6000_file_start (void);
-#if TARGET_ELF
-static int rs6000_elf_reloc_rw_mask (void);
-static void rs6000_elf_asm_out_constructor (rtx, int) ATTRIBUTE_UNUSED;
-static void rs6000_elf_asm_out_destructor (rtx, int) ATTRIBUTE_UNUSED;
-static void rs6000_elf_file_end (void) ATTRIBUTE_UNUSED;
-static void rs6000_elf_asm_init_sections (void);
-static section *rs6000_elf_select_rtx_section (enum machine_mode, rtx,
- unsigned HOST_WIDE_INT);
-static void rs6000_elf_encode_section_info (tree, rtx, int)
- ATTRIBUTE_UNUSED;
-#endif
-static bool rs6000_use_blocks_for_constant_p (enum machine_mode, const_rtx);
-static void rs6000_alloc_sdmode_stack_slot (void);
-static void rs6000_instantiate_decls (void);
-#if TARGET_XCOFF
-static void rs6000_xcoff_asm_output_anchor (rtx);
-static void rs6000_xcoff_asm_globalize_label (FILE *, const char *);
-static void rs6000_xcoff_asm_init_sections (void);
-static int rs6000_xcoff_reloc_rw_mask (void);
-static void rs6000_xcoff_asm_named_section (const char *, unsigned int, tree);
-static section *rs6000_xcoff_select_section (tree, int,
- unsigned HOST_WIDE_INT);
-static void rs6000_xcoff_unique_section (tree, int);
-static section *rs6000_xcoff_select_rtx_section
- (enum machine_mode, rtx, unsigned HOST_WIDE_INT);
-static const char * rs6000_xcoff_strip_name_encoding (const char *);
-static unsigned int rs6000_xcoff_section_type_flags (tree, const char *, int);
-static void rs6000_xcoff_file_start (void);
-static void rs6000_xcoff_file_end (void);
-#endif
-static int rs6000_variable_issue (FILE *, int, rtx, int);
-static int rs6000_register_move_cost (enum machine_mode,
- reg_class_t, reg_class_t);
static int rs6000_memory_move_cost (enum machine_mode, reg_class_t, bool);
-static bool rs6000_rtx_costs (rtx, int, int, int, int *, bool);
static bool rs6000_debug_rtx_costs (rtx, int, int, int, int *, bool);
-static int rs6000_debug_address_cost (rtx, bool);
-static int rs6000_adjust_cost (rtx, rtx, rtx, int);
+static int rs6000_debug_address_cost (rtx, enum machine_mode, addr_space_t,
+ bool);
static int rs6000_debug_adjust_cost (rtx, rtx, rtx, int);
-static void rs6000_sched_init (FILE *, int, int);
static bool is_microcoded_insn (rtx);
static bool is_nonpipeline_insn (rtx);
static bool is_cracked_insn (rtx);
-static bool is_branch_slot_insn (rtx);
-static bool is_load_insn (rtx);
-static rtx get_store_dest (rtx pat);
-static bool is_store_insn (rtx);
+static bool is_load_insn (rtx, rtx *);
+static bool is_store_insn (rtx, rtx *);
static bool set_to_load_agen (rtx,rtx);
-static bool adjacent_mem_locations (rtx,rtx);
-static int rs6000_adjust_priority (rtx, int);
-static int rs6000_issue_rate (void);
-static bool rs6000_is_costly_dependence (dep_t, int, int);
-static rtx get_next_active_insn (rtx, rtx);
static bool insn_terminates_group_p (rtx , enum group_termination);
static bool insn_must_be_first_in_group (rtx);
static bool insn_must_be_last_in_group (rtx);
-static bool is_costly_group (rtx *, rtx);
-static int force_new_group (int, FILE *, rtx *, rtx, bool *, int, int *);
-static int redefine_groups (FILE *, int, rtx, rtx);
-static int pad_groups (FILE *, int, rtx, rtx);
-static void rs6000_sched_finish (FILE *, int);
-static int rs6000_sched_reorder (FILE *, int, rtx *, int *, int);
-static int rs6000_sched_reorder2 (FILE *, int, rtx *, int *, int);
-static int rs6000_use_sched_lookahead (void);
-static int rs6000_use_sched_lookahead_guard (rtx);
-static void * rs6000_alloc_sched_context (void);
-static void rs6000_init_sched_context (void *, bool);
-static void rs6000_set_sched_context (void *);
-static void rs6000_free_sched_context (void *);
-static tree rs6000_builtin_reciprocal (unsigned int, bool, bool);
-static tree rs6000_builtin_mask_for_load (void);
-static tree rs6000_builtin_mul_widen_even (tree);
-static tree rs6000_builtin_mul_widen_odd (tree);
-static bool rs6000_builtin_support_vector_misalignment (enum
- machine_mode,
- const_tree,
- int, bool);
-static int rs6000_builtin_vectorization_cost (enum vect_cost_for_stmt,
- tree, int);
-static enum machine_mode rs6000_preferred_simd_mode (enum machine_mode);
-
-static void def_builtin (const char *, tree, enum rs6000_builtins);
-static bool rs6000_vector_alignment_reachable (const_tree, bool);
-static void rs6000_init_builtins (void);
-static tree rs6000_builtin_decl (unsigned, bool);
-
-static rtx rs6000_expand_unop_builtin (enum insn_code, tree, rtx);
-static rtx rs6000_expand_binop_builtin (enum insn_code, tree, rtx);
-static rtx rs6000_expand_ternop_builtin (enum insn_code, tree, rtx);
-static rtx rs6000_expand_builtin (tree, rtx, rtx, enum machine_mode, int);
static void altivec_init_builtins (void);
-static unsigned builtin_hash_function (const void *);
-static int builtin_hash_eq (const void *, const void *);
static tree builtin_function_type (enum machine_mode, enum machine_mode,
enum machine_mode, enum machine_mode,
enum rs6000_builtins, const char *name);
static void rs6000_common_init_builtins (void);
-static void rs6000_init_libfuncs (void);
-
static void paired_init_builtins (void);
-static rtx paired_expand_builtin (tree, rtx, bool *);
-static rtx paired_expand_lv_builtin (enum insn_code, tree, rtx);
-static rtx paired_expand_stv_builtin (enum insn_code, tree);
static rtx paired_expand_predicate_builtin (enum insn_code, tree, rtx);
-
static void spe_init_builtins (void);
-static rtx spe_expand_builtin (tree, rtx, bool *);
-static rtx spe_expand_stv_builtin (enum insn_code, tree);
static rtx spe_expand_predicate_builtin (enum insn_code, tree, rtx);
static rtx spe_expand_evsel_builtin (enum insn_code, tree, rtx);
static int rs6000_emit_int_cmove (rtx, rtx, rtx, rtx);
static rs6000_stack_t *rs6000_stack_info (void);
-static void debug_stack_info (rs6000_stack_t *);
-
-static rtx altivec_expand_builtin (tree, rtx, bool *);
-static rtx altivec_expand_ld_builtin (tree, rtx, bool *);
-static rtx altivec_expand_st_builtin (tree, rtx, bool *);
-static rtx altivec_expand_dst_builtin (tree, rtx, bool *);
-static rtx altivec_expand_abs_builtin (enum insn_code, tree, rtx);
-static rtx altivec_expand_predicate_builtin (enum insn_code, tree, rtx);
-static rtx altivec_expand_stv_builtin (enum insn_code, tree);
-static rtx altivec_expand_vec_init_builtin (tree, tree, rtx);
-static rtx altivec_expand_vec_set_builtin (tree);
-static rtx altivec_expand_vec_ext_builtin (tree, rtx);
-static int get_element_number (tree, tree);
-static void rs6000_option_override (void);
-static int rs6000_loop_align_max_skip (rtx);
-static int first_altivec_reg_to_save (void);
-static unsigned int compute_vrsave_mask (void);
-static void compute_save_world_info (rs6000_stack_t *info_ptr);
static void is_altivec_return_reg (rtx, void *);
-static rtx generate_set_vrsave (rtx, rs6000_stack_t *, int);
int easy_vector_constant (rtx, enum machine_mode);
-static rtx rs6000_dwarf_register_span (rtx);
-static void rs6000_init_dwarf_reg_sizes_extra (tree);
-static rtx rs6000_legitimize_address (rtx, rtx, enum machine_mode);
static rtx rs6000_debug_legitimize_address (rtx, rtx, enum machine_mode);
static rtx rs6000_legitimize_tls_address (rtx, enum tls_model);
-static void rs6000_output_dwarf_dtprel (FILE *, int, rtx) ATTRIBUTE_UNUSED;
-static rtx rs6000_delegitimize_address (rtx);
-static bool rs6000_const_not_ok_for_debug_p (rtx);
-static rtx rs6000_tls_get_addr (void);
-static rtx rs6000_got_sym (void);
static int rs6000_tls_symbol_ref_1 (rtx *, void *);
-static const char *rs6000_get_some_local_dynamic_name (void);
static int rs6000_get_some_local_dynamic_name_1 (rtx *, void *);
-static rtx rs6000_complex_function_value (enum machine_mode);
-static rtx rs6000_spe_function_arg (const CUMULATIVE_ARGS *,
- enum machine_mode, const_tree);
-static void rs6000_darwin64_record_arg_advance_flush (CUMULATIVE_ARGS *,
- HOST_WIDE_INT, int);
-static void rs6000_darwin64_record_arg_advance_recurse (CUMULATIVE_ARGS *,
- const_tree,
- HOST_WIDE_INT);
-static void rs6000_darwin64_record_arg_flush (CUMULATIVE_ARGS *,
- HOST_WIDE_INT,
- rtx[], int *);
-static void rs6000_darwin64_record_arg_recurse (CUMULATIVE_ARGS *,
- const_tree, HOST_WIDE_INT,
- rtx[], int *);
-static rtx rs6000_darwin64_record_arg (CUMULATIVE_ARGS *, const_tree, bool, bool);
-static rtx rs6000_mixed_function_arg (enum machine_mode, const_tree, int);
-static void rs6000_function_arg_advance (cumulative_args_t, enum machine_mode,
- const_tree, bool);
-static rtx rs6000_function_arg (cumulative_args_t, enum machine_mode,
- const_tree, bool);
-static unsigned int rs6000_function_arg_boundary (enum machine_mode,
- const_tree);
-static void rs6000_move_block_from_reg (int regno, rtx x, int nregs);
-static void setup_incoming_varargs (cumulative_args_t,
- enum machine_mode, tree,
- int *, int);
-static bool rs6000_pass_by_reference (cumulative_args_t, enum machine_mode,
- const_tree, bool);
-static int rs6000_arg_partial_bytes (cumulative_args_t, enum machine_mode,
- tree, bool);
-static const char *invalid_arg_for_unprototyped_fn (const_tree, const_tree, const_tree);
+static rtx rs6000_darwin64_record_arg (CUMULATIVE_ARGS *, const_tree,
+ bool, bool);
#if TARGET_MACHO
static void macho_branch_islands (void);
-static int no_previous_def (tree function_name);
-static tree get_prev_label (tree function_name);
-static void rs6000_darwin_file_start (void);
#endif
-
-static tree rs6000_build_builtin_va_list (void);
-static void rs6000_va_start (tree, rtx);
-static tree rs6000_gimplify_va_arg (tree, tree, gimple_seq *, gimple_seq *);
-static bool rs6000_must_pass_in_stack (enum machine_mode, const_tree);
-static bool rs6000_scalar_mode_supported_p (enum machine_mode);
-static bool rs6000_vector_mode_supported_p (enum machine_mode);
-static rtx rs6000_emit_vector_compare_inner (enum rtx_code, rtx, rtx);
-static rtx rs6000_emit_vector_compare (enum rtx_code, rtx, rtx,
- enum machine_mode);
-static tree rs6000_stack_protect_fail (void);
-
static rtx rs6000_legitimize_reload_address (rtx, enum machine_mode, int, int,
int, int *);
-
static rtx rs6000_debug_legitimize_reload_address (rtx, enum machine_mode, int,
int, int, int *);
-
-rtx (*rs6000_legitimize_reload_address_ptr) (rtx, enum machine_mode, int, int,
- int, int *)
- = rs6000_legitimize_reload_address;
-
-static bool rs6000_mode_dependent_address_p (const_rtx);
static bool rs6000_mode_dependent_address (const_rtx);
static bool rs6000_debug_mode_dependent_address (const_rtx);
-static bool (*rs6000_mode_dependent_address_ptr) (const_rtx)
- = rs6000_mode_dependent_address;
-
static enum reg_class rs6000_secondary_reload_class (enum reg_class,
enum machine_mode, rtx);
static enum reg_class rs6000_debug_secondary_reload_class (enum reg_class,
enum machine_mode,
rtx);
-enum reg_class (*rs6000_secondary_reload_class_ptr) (enum reg_class,
- enum machine_mode, rtx)
- = rs6000_secondary_reload_class;
-
static enum reg_class rs6000_preferred_reload_class (rtx, enum reg_class);
static enum reg_class rs6000_debug_preferred_reload_class (rtx,
enum reg_class);
-enum reg_class (*rs6000_preferred_reload_class_ptr) (rtx, enum reg_class)
- = rs6000_preferred_reload_class;
-
static bool rs6000_secondary_memory_needed (enum reg_class, enum reg_class,
enum machine_mode);
-
static bool rs6000_debug_secondary_memory_needed (enum reg_class,
enum reg_class,
enum machine_mode);
-
-bool (*rs6000_secondary_memory_needed_ptr) (enum reg_class, enum reg_class,
- enum machine_mode)
- = rs6000_secondary_memory_needed;
-
static bool rs6000_cannot_change_mode_class (enum machine_mode,
enum machine_mode,
enum reg_class);
static bool rs6000_debug_cannot_change_mode_class (enum machine_mode,
enum machine_mode,
enum reg_class);
+static bool rs6000_save_toc_in_prologue_p (void);
+
+rtx (*rs6000_legitimize_reload_address_ptr) (rtx, enum machine_mode, int, int,
+ int, int *)
+ = rs6000_legitimize_reload_address;
+
+static bool (*rs6000_mode_dependent_address_ptr) (const_rtx)
+ = rs6000_mode_dependent_address;
+
+enum reg_class (*rs6000_secondary_reload_class_ptr) (enum reg_class,
+ enum machine_mode, rtx)
+ = rs6000_secondary_reload_class;
+
+enum reg_class (*rs6000_preferred_reload_class_ptr) (rtx, enum reg_class)
+ = rs6000_preferred_reload_class;
+
+bool (*rs6000_secondary_memory_needed_ptr) (enum reg_class, enum reg_class,
+ enum machine_mode)
+ = rs6000_secondary_memory_needed;
bool (*rs6000_cannot_change_mode_class_ptr) (enum machine_mode,
enum machine_mode,
enum reg_class)
= rs6000_cannot_change_mode_class;
-static reg_class_t rs6000_secondary_reload (bool, rtx, reg_class_t,
- enum machine_mode,
- struct secondary_reload_info *);
-
const int INSN_NOT_AVAILABLE = -1;
-static enum machine_mode rs6000_eh_return_filter_mode (void);
-static bool rs6000_can_eliminate (const int, const int);
-static void rs6000_conditional_register_usage (void);
-static void rs6000_trampoline_init (rtx, tree, rtx);
-static bool rs6000_cannot_force_const_mem (enum machine_mode, rtx);
-static bool rs6000_legitimate_constant_p (enum machine_mode, rtx);
-static bool rs6000_save_toc_in_prologue_p (void);
-static void rs6000_code_end (void) ATTRIBUTE_UNUSED;
-static void rs6000_set_up_by_prologue (struct hard_reg_set_container *);
/* Hash table stuff for keeping track of TOC entries. */
static GTY ((param_is (struct builtin_hash_struct))) htab_t builtin_hash_table;
-static bool rs6000_valid_attribute_p (tree, tree, tree, int);
-static void rs6000_function_specific_save (struct cl_target_option *);
-static void rs6000_function_specific_restore (struct cl_target_option *);
-static void rs6000_function_specific_print (FILE *, int,
- struct cl_target_option *);
-static bool rs6000_can_inline_p (tree, tree);
-static void rs6000_set_current_function (tree);
-
\f
/* Default register names. */
char rs6000_reg_names[][8] =
#undef TARGET_VECTORIZE_BUILTIN_MASK_FOR_LOAD
#define TARGET_VECTORIZE_BUILTIN_MASK_FOR_LOAD rs6000_builtin_mask_for_load
-#undef TARGET_VECTORIZE_BUILTIN_MUL_WIDEN_EVEN
-#define TARGET_VECTORIZE_BUILTIN_MUL_WIDEN_EVEN rs6000_builtin_mul_widen_even
-#undef TARGET_VECTORIZE_BUILTIN_MUL_WIDEN_ODD
-#define TARGET_VECTORIZE_BUILTIN_MUL_WIDEN_ODD rs6000_builtin_mul_widen_odd
#undef TARGET_VECTORIZE_SUPPORT_VECTOR_MISALIGNMENT
#define TARGET_VECTORIZE_SUPPORT_VECTOR_MISALIGNMENT \
rs6000_builtin_support_vector_misalignment
#undef TARGET_VECTORIZE_PREFERRED_SIMD_MODE
#define TARGET_VECTORIZE_PREFERRED_SIMD_MODE \
rs6000_preferred_simd_mode
+#undef TARGET_VECTORIZE_INIT_COST
+#define TARGET_VECTORIZE_INIT_COST rs6000_init_cost
+#undef TARGET_VECTORIZE_ADD_STMT_COST
+#define TARGET_VECTORIZE_ADD_STMT_COST rs6000_add_stmt_cost
+#undef TARGET_VECTORIZE_FINISH_COST
+#define TARGET_VECTORIZE_FINISH_COST rs6000_finish_cost
+#undef TARGET_VECTORIZE_DESTROY_COST_DATA
+#define TARGET_VECTORIZE_DESTROY_COST_DATA rs6000_destroy_cost_data
#undef TARGET_INIT_BUILTINS
#define TARGET_INIT_BUILTINS rs6000_init_builtins
#undef TARGET_RTX_COSTS
#define TARGET_RTX_COSTS rs6000_rtx_costs
#undef TARGET_ADDRESS_COST
-#define TARGET_ADDRESS_COST hook_int_rtx_bool_0
+#define TARGET_ADDRESS_COST hook_int_rtx_mode_as_bool_0
#undef TARGET_DWARF_REGISTER_SPAN
#define TARGET_DWARF_REGISTER_SPAN rs6000_dwarf_register_span
#undef TARGET_INIT_DWARF_REG_SIZES_EXTRA
#define TARGET_INIT_DWARF_REG_SIZES_EXTRA rs6000_init_dwarf_reg_sizes_extra
+#undef TARGET_MEMBER_TYPE_FORCES_BLK
+#define TARGET_MEMBER_TYPE_FORCES_BLK rs6000_member_type_forces_blk
+
/* On rs6000, function arguments are promoted, as are function return
values. */
#undef TARGET_PROMOTE_FUNCTION_MODE
/* Simplifications for entries below. */
enum {
- POWERPC_BASE_MASK = MASK_POWERPC | MASK_NEW_MNEMONICS,
- POWERPC_7400_MASK = POWERPC_BASE_MASK | MASK_PPC_GFXOPT | MASK_ALTIVEC
+ POWERPC_7400_MASK = MASK_PPC_GFXOPT | MASK_ALTIVEC
};
/* Some OSs don't support saving the high part of 64-bit registers on context
the user's specification. */
enum {
- POWER_MASKS = MASK_POWER | MASK_POWER2 | MASK_MULTIPLE | MASK_STRING,
- POWERPC_MASKS = (POWERPC_BASE_MASK | MASK_PPC_GPOPT | MASK_STRICT_ALIGN
+ POWERPC_MASKS = (MASK_PPC_GPOPT | MASK_STRICT_ALIGN
| MASK_PPC_GFXOPT | MASK_POWERPC64 | MASK_ALTIVEC
| MASK_MFCRF | MASK_POPCNTB | MASK_FPRND | MASK_MULHW
| MASK_DLMZB | MASK_CMPB | MASK_MFPGPR | MASK_DFP
rs6000_debug_reg_print (LR_REGNO, LR_REGNO, "lr");
rs6000_debug_reg_print (CTR_REGNO, CTR_REGNO, "ctr");
rs6000_debug_reg_print (CR0_REGNO, CR7_REGNO, "cr");
- rs6000_debug_reg_print (MQ_REGNO, MQ_REGNO, "mq");
rs6000_debug_reg_print (CA_REGNO, CA_REGNO, "ca");
rs6000_debug_reg_print (VRSAVE_REGNO, VRSAVE_REGNO, "vrsave");
rs6000_debug_reg_print (VSCR_REGNO, VSCR_REGNO, "vscr");
for (r = CR1_REGNO; r <= CR7_REGNO; ++r)
rs6000_regno_regclass[r] = CR_REGS;
- rs6000_regno_regclass[MQ_REGNO] = MQ_REGS;
rs6000_regno_regclass[LR_REGNO] = LINK_REGS;
rs6000_regno_regclass[CTR_REGNO] = CTR_REGS;
rs6000_regno_regclass[CA_REGNO] = CA_REGS;
/* TODO add SPE and paired floating point vector support. */
- /* Register class constaints for the constraints that depend on compile
+ /* Register class constraints for the constraints that depend on compile
switches. */
if (TARGET_HARD_FLOAT && TARGET_FPRS)
rs6000_constraints[RS6000_CONSTRAINT_f] = FLOAT_REGS;
/* Unless the user (not the configurer) has explicitly overridden
it with -mcpu=G3 or -mno-altivec, then 10.5+ targets default to
- G4 unless targetting the kernel. */
+ G4 unless targeting the kernel. */
if (!flag_mkernel
&& !flag_apple_kext
&& strverscmp (darwin_macosx_version_min, "10.5") >= 0
| ((TARGET_FRSQRTE) ? RS6000_BTM_FRSQRTE : 0)
| ((TARGET_FRSQRTES) ? RS6000_BTM_FRSQRTES : 0)
| ((TARGET_POPCNTD) ? RS6000_BTM_POPCNTD : 0)
- | ((TARGET_POWERPC) ? RS6000_BTM_POWERPC : 0)
| ((rs6000_cpu == PROCESSOR_CELL) ? RS6000_BTM_CELL : 0));
}
{
bool ret = true;
bool have_cpu = false;
- const char *default_cpu = OPTION_TARGET_CPU_DEFAULT;
+
+ /* The default cpu requested at configure time, if any. */
+ const char *implicit_cpu = OPTION_TARGET_CPU_DEFAULT;
+
int set_masks;
int cpu_index;
int tune_index;
warning (0, "-malign-power is not supported for 64-bit Darwin;"
" it is incompatible with the installed C and C++ libraries");
- if (global_options_set.x_rs6000_spe_abi
- && rs6000_spe_abi
- && !TARGET_SPE_ABI)
- error ("not configured for SPE ABI");
-
/* Numerous experiment shows that IRA based loop pressure
calculation works better for RTL loop invariant motion on targets
with enough (>= 32) registers. It is an expensive optimization.
rs6000_pointer_size = 32;
}
- set_masks = POWER_MASKS | POWERPC_MASKS | MASK_SOFT_FLOAT;
+ set_masks = POWERPC_MASKS | MASK_SOFT_FLOAT;
#ifdef OS_MISSING_POWERPC64
if (OS_MISSING_POWERPC64)
set_masks &= ~MASK_POWERPC64;
/* Process the -mcpu=<xxx> and -mtune=<xxx> argument. If the user changed
the cpu in a target attribute or pragma, but did not specify a tuning
option, use the cpu for the tuning option rather than the option specified
- with -mtune on the command line. */
+ with -mtune on the command line. Process a '--with-cpu' configuration
+ request as an implicit --cpu. */
if (rs6000_cpu_index >= 0)
{
cpu_index = rs6000_cpu_index;
}
else
{
- if (!default_cpu)
- default_cpu = (TARGET_POWERPC64 ? "powerpc64" : "powerpc");
+ const char *default_cpu =
+ (implicit_cpu ? implicit_cpu
+ : (TARGET_POWERPC64 ? "powerpc64" : "powerpc"));
rs6000_cpu_index = cpu_index = rs6000_cpu_name_lookup (default_cpu);
+ have_cpu = implicit_cpu != 0;
}
gcc_assert (cpu_index >= 0);
gcc_assert (tune_index >= 0);
rs6000_cpu = processor_target_table[tune_index].processor;
+ /* Pick defaults for SPE related control flags. Do this early to make sure
+ that the TARGET_ macros are representative ASAP. */
+ {
+ int spe_capable_cpu =
+ (rs6000_cpu == PROCESSOR_PPC8540
+ || rs6000_cpu == PROCESSOR_PPC8548);
+
+ if (!global_options_set.x_rs6000_spe_abi)
+ rs6000_spe_abi = spe_capable_cpu;
+
+ if (!global_options_set.x_rs6000_spe)
+ rs6000_spe = spe_capable_cpu;
+
+ if (!global_options_set.x_rs6000_float_gprs)
+ rs6000_float_gprs =
+ (rs6000_cpu == PROCESSOR_PPC8540 ? 1
+ : rs6000_cpu == PROCESSOR_PPC8548 ? 2
+ : 0);
+ }
+
+ if (global_options_set.x_rs6000_spe_abi
+ && rs6000_spe_abi
+ && !TARGET_SPE_ABI)
+ error ("not configured for SPE ABI");
+
+ if (global_options_set.x_rs6000_spe
+ && rs6000_spe
+ && !TARGET_SPE)
+ error ("not configured for SPE instruction set");
+
+ if (main_target_opt != NULL
+ && ((main_target_opt->x_rs6000_spe_abi != rs6000_spe_abi)
+ || (main_target_opt->x_rs6000_spe != rs6000_spe)
+ || (main_target_opt->x_rs6000_float_gprs != rs6000_float_gprs)))
+ error ("target attribute or pragma changes SPE ABI");
+
if (rs6000_cpu == PROCESSOR_PPCE300C2 || rs6000_cpu == PROCESSOR_PPCE300C3
- || rs6000_cpu == PROCESSOR_PPCE500MC || rs6000_cpu == PROCESSOR_PPCE500MC64)
+ || rs6000_cpu == PROCESSOR_PPCE500MC || rs6000_cpu == PROCESSOR_PPCE500MC64
+ || rs6000_cpu == PROCESSOR_PPCE5500)
{
if (TARGET_ALTIVEC)
error ("AltiVec not supported in this target");
if (TARGET_SPE)
error ("SPE not supported in this target");
}
+ if (rs6000_cpu == PROCESSOR_PPCE6500)
+ {
+ if (TARGET_SPE)
+ error ("SPE not supported in this target");
+ }
/* Disable Cell microcode if we are optimizing for the Cell
and not optimizing for size. */
user's opinion, though. */
if (rs6000_block_move_inline_limit == 0
&& (rs6000_cpu == PROCESSOR_PPCE500MC
- || rs6000_cpu == PROCESSOR_PPCE500MC64))
+ || rs6000_cpu == PROCESSOR_PPCE500MC64
+ || rs6000_cpu == PROCESSOR_PPCE5500
+ || rs6000_cpu == PROCESSOR_PPCE6500))
rs6000_block_move_inline_limit = 128;
/* store_one_arg depends on expand_block_move to handle at least the
SUB3TARGET_OVERRIDE_OPTIONS;
#endif
- if (TARGET_E500 || rs6000_cpu == PROCESSOR_PPCE500MC
- || rs6000_cpu == PROCESSOR_PPCE500MC64)
+  /* For the E500 family of cores, reset the single/double FP flags to let us
+     check that they remain constant across attributes or pragmas.  Also,
+     clear a possible request for string instructions, which are not supported
+     on these cores and might have been silently enabled above for -Os.
+
+     For other families, clear ISEL in case it was set implicitly.  */
+
+ switch (rs6000_cpu)
{
- /* The e500 and e500mc do not have string instructions, and we set
- MASK_STRING above when optimizing for size. */
- if ((target_flags & MASK_STRING) != 0)
- target_flags = target_flags & ~MASK_STRING;
+ case PROCESSOR_PPC8540:
+ case PROCESSOR_PPC8548:
+ case PROCESSOR_PPCE500MC:
+ case PROCESSOR_PPCE500MC64:
+ case PROCESSOR_PPCE5500:
+ case PROCESSOR_PPCE6500:
+
+ rs6000_single_float = TARGET_E500_SINGLE || TARGET_E500_DOUBLE;
+ rs6000_double_float = TARGET_E500_DOUBLE;
+
+ target_flags &= ~MASK_STRING;
+
+ break;
+
+ default:
+
+ if (have_cpu && !(target_flags_explicit & MASK_ISEL))
+ target_flags &= ~MASK_ISEL;
+
+ break;
}
- else if (global_options_set.x_rs6000_cpu_index)
+
+ if (main_target_opt)
{
- /* For the powerpc-eabispe configuration, we set all these by
- default, so let's unset them if we manually set another
- CPU that is not the E500. */
- if (main_target_opt != NULL
- && ((main_target_opt->x_rs6000_spe_abi != rs6000_spe_abi)
- || (main_target_opt->x_rs6000_spe != rs6000_spe)
- || (main_target_opt->x_rs6000_float_gprs != rs6000_float_gprs)))
- error ("target attribute or pragma changes SPE ABI");
- else
- {
- if (!global_options_set.x_rs6000_spe_abi)
- rs6000_spe_abi = 0;
- if (!global_options_set.x_rs6000_spe)
- rs6000_spe = 0;
- if (!global_options_set.x_rs6000_float_gprs)
- rs6000_float_gprs = 0;
- }
- if (!(target_flags_explicit & MASK_ISEL))
- target_flags &= ~MASK_ISEL;
+ if (main_target_opt->x_rs6000_single_float != rs6000_single_float)
+ error ("target attribute or pragma changes single precision floating "
+ "point");
+ if (main_target_opt->x_rs6000_double_float != rs6000_double_float)
+ error ("target attribute or pragma changes double precision floating "
+ "point");
}
/* Detect invalid option combinations with E500. */
|| rs6000_cpu == PROCESSOR_POWER6
|| rs6000_cpu == PROCESSOR_POWER7
|| rs6000_cpu == PROCESSOR_PPCE500MC
- || rs6000_cpu == PROCESSOR_PPCE500MC64);
+ || rs6000_cpu == PROCESSOR_PPCE500MC64
+ || rs6000_cpu == PROCESSOR_PPCE5500
+ || rs6000_cpu == PROCESSOR_PPCE6500);
/* Allow debug switches to override the above settings. These are set to -1
in rs6000.opt to indicate the user hasn't directly set the switch. */
/* Handle -msched-costly-dep option. */
rs6000_sched_costly_dep
- = (rs6000_sched_groups ? store_to_load_dep_costly : no_dep_costly);
+ = (rs6000_sched_groups ? true_store_to_load_dep_costly : no_dep_costly);
if (rs6000_sched_costly_dep_str)
{
else
switch (rs6000_cpu)
{
- case PROCESSOR_RIOS1:
- rs6000_cost = &rios1_cost;
- break;
-
- case PROCESSOR_RIOS2:
- rs6000_cost = &rios2_cost;
- break;
-
case PROCESSOR_RS64A:
rs6000_cost = &rs64a_cost;
break;
break;
case PROCESSOR_PPC8540:
+ case PROCESSOR_PPC8548:
rs6000_cost = &ppc8540_cost;
break;
rs6000_cost = &ppce500mc64_cost;
break;
+ case PROCESSOR_PPCE5500:
+ rs6000_cost = &ppce5500_cost;
+ break;
+
+ case PROCESSOR_PPCE6500:
+ rs6000_cost = &ppce6500_cost;
+ break;
+
case PROCESSOR_TITAN:
rs6000_cost = &titan_cost;
break;
&& rs6000_single_float == 0 && rs6000_double_float == 0)
rs6000_single_float = rs6000_double_float = 1;
- /* Reset single and double FP flags if target is E500. */
- if (TARGET_E500)
- {
- rs6000_single_float = rs6000_double_float = 0;
- if (TARGET_E500_SINGLE)
- rs6000_single_float = 1;
- if (TARGET_E500_DOUBLE)
- rs6000_single_float = rs6000_double_float = 1;
- }
-
- if (main_target_opt)
- {
- if (main_target_opt->x_rs6000_single_float != rs6000_single_float)
- error ("target attribute or pragma changes single precision floating "
- "point");
- if (main_target_opt->x_rs6000_double_float != rs6000_double_float)
- error ("target attribute or pragma changes double precision floating "
- "point");
- }
-
/* If not explicitly specified via option, decide whether to generate indexed
load/store instructions. */
if (TARGET_AVOID_XFORM == -1)
return (1 << rs6000_loop_align (label)) - 1;
}
-/* Implement targetm.vectorize.builtin_mul_widen_even. */
-static tree
-rs6000_builtin_mul_widen_even (tree type)
-{
- if (!TARGET_ALTIVEC)
- return NULL_TREE;
-
- switch (TYPE_MODE (type))
- {
- case V8HImode:
- return TYPE_UNSIGNED (type)
- ? rs6000_builtin_decls[ALTIVEC_BUILTIN_VMULEUH_UNS]
- : rs6000_builtin_decls[ALTIVEC_BUILTIN_VMULESH];
-
- case V16QImode:
- return TYPE_UNSIGNED (type)
- ? rs6000_builtin_decls[ALTIVEC_BUILTIN_VMULEUB_UNS]
- : rs6000_builtin_decls[ALTIVEC_BUILTIN_VMULESB];
- default:
- return NULL_TREE;
- }
-}
-
-/* Implement targetm.vectorize.builtin_mul_widen_odd. */
-static tree
-rs6000_builtin_mul_widen_odd (tree type)
-{
- if (!TARGET_ALTIVEC)
- return NULL_TREE;
-
- switch (TYPE_MODE (type))
- {
- case V8HImode:
- return TYPE_UNSIGNED (type)
- ? rs6000_builtin_decls[ALTIVEC_BUILTIN_VMULOUH_UNS]
- : rs6000_builtin_decls[ALTIVEC_BUILTIN_VMULOSH];
-
- case V16QImode:
- return TYPE_UNSIGNED (type)
- ? rs6000_builtin_decls[ALTIVEC_BUILTIN_VMULOUB_UNS]
- : rs6000_builtin_decls[ALTIVEC_BUILTIN_VMULOSB];
- default:
- return NULL_TREE;
- }
-}
-
-
/* Return true iff, data reference of TYPE can reach vector alignment (16)
after applying N number of iterations. This routine does not determine
how may iterations are required to reach desired alignment. */
/* Return true if the vector misalignment factor is supported by the
target. */
-bool
+static bool
rs6000_builtin_support_vector_misalignment (enum machine_mode mode,
const_tree type,
int misalignment,
tree vectype, int misalign)
{
unsigned elements;
+ tree elem_type;
switch (type_of_cost)
{
case vec_perm:
if (TARGET_VSX)
- return 4;
+ return 3;
else
return 1;
case vec_promote_demote:
if (TARGET_VSX)
- return 5;
+ return 4;
else
return 1;
return 2;
+ case vec_construct:
+ elements = TYPE_VECTOR_SUBPARTS (vectype);
+ elem_type = TREE_TYPE (vectype);
+ /* 32-bit vectors loaded into registers are stored as double
+ precision, so we need n/2 converts in addition to the usual
+ n/2 merges to construct a vector of short floats from them. */
+ if (SCALAR_FLOAT_TYPE_P (elem_type)
+ && TYPE_PRECISION (elem_type) == 32)
+ return elements + 1;
+ else
+ return elements / 2 + 1;
+
default:
gcc_unreachable ();
}
return word_mode;
}
+/* Bookkeeping for the vectorizer cost hooks.  LOOP_INFO is the loop
+ being vectorized, and may be NULL (rs6000_finish_cost checks for
+ that case).  COST is indexed by enum vect_cost_model_location:
+ vect_prologue, vect_body and vect_epilogue.  */
+typedef struct _rs6000_cost_data
+{
+ struct loop *loop_info;
+ unsigned cost[3];
+} rs6000_cost_data;
+
+/* Test for likely overcommitment of vector hardware resources.  If a
+ loop iteration is relatively large, and too large a percentage of
+ instructions in the loop are vectorized, the cost model may not
+ adequately reflect delays from unavailable vector resources.
+ Penalize the loop body cost for this case. */
+
+static void
+rs6000_density_test (rs6000_cost_data *data)
+{
+ const int DENSITY_PCT_THRESHOLD = 85;
+ const int DENSITY_SIZE_THRESHOLD = 70;
+ const int DENSITY_PENALTY = 10;
+ struct loop *loop = data->loop_info;
+ basic_block *bbs = get_loop_body (loop);
+ int nbbs = loop->num_nodes;
+ int vec_cost = data->cost[vect_body], not_vec_cost = 0;
+ int i, density_pct;
+
+ /* Walk every statement in the loop body and count those the
+ vectorizer will leave alone: neither relevant to vectorization
+ nor part of a recognized pattern.  */
+ for (i = 0; i < nbbs; i++)
+ {
+ basic_block bb = bbs[i];
+ gimple_stmt_iterator gsi;
+
+ for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
+ {
+ gimple stmt = gsi_stmt (gsi);
+ stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
+
+ if (!STMT_VINFO_RELEVANT_P (stmt_info)
+ && !STMT_VINFO_IN_PATTERN_P (stmt_info))
+ not_vec_cost++;
+ }
+ }
+
+ free (bbs);
+ /* NOTE(review): assumes vec_cost + not_vec_cost > 0; a loop with a
+ zero accumulated body cost and no unvectorized statements would
+ divide by zero here -- confirm callers guarantee this.  */
+ density_pct = (vec_cost * 100) / (vec_cost + not_vec_cost);
+
+ /* Penalize only when the loop is both dense (mostly vector code)
+ and large; small loops are unlikely to oversubscribe resources.  */
+ if (density_pct > DENSITY_PCT_THRESHOLD
+ && vec_cost + not_vec_cost > DENSITY_SIZE_THRESHOLD)
+ {
+ data->cost[vect_body] = vec_cost * (100 + DENSITY_PENALTY) / 100;
+ if (vect_print_dump_info (REPORT_DETAILS))
+ fprintf (vect_dump,
+ "density %d%%, cost %d exceeds threshold, penalizing "
+ "loop body cost by %d%%", density_pct,
+ vec_cost + not_vec_cost, DENSITY_PENALTY);
+ }
+}
+
+/* Implement targetm.vectorize.init_cost. */
+
+/* Allocate and zero a cost-accumulation record for LOOP_INFO (which
+ may be NULL).  The returned pointer is handed back to the
+ add_stmt_cost/finish_cost hooks and released by
+ rs6000_destroy_cost_data.  */
+static void *
+rs6000_init_cost (struct loop *loop_info)
+{
+ rs6000_cost_data *data = XNEW (struct _rs6000_cost_data);
+ data->loop_info = loop_info;
+ data->cost[vect_prologue] = 0;
+ data->cost[vect_body] = 0;
+ data->cost[vect_epilogue] = 0;
+ return data;
+}
+
+/* Implement targetm.vectorize.add_stmt_cost. */
+
+/* Accumulate the cost of COUNT statements of kind KIND into the
+ WHERE bucket of DATA, and return the amount added.  Nothing is
+ accumulated (and 0 is returned) when -fvect-cost-model is off.  */
+static unsigned
+rs6000_add_stmt_cost (void *data, int count, enum vect_cost_for_stmt kind,
+ struct _stmt_vec_info *stmt_info, int misalign,
+ enum vect_cost_model_location where)
+{
+ rs6000_cost_data *cost_data = (rs6000_cost_data*) data;
+ unsigned retval = 0;
+
+ if (flag_vect_cost_model)
+ {
+ /* STMT_INFO may be null (e.g. for costs not tied to a statement);
+ fall back to a NULL_TREE vectype in that case.  */
+ tree vectype = stmt_info ? stmt_vectype (stmt_info) : NULL_TREE;
+ int stmt_cost = rs6000_builtin_vectorization_cost (kind, vectype,
+ misalign);
+ /* Statements in an inner loop relative to the loop being
+ vectorized are weighted more heavily. The value here is
+ arbitrary and could potentially be improved with analysis. */
+ if (where == vect_body && stmt_info && stmt_in_inner_loop_p (stmt_info))
+ count *= 50; /* FIXME. */
+
+ retval = (unsigned) (count * stmt_cost);
+ cost_data->cost[where] += retval;
+ }
+
+ return retval;
+}
+
+/* Implement targetm.vectorize.finish_cost. */
+
+/* Report the accumulated prologue/body/epilogue costs from DATA into
+ the three output parameters.  The density penalty is applied only
+ when a loop is being vectorized (loop_info non-null).  */
+static void
+rs6000_finish_cost (void *data, unsigned *prologue_cost,
+ unsigned *body_cost, unsigned *epilogue_cost)
+{
+ rs6000_cost_data *cost_data = (rs6000_cost_data*) data;
+
+ if (cost_data->loop_info)
+ rs6000_density_test (cost_data);
+
+ *prologue_cost = cost_data->cost[vect_prologue];
+ *body_cost = cost_data->cost[vect_body];
+ *epilogue_cost = cost_data->cost[vect_epilogue];
+}
+
+/* Implement targetm.vectorize.destroy_cost_data. */
+
+/* Release the record allocated by rs6000_init_cost.  */
+static void
+rs6000_destroy_cost_data (void *data)
+{
+ free (data);
+}
+
/* Handler for the Mathematical Acceleration Subsystem (mass) interface to a
library with vectorized intrinsics. */
int
num_insns_constant_wide (HOST_WIDE_INT value)
{
- /* signed constant loadable with {cal|addi} */
+ /* signed constant loadable with addi */
if ((unsigned HOST_WIDE_INT) (value + 0x8000) < 0x10000)
return 1;
- /* constant loadable with {cau|addis} */
+ /* constant loadable with addis */
else if ((value & 0xffff) == 0
&& (value >> 31 == -1 || value >> 31 == 0))
return 1;
of 64-bit items is not supported on Altivec. */
if (all_same && GET_MODE_SIZE (inner_mode) <= 4)
{
- mem = assign_stack_temp (mode, GET_MODE_SIZE (inner_mode), 0);
+ mem = assign_stack_temp (mode, GET_MODE_SIZE (inner_mode));
emit_move_insn (adjust_address_nv (mem, inner_mode, 0),
XVECEXP (vals, 0, 0));
x = gen_rtx_UNSPEC (VOIDmode,
/* Construct the vector in memory one field at a time
and load the whole vector. */
- mem = assign_stack_temp (mode, GET_MODE_SIZE (mode), 0);
+ mem = assign_stack_temp (mode, GET_MODE_SIZE (mode));
for (i = 0; i < n_elts; i++)
emit_move_insn (adjust_address_nv (mem, inner_mode,
i * GET_MODE_SIZE (inner_mode)),
}
/* Load single variable value. */
- mem = assign_stack_temp (mode, GET_MODE_SIZE (inner_mode), 0);
+ mem = assign_stack_temp (mode, GET_MODE_SIZE (inner_mode));
emit_move_insn (adjust_address_nv (mem, inner_mode, 0), val);
x = gen_rtx_UNSPEC (VOIDmode,
gen_rtvec (1, const0_rtx), UNSPEC_LVE);
}
/* Allocate mode-sized buffer. */
- mem = assign_stack_temp (mode, GET_MODE_SIZE (mode), 0);
+ mem = assign_stack_temp (mode, GET_MODE_SIZE (mode));
emit_move_insn (mem, vec);
|| (REG_P (op1) && INT_REGNO_P (REGNO (op1))));
}
+/* Given an address, return a constant offset term if one exists. */
+
+/* Returns the CONST_INT rtx holding the offset, or NULL_RTX if the
+ address has no constant term.  */
+static rtx
+address_offset (rtx op)
+{
+ /* Look through auto-modify forms: for PRE_INC/PRE_DEC the address
+ of interest is operand 0; for PRE_MODIFY and LO_SUM it is
+ operand 1.  */
+ if (GET_CODE (op) == PRE_INC
+ || GET_CODE (op) == PRE_DEC)
+ op = XEXP (op, 0);
+ else if (GET_CODE (op) == PRE_MODIFY
+ || GET_CODE (op) == LO_SUM)
+ op = XEXP (op, 1);
+
+ /* Strip a CONST wrapper, e.g. (const (plus (symbol) (const_int))).  */
+ if (GET_CODE (op) == CONST)
+ op = XEXP (op, 0);
+
+ /* The constant term of a PLUS, if any, is operand 1.  */
+ if (GET_CODE (op) == PLUS)
+ op = XEXP (op, 1);
+
+ if (CONST_INT_P (op))
+ return op;
+
+ return NULL_RTX;
+}
+
+/* Return true if the MEM operand is a memory operand suitable for use
+ with a (full width, possibly multiple) gpr load/store.  On
+ powerpc64 this means the offset must be divisible by 4.
+ Implements 'Y' constraint.
+
+ Accept direct, indexed, offset, lo_sum and tocref.  Since this is
+ a constraint function we know the operand has satisfied a suitable
+ memory predicate.  Also accept some odd rtl generated by reload
+ (see rs6000_legitimize_reload_address for various forms).  It is
+ important that reload rtl be accepted by appropriate constraints
+ but not by the operand predicate.
+
+ Offsetting a lo_sum should not be allowed, except where we know by
+ alignment that a 32k boundary is not crossed, but see the ???
+ comment in rs6000_legitimize_reload_address.  Note that by
+ "offsetting" here we mean a further offset to access parts of the
+ MEM.  It's fine to have a lo_sum where the inner address is offset
+ from a sym, since the same sym+offset will appear in the high part
+ of the address calculation. */
+
+bool
+mem_operand_gpr (rtx op, enum machine_mode mode)
+{
+ unsigned HOST_WIDE_INT offset;
+ int extra;
+ rtx addr = XEXP (op, 0);
+
+ op = address_offset (addr);
+ /* No constant offset term: direct/indexed addressing is always OK.  */
+ if (op == NULL_RTX)
+ return true;
+
+ offset = INTVAL (op);
+ /* 64-bit gpr loads/stores require a 4-byte-aligned displacement.  */
+ if (TARGET_POWERPC64 && (offset & 3) != 0)
+ return false;
+
+ if (GET_CODE (addr) == LO_SUM)
+ /* We know by alignment that ABI_AIX medium/large model toc refs
+ will not cross a 32k boundary, since all entries in the
+ constant pool are naturally aligned and we check alignment for
+ other medium model toc-relative addresses.  For ABI_V4 and
+ ABI_DARWIN lo_sum addresses, we just check that 64-bit
+ offsets are 4-byte aligned. */
+ return true;
+
+ /* Multi-register accesses need EXTRA bytes beyond the first word;
+ the whole displacement range must stay within the signed 16-bit
+ field.  */
+ extra = GET_MODE_SIZE (mode) - UNITS_PER_WORD;
+ gcc_assert (extra >= 0);
+ return offset + 0x8000 < 0x10000u - extra;
+}
\f
/* Subroutines of rs6000_legitimize_address and rs6000_legitimate_address_p. */
&& ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (get_pool_constant (base), Pmode));
}
-static rtx tocrel_base, tocrel_offset;
+static const_rtx tocrel_base, tocrel_offset;
+
+/* Return true if OP is a toc pointer relative address (the output
+ of create_TOC_reference).  If STRICT, do not match high part or
+ non-split -mcmodel=large/medium toc pointer relative addresses. */
bool
-toc_relative_expr_p (rtx op)
+toc_relative_expr_p (const_rtx op, bool strict)
{
- if (GET_CODE (op) != CONST)
+ if (!TARGET_TOC)
return false;
- split_const (op, &tocrel_base, &tocrel_offset);
+ if (TARGET_CMODEL != CMODEL_SMALL)
+ {
+ /* Only match the low part. */
+ if (GET_CODE (op) == LO_SUM
+ && REG_P (XEXP (op, 0))
+ && INT_REG_OK_FOR_BASE_P (XEXP (op, 0), strict))
+ op = XEXP (op, 1);
+ else if (strict)
+ return false;
+ }
+
+ /* Record base and offset in the file-scope tocrel_base and
+ tocrel_offset as a side effect, for use by callers such as
+ legitimate_constant_pool_address_p.  */
+ tocrel_base = op;
+ tocrel_offset = const0_rtx;
+ if (GET_CODE (op) == PLUS && CONST_INT_P (XEXP (op, 1)))
+ {
+ tocrel_base = XEXP (op, 0);
+ tocrel_offset = XEXP (op, 1);
+ }
+
return (GET_CODE (tocrel_base) == UNSPEC
&& XINT (tocrel_base, 1) == UNSPEC_TOCREL);
}
legitimate_constant_pool_address_p (const_rtx x, enum machine_mode mode,
bool strict)
{
- return (TARGET_TOC
- && (GET_CODE (x) == PLUS || GET_CODE (x) == LO_SUM)
- && GET_CODE (XEXP (x, 0)) == REG
- && (REGNO (XEXP (x, 0)) == TOC_REGISTER
- || ((TARGET_MINIMAL_TOC
- || TARGET_CMODEL != CMODEL_SMALL)
- && INT_REG_OK_FOR_BASE_P (XEXP (x, 0), strict)))
- && toc_relative_expr_p (XEXP (x, 1))
+ return (toc_relative_expr_p (x, strict)
&& (TARGET_CMODEL != CMODEL_MEDIUM
|| constant_pool_expr_p (XVECEXP (tocrel_base, 0, 0))
|| mode == QImode
#define SPE_CONST_OFFSET_OK(x) (((x) & ~0xf8) == 0)
bool
-rs6000_legitimate_offset_address_p (enum machine_mode mode, rtx x, int strict)
+rs6000_legitimate_offset_address_p (enum machine_mode mode, rtx x,
+ bool strict, bool worst_case)
{
- unsigned HOST_WIDE_INT offset, extra;
+ unsigned HOST_WIDE_INT offset;
+ unsigned int extra;
if (GET_CODE (x) != PLUS)
return false;
- if (GET_CODE (XEXP (x, 0)) != REG)
+ if (!REG_P (XEXP (x, 0)))
return false;
if (!INT_REG_OK_FOR_BASE_P (XEXP (x, 0), strict))
return false;
return SPE_CONST_OFFSET_OK (offset);
case DFmode:
- if (TARGET_E500_DOUBLE)
- return SPE_CONST_OFFSET_OK (offset);
-
- /* If we are using VSX scalar loads, restrict ourselves to reg+reg
- addressing. */
- if (VECTOR_MEM_VSX_P (DFmode))
- return false;
-
case DDmode:
case DImode:
/* On e500v2, we may have:
if (TARGET_E500_DOUBLE)
return SPE_CONST_OFFSET_OK (offset);
- if (mode == DFmode || mode == DDmode || !TARGET_POWERPC64)
+ /* If we are using VSX scalar loads, restrict ourselves to reg+reg
+ addressing. */
+ if (mode == DFmode && VECTOR_MEM_VSX_P (DFmode))
+ return false;
+
+ if (!worst_case)
+ break;
+ if (!TARGET_POWERPC64)
extra = 4;
else if (offset & 3)
return false;
break;
case TFmode:
+ case TDmode:
+ case TImode:
if (TARGET_E500_DOUBLE)
return (SPE_CONST_OFFSET_OK (offset)
&& SPE_CONST_OFFSET_OK (offset + 8));
- case TDmode:
- case TImode:
- if (mode == TFmode || mode == TDmode || !TARGET_POWERPC64)
+ extra = 8;
+ if (!worst_case)
+ break;
+ if (!TARGET_POWERPC64)
extra = 12;
else if (offset & 3)
return false;
- else
- extra = 8;
break;
default:
if (!INT_REG_OK_FOR_BASE_P (XEXP (x, 0), strict))
return false;
/* Restrict addressing for DI because of our SUBREG hackery. */
- if (TARGET_E500_DOUBLE && (mode == DFmode || mode == TFmode
- || mode == DDmode || mode == TDmode
- || mode == DImode))
+ if (TARGET_E500_DOUBLE && GET_MODE_SIZE (mode) > UNITS_PER_WORD)
return false;
x = XEXP (x, 1);
return false;
if (GET_MODE_NUNITS (mode) != 1)
return false;
- if (GET_MODE_BITSIZE (mode) > 64
- || (GET_MODE_BITSIZE (mode) > 32 && !TARGET_POWERPC64
- && !(TARGET_HARD_FLOAT && TARGET_FPRS && TARGET_DOUBLE_FLOAT
- && (mode == DFmode || mode == DDmode))))
+ if (GET_MODE_SIZE (mode) > UNITS_PER_WORD
+ && !(/* ??? Assume floating point reg based on mode? */
+ TARGET_HARD_FLOAT && TARGET_FPRS && TARGET_DOUBLE_FLOAT
+ && (mode == DFmode || mode == DDmode)))
return false;
return CONSTANT_P (x);
rs6000_legitimize_address (rtx x, rtx oldx ATTRIBUTE_UNUSED,
enum machine_mode mode)
{
- unsigned int extra = 0;
+ unsigned int extra;
if (!reg_offset_addressing_ok_p (mode))
{
return rs6000_legitimize_tls_address (x, model);
}
+ extra = 0;
switch (mode)
{
- case DFmode:
- case DDmode:
- extra = 4;
- break;
- case DImode:
- if (!TARGET_POWERPC64)
- extra = 4;
- break;
case TFmode:
case TDmode:
- extra = 12;
- break;
case TImode:
- extra = TARGET_POWERPC64 ? 8 : 12;
+ /* As in legitimate_offset_address_p we do not assume
+ worst-case. The mode here is just a hint as to the registers
+ used. A TImode is usually in gprs, but may actually be in
+ fprs. Leave worst-case scenario for reload to handle via
+ insn constraints. */
+ extra = 8;
break;
default:
break;
&& GET_CODE (XEXP (x, 1)) == CONST_INT
&& ((unsigned HOST_WIDE_INT) (INTVAL (XEXP (x, 1)) + 0x8000)
>= 0x10000 - extra)
- && !((TARGET_POWERPC64
- && (mode == DImode || mode == TImode)
- && (INTVAL (XEXP (x, 1)) & 3) != 0)
- || SPE_VECTOR_MODE (mode)
- || (TARGET_E500_DOUBLE && (mode == DFmode || mode == TFmode
- || mode == DImode || mode == DDmode
- || mode == TDmode))))
+ && !(SPE_VECTOR_MODE (mode)
+ || (TARGET_E500_DOUBLE && GET_MODE_SIZE (mode) > UNITS_PER_WORD)))
{
HOST_WIDE_INT high_int, low_int;
rtx sum;
&& GET_CODE (XEXP (x, 0)) == REG
&& GET_CODE (XEXP (x, 1)) != CONST_INT
&& GET_MODE_NUNITS (mode) == 1
- && ((TARGET_HARD_FLOAT && TARGET_FPRS && TARGET_DOUBLE_FLOAT)
- || TARGET_POWERPC64
- || ((mode != DImode && mode != DFmode && mode != DDmode)
- || (TARGET_E500_DOUBLE && mode != DDmode)))
- && (TARGET_POWERPC64 || mode != DImode)
- && !avoiding_indexed_address_p (mode)
- && mode != TImode
- && mode != TFmode
- && mode != TDmode)
+ && (GET_MODE_SIZE (mode) <= UNITS_PER_WORD
+ || (/* ??? Assume floating point reg based on mode? */
+ (TARGET_HARD_FLOAT && TARGET_FPRS && TARGET_DOUBLE_FLOAT)
+ && (mode == DFmode || mode == DDmode)))
+ && !avoiding_indexed_address_p (mode))
{
return gen_rtx_PLUS (Pmode, XEXP (x, 0),
force_reg (Pmode, force_operand (XEXP (x, 1), 0)));
}
else if (SPE_VECTOR_MODE (mode)
- || (TARGET_E500_DOUBLE && (mode == DFmode || mode == TFmode
- || mode == DDmode || mode == TDmode
- || mode == DImode)))
+ || (TARGET_E500_DOUBLE && GET_MODE_SIZE (mode) > UNITS_PER_WORD))
{
if (mode == DImode)
return x;
return force_reg (Pmode, x);
}
- else if (TARGET_ELF
+ else if ((TARGET_ELF
+#if TARGET_MACHO
+ || !MACHO_DYNAMIC_NO_PIC_P
+#endif
+ )
&& TARGET_32BIT
&& TARGET_NO_TOC
&& ! flag_pic
&& GET_CODE (x) != CONST_DOUBLE
&& CONSTANT_P (x)
&& GET_MODE_NUNITS (mode) == 1
- && (GET_MODE_BITSIZE (mode) <= 32
- || ((TARGET_HARD_FLOAT && TARGET_FPRS && TARGET_DOUBLE_FLOAT)
+ && (GET_MODE_SIZE (mode) <= UNITS_PER_WORD
+ || (/* ??? Assume floating point reg based on mode? */
+ (TARGET_HARD_FLOAT && TARGET_FPRS && TARGET_DOUBLE_FLOAT)
&& (mode == DFmode || mode == DDmode))))
{
rtx reg = gen_reg_rtx (Pmode);
- emit_insn (gen_elf_high (reg, x));
- return gen_rtx_LO_SUM (Pmode, reg, x);
- }
- else if (TARGET_MACHO && TARGET_32BIT && TARGET_NO_TOC
- && ! flag_pic
-#if TARGET_MACHO
- && ! MACHO_DYNAMIC_NO_PIC_P
-#endif
- && GET_CODE (x) != CONST_INT
- && GET_CODE (x) != CONST_DOUBLE
- && CONSTANT_P (x)
- && GET_MODE_NUNITS (mode) == 1
- && ((TARGET_HARD_FLOAT && TARGET_FPRS && TARGET_DOUBLE_FLOAT)
- || (mode != DFmode && mode != DDmode))
- && mode != DImode
- && mode != TImode)
- {
- rtx reg = gen_reg_rtx (Pmode);
- emit_insn (gen_macho_high (reg, x));
+ if (TARGET_ELF)
+ emit_insn (gen_elf_high (reg, x));
+ else
+ emit_insn (gen_macho_high (reg, x));
return gen_rtx_LO_SUM (Pmode, reg, x);
}
else if (TARGET_TOC
&& GET_CODE (x) == SYMBOL_REF
&& constant_pool_expr_p (x)
&& ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (get_pool_constant (x), Pmode))
- {
- rtx reg = TARGET_CMODEL != CMODEL_SMALL ? gen_reg_rtx (Pmode) : NULL_RTX;
- return create_TOC_reference (x, reg);
- }
+ return create_TOC_reference (x, NULL_RTX);
else
return x;
}
/* This is called from dwarf2out.c via TARGET_ASM_OUTPUT_DWARF_DTPREL.
We need to emit DTP-relative relocations. */
+static void rs6000_output_dwarf_dtprel (FILE *, int, rtx) ATTRIBUTE_UNUSED;
static void
rs6000_output_dwarf_dtprel (FILE *file, int size, rtx x)
{
static rtx
rs6000_delegitimize_address (rtx orig_x)
{
- rtx x, y;
+ rtx x, y, offset;
orig_x = delegitimize_mem_from_attrs (orig_x);
x = orig_x;
if (MEM_P (x))
x = XEXP (x, 0);
- if (GET_CODE (x) == (TARGET_CMODEL != CMODEL_SMALL ? LO_SUM : PLUS)
- && GET_CODE (XEXP (x, 1)) == CONST)
+ y = x;
+ if (TARGET_CMODEL != CMODEL_SMALL
+ && GET_CODE (y) == LO_SUM)
+ y = XEXP (y, 1);
+
+ offset = NULL_RTX;
+ if (GET_CODE (y) == PLUS
+ && GET_MODE (y) == Pmode
+ && CONST_INT_P (XEXP (y, 1)))
{
- rtx offset = NULL_RTX;
+ offset = XEXP (y, 1);
+ y = XEXP (y, 0);
+ }
- y = XEXP (XEXP (x, 1), 0);
- if (GET_CODE (y) == PLUS
- && GET_MODE (y) == Pmode
- && CONST_INT_P (XEXP (y, 1)))
+ if (GET_CODE (y) == UNSPEC
+ && XINT (y, 1) == UNSPEC_TOCREL)
+ {
+#ifdef ENABLE_CHECKING
+ if (REG_P (XVECEXP (y, 0, 1))
+ && REGNO (XVECEXP (y, 0, 1)) == TOC_REGISTER)
{
- offset = XEXP (y, 1);
- y = XEXP (y, 0);
+ /* All good. */
}
- if (GET_CODE (y) == UNSPEC
- && XINT (y, 1) == UNSPEC_TOCREL
- && ((GET_CODE (XEXP (x, 0)) == REG
- && (REGNO (XEXP (x, 0)) == TOC_REGISTER
- || TARGET_MINIMAL_TOC
- || TARGET_CMODEL != CMODEL_SMALL))
- || (TARGET_CMODEL != CMODEL_SMALL
- && GET_CODE (XEXP (x, 0)) == CONST
- && GET_CODE (XEXP (XEXP (x, 0), 0)) == PLUS
- && GET_CODE (XEXP (XEXP (XEXP (x, 0), 0), 0)) == REG
- && REGNO (XEXP (XEXP (XEXP (x, 0), 0), 0)) == TOC_REGISTER
- && GET_CODE (XEXP (XEXP (XEXP (x, 0), 0), 1)) == HIGH
- && rtx_equal_p (XEXP (x, 1),
- XEXP (XEXP (XEXP (XEXP (x, 0), 0), 1), 0)))))
- {
- y = XVECEXP (y, 0, 0);
- if (offset != NULL_RTX)
- y = gen_rtx_PLUS (Pmode, y, offset);
- if (!MEM_P (orig_x))
- return y;
- else
- return replace_equiv_address_nv (orig_x, y);
+ else if (GET_CODE (XVECEXP (y, 0, 1)) == DEBUG_EXPR)
+ {
+ /* Weirdness alert. df_note_compute can replace r2 with a
+ debug_expr when this unspec is in a debug_insn.
+ Seen in gcc.dg/pr51957-1.c */
+ }
+ else
+ {
+ debug_rtx (orig_x);
+ abort ();
}
+#endif
+ y = XVECEXP (y, 0, 0);
+ if (offset != NULL_RTX)
+ y = gen_rtx_PLUS (Pmode, y, offset);
+ if (!MEM_P (orig_x))
+ return y;
+ else
+ return replace_equiv_address_nv (orig_x, y);
}
if (TARGET_MACHO
static bool
rs6000_cannot_force_const_mem (enum machine_mode mode ATTRIBUTE_UNUSED, rtx x)
{
- if (GET_CODE (x) == CONST
- && GET_CODE (XEXP (x, 0)) == PLUS
- && GET_CODE (XEXP (XEXP (x, 0), 1)) == HIGH)
+ if (GET_CODE (x) == HIGH
+ && GET_CODE (XEXP (x, 0)) == UNSPEC)
return true;
return rs6000_tls_referenced_p (x);
return RS6000_SYMBOL_REF_TLS_P (*x);
}
+/* Return true iff the given SYMBOL_REF refers to a constant pool entry
+ that we have put in the TOC, or for cmodel=medium, if the SYMBOL_REF
+ can be addressed relative to the toc pointer. */
+
+static bool
+use_toc_relative_ref (rtx sym)
+{
+ /* First arm: SYM is a constant pool symbol whose constant has been
+ placed in the TOC.  Second arm: medium code model can address any
+ local, non-constant-pool symbol toc-relatively.  */
+ return ((constant_pool_expr_p (sym)
+ && ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (get_pool_constant (sym),
+ get_pool_mode (sym)))
+ || (TARGET_CMODEL == CMODEL_MEDIUM
+ && !CONSTANT_POOL_ADDRESS_P (sym)
+ && SYMBOL_REF_LOCAL_P (sym)));
+}
+
/* Our implementation of LEGITIMIZE_RELOAD_ADDRESS. Returns a value to
replace the input X, or the original X if no replacement is called for.
The output parameter *WIN is 1 if the calling macro should goto WIN,
{
push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
BASE_REG_CLASS, GET_MODE (x), VOIDmode, 0, 0,
- opnum, (enum reload_type)type);
+ opnum, (enum reload_type) type);
*win = 1;
return x;
}
{
push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
BASE_REG_CLASS, Pmode, VOIDmode, 0, 0,
- opnum, (enum reload_type)type);
+ opnum, (enum reload_type) type);
*win = 1;
return x;
}
floating point constant. */
push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
BASE_REG_CLASS, Pmode, VOIDmode, 0, 0,
- opnum, (enum reload_type)type);
+ opnum, (enum reload_type) type);
*win = 1;
return x;
}
#endif
if (TARGET_CMODEL != CMODEL_SMALL
- && GET_CODE (x) == LO_SUM
- && GET_CODE (XEXP (x, 0)) == PLUS
- && GET_CODE (XEXP (XEXP (x, 0), 0)) == REG
- && REGNO (XEXP (XEXP (x, 0), 0)) == TOC_REGISTER
- && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST
- && GET_CODE (XEXP (XEXP (XEXP (x, 0), 1), 0)) == HIGH
- && GET_CODE (XEXP (x, 1)) == CONST
- && GET_CODE (XEXP (XEXP (x, 1), 0)) == UNSPEC
- && XINT (XEXP (XEXP (x, 1), 0), 1) == UNSPEC_TOCREL
- && rtx_equal_p (XEXP (XEXP (XEXP (XEXP (x, 0), 1), 0), 0), XEXP (x, 1)))
+ && reg_offset_p
+ && small_toc_ref (x, VOIDmode))
{
+ rtx hi = gen_rtx_HIGH (Pmode, copy_rtx (x));
+ x = gen_rtx_LO_SUM (Pmode, hi, x);
push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
BASE_REG_CLASS, Pmode, VOIDmode, 0, 0,
opnum, (enum reload_type) type);
push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
BASE_REG_CLASS, GET_MODE (x), VOIDmode, 0, 0,
- opnum, (enum reload_type)type);
+ opnum, (enum reload_type) type);
*win = 1;
return x;
}
#endif
/* Don't do this for TFmode or TDmode, since the result isn't offsettable.
The same goes for DImode without 64-bit gprs and DFmode and DDmode
- without fprs. */
+ without fprs.
+ ??? Assume floating point reg based on mode? This assumption is
+ violated by eg. powerpc-linux -m32 compile of gcc.dg/pr28796-2.c
+ where reload ends up doing a DFmode load of a constant from
+ mem using two gprs. Unfortunately, at this point reload
+ hasn't yet selected regs so poking around in reload data
+ won't help and even if we could figure out the regs reliably,
+ we'd still want to allow this transformation when the mem is
+ naturally aligned. Since we say the address is good here, we
+ can't disable offsets from LO_SUMs in mem_operand_gpr.
+ FIXME: Allow offset from lo_sum for other modes too, when
+ mem is sufficiently aligned. */
&& mode != TFmode
&& mode != TDmode
&& (mode != DImode || TARGET_POWERPC64)
push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
BASE_REG_CLASS, Pmode, VOIDmode, 0, 0,
- opnum, (enum reload_type)type);
+ opnum, (enum reload_type) type);
*win = 1;
return x;
}
if (TARGET_TOC
&& reg_offset_p
&& GET_CODE (x) == SYMBOL_REF
- && constant_pool_expr_p (x)
- && ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (get_pool_constant (x), mode))
+ && use_toc_relative_ref (x))
{
x = create_TOC_reference (x, NULL_RTX);
if (TARGET_CMODEL != CMODEL_SMALL)
32-bit DImode, TImode, TFmode, TDmode), indexed addressing cannot be used
because adjacent memory cells are accessed by adding word-sized offsets
during assembly output. */
-bool
+static bool
rs6000_legitimate_address_p (enum machine_mode mode, rtx x, bool reg_ok_strict)
{
bool reg_offset_p = reg_offset_addressing_ok_p (mode);
|| XEXP (x, 0) == arg_pointer_rtx)
&& GET_CODE (XEXP (x, 1)) == CONST_INT)
return 1;
- if (rs6000_legitimate_offset_address_p (mode, x, reg_ok_strict))
+ if (rs6000_legitimate_offset_address_p (mode, x, reg_ok_strict, false))
return 1;
if (mode != TImode
&& mode != TFmode
&& (mode == DFmode || mode == DDmode || mode == DImode))
&& TARGET_UPDATE
&& legitimate_indirect_address_p (XEXP (x, 0), reg_ok_strict)
- && (rs6000_legitimate_offset_address_p (mode, XEXP (x, 1), reg_ok_strict)
+ && (rs6000_legitimate_offset_address_p (mode, XEXP (x, 1),
+ reg_ok_strict, false)
|| (!avoiding_indexed_address_p (mode)
&& legitimate_indexed_address_p (XEXP (x, 1), reg_ok_strict)))
&& rtx_equal_p (XEXP (XEXP (x, 1), 0), XEXP (x, 0)))
&& GET_CODE (XEXP (addr, 1)) == CONST_INT)
{
unsigned HOST_WIDE_INT val = INTVAL (XEXP (addr, 1));
- return val + 12 + 0x8000 >= 0x10000;
+ return val + 0x8000 >= 0x10000 - (TARGET_POWERPC64 ? 8 : 12);
}
break;
rtx
rs6000_find_base_term (rtx op)
{
- rtx base, offset;
+ rtx base;
- split_const (op, &base, &offset);
+ base = op;
+ if (GET_CODE (base) == CONST)
+ base = XEXP (base, 0);
+ if (GET_CODE (base) == PLUS)
+ base = XEXP (base, 0);
if (GET_CODE (base) == UNSPEC)
switch (XINT (base, 1))
{
in 32-bit mode, that the recog predicate rejects. */
-bool
-rs6000_offsettable_memref_p (rtx op)
+static bool
+rs6000_offsettable_memref_p (rtx op, enum machine_mode reg_mode)
{
+ bool worst_case;
+
if (!MEM_P (op))
return false;
/* First mimic offsettable_memref_p. */
- if (offsettable_address_p (1, GET_MODE (op), XEXP (op, 0)))
+ if (offsettable_address_p (true, GET_MODE (op), XEXP (op, 0)))
return true;
/* offsettable_address_p invokes rs6000_mode_dependent_address, but
reference and, therefore, assumes that it is the largest supported
mode (TFmode). As a consequence, legitimate offsettable memory
references are rejected. rs6000_legitimate_offset_address_p contains
- the correct logic for the PLUS case of rs6000_mode_dependent_address. */
- return rs6000_legitimate_offset_address_p (GET_MODE (op), XEXP (op, 0), 1);
+ the correct logic for the PLUS case of rs6000_mode_dependent_address,
+ at least with a little bit of help here given that we know the
+ actual registers used. */
+ worst_case = ((TARGET_POWERPC64 && GET_MODE_CLASS (reg_mode) == MODE_INT)
+ || GET_MODE_SIZE (reg_mode) == 4);
+ return rs6000_legitimate_offset_address_p (GET_MODE (op), XEXP (op, 0),
+ true, worst_case);
}
/* Change register usage conditional on target flags. */
if (TARGET_DEBUG_TARGET)
fprintf (stderr, "rs6000_conditional_register_usage called\n");
- /* Set MQ register fixed (already call_used) if not POWER
- architecture (RIOS1, RIOS2, RSC, and PPC601) so that it will not
- be allocated. */
- if (! TARGET_POWER)
- fixed_regs[64] = 1;
+ /* Set MQ register fixed (already call_used) so that it will not be
+ allocated. */
+ fixed_regs[64] = 1;
/* 64-bit AIX and Linux reserve GPR13 for thread-private data. */
if (TARGET_64BIT)
&& !gpc_reg_operand (operands[1], mode))
operands[1] = force_reg (mode, operands[1]);
- if (mode == SFmode && ! TARGET_POWERPC
- && TARGET_HARD_FLOAT && TARGET_FPRS && TARGET_DOUBLE_FLOAT
- && GET_CODE (operands[0]) == MEM)
- {
- int regnum;
-
- if (reload_in_progress || reload_completed)
- regnum = true_regnum (operands[1]);
- else if (GET_CODE (operands[1]) == REG)
- regnum = REGNO (operands[1]);
- else
- regnum = -1;
-
- /* If operands[1] is a register, on POWER it may have
- double-precision data in it, so truncate it to single
- precision. */
- if (FP_REGNO_P (regnum) || regnum >= FIRST_PSEUDO_REGISTER)
- {
- rtx newreg;
- newreg = (!can_create_pseudo_p () ? copy_rtx (operands[1])
- : gen_reg_rtx (mode));
- emit_insn (gen_aux_truncdfsf2 (newreg, operands[1]));
- operands[1] = newreg;
- }
- }
-
/* Recognize the case where operand[1] is a reference to thread-local
data and load its address to a register. */
if (rs6000_tls_referenced_p (operands[1]))
/* If this is a SYMBOL_REF that refers to a constant pool entry,
and we have put it in the TOC, we just need to make a TOC-relative
reference to it. */
- if ((TARGET_TOC
- && GET_CODE (operands[1]) == SYMBOL_REF
- && constant_pool_expr_p (operands[1])
- && ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (get_pool_constant (operands[1]),
- get_pool_mode (operands[1])))
- || (TARGET_CMODEL == CMODEL_MEDIUM
- && GET_CODE (operands[1]) == SYMBOL_REF
- && !CONSTANT_POOL_ADDRESS_P (operands[1])
- && SYMBOL_REF_LOCAL_P (operands[1])))
- {
- rtx reg = NULL_RTX;
- if (TARGET_CMODEL != CMODEL_SMALL)
- {
- if (can_create_pseudo_p ())
- reg = gen_reg_rtx (Pmode);
- else
- reg = operands[0];
- }
- operands[1] = create_TOC_reference (operands[1], reg);
- }
+ if (TARGET_TOC
+ && GET_CODE (operands[1]) == SYMBOL_REF
+ && use_toc_relative_ref (operands[1]))
+ operands[1] = create_TOC_reference (operands[1], operands[0]);
else if (mode == Pmode
&& CONSTANT_P (operands[1])
&& GET_CODE (operands[1]) != HIGH
- && !(TARGET_CMODEL != CMODEL_SMALL
- && GET_CODE (operands[1]) == CONST
- && GET_CODE (XEXP (operands[1], 0)) == PLUS
- && GET_CODE (XEXP (XEXP (operands[1], 0), 1)) == HIGH)
&& ((GET_CODE (operands[1]) != CONST_INT
&& ! easy_fp_constant (operands[1], mode))
|| (GET_CODE (operands[1]) == CONST_INT
> (TARGET_CMODEL != CMODEL_SMALL ? 3 : 2)))
|| (GET_CODE (operands[0]) == REG
&& FP_REGNO_P (REGNO (operands[0]))))
- && ! legitimate_constant_pool_address_p (operands[1], mode,
- false)
- && ! toc_relative_expr_p (operands[1])
+ && !toc_relative_expr_p (operands[1], false)
&& (TARGET_CMODEL == CMODEL_SMALL
|| can_create_pseudo_p ()
|| (REG_P (operands[0])
get_pool_constant (XEXP (operands[1], 0)),
get_pool_mode (XEXP (operands[1], 0))))
{
- rtx tocref;
- rtx reg = NULL_RTX;
- if (TARGET_CMODEL != CMODEL_SMALL)
- {
- if (can_create_pseudo_p ())
- reg = gen_reg_rtx (Pmode);
- else
- reg = operands[0];
- }
- tocref = create_TOC_reference (XEXP (operands[1], 0), reg);
+ rtx tocref = create_TOC_reference (XEXP (operands[1], 0),
+ operands[0]);
operands[1] = gen_const_mem (mode, tocref);
set_mem_alias_set (operands[1], get_TOC_alias_set ());
}
case TImode:
rs6000_eliminate_indexed_memrefs (operands);
-
- if (TARGET_POWER)
- {
- emit_insn (gen_rtx_PARALLEL (VOIDmode,
- gen_rtvec (2,
- gen_rtx_SET (VOIDmode,
- operands[0], operands[1]),
- gen_rtx_CLOBBER (VOIDmode,
- gen_rtx_SCRATCH (SImode)))));
- return;
- }
break;
default:
emit_set:
emit_insn (gen_rtx_SET (VOIDmode, operands[0], operands[1]));
}
+
+/* Return true if a structure, union or array containing FIELD should be
+ accessed using `BLKMODE'.
+
+ For the SPE, simd types are V2SI, and gcc can be tempted to put the
+ entire thing in a DI and use subregs to access the internals.
+ store_bit_field() will force (subreg:DI (reg:V2SI x))'s to the
+ back-end.  Because a single GPR can hold a V2SI, but not a DI, the
+ best thing to do is set structs to BLKmode and avoid Severe Tire
+ Damage.
+
+ On e500 v2, DF and DI modes suffer from the same anomaly.  DF can
+ fit into 1, whereas DI still needs two. */
+
+/* The SPE check keys off FIELD's (vector) type; the e500 check keys
+ off the aggregate's proposed MODE.  */
+static bool
+rs6000_member_type_forces_blk (const_tree field, enum machine_mode mode)
+{
+ return ((TARGET_SPE && TREE_CODE (TREE_TYPE (field)) == VECTOR_TYPE)
+ || (TARGET_E500_DOUBLE && mode == DFmode));
+}
\f
/* Nonzero if we can use a floating-point register to pass this arg. */
#define USE_FP_FOR_ARG_P(CUM,MODE,TYPE) \
/* Implement va_arg. */
-tree
+static tree
rs6000_gimplify_va_arg (tree valist, tree type, gimple_seq *pre_p,
gimple_seq *post_p)
{
static void
rs6000_init_libfuncs (void)
{
- if (DEFAULT_ABI != ABI_V4 && TARGET_XCOFF
- && !TARGET_POWER2 && !TARGET_POWERPC)
- {
- /* AIX library routines for float->int conversion. */
- set_conv_libfunc (sfix_optab, SImode, DFmode, "__itrunc");
- set_conv_libfunc (ufix_optab, SImode, DFmode, "__uitrunc");
- set_conv_libfunc (sfix_optab, SImode, TFmode, "_qitrunc");
- set_conv_libfunc (ufix_optab, SImode, TFmode, "_quitrunc");
- }
-
if (!TARGET_IEEEQUAD)
/* AIX/Darwin/64-bit Linux quad floating point routines. */
if (!TARGET_XL_COMPAT)
set_optab_libfunc (neg_optab, TFmode, "_q_neg");
set_optab_libfunc (smul_optab, TFmode, "_q_mul");
set_optab_libfunc (sdiv_optab, TFmode, "_q_div");
- if (TARGET_PPC_GPOPT || TARGET_POWER2)
+ if (TARGET_PPC_GPOPT)
set_optab_libfunc (sqrt_optab, TFmode, "_q_sqrt");
set_optab_libfunc (eq_optab, TFmode, "_q_feq");
rtx xop[10];
if (XVECLEN (operands[0], 0) == 1)
- return "{l|lwz} %2,0(%1)";
+ return "lwz %2,0(%1)";
for (i = 0; i < words; i++)
if (refers_to_regno_p (REGNO (operands[2]) + i,
xop[0] = GEN_INT (4 * (words-1));
xop[1] = operands[1];
xop[2] = operands[2];
- output_asm_insn ("{lsi|lswi} %2,%1,%0\n\t{l|lwz} %1,%0(%1)", xop);
+ output_asm_insn ("lswi %2,%1,%0\n\tlwz %1,%0(%1)", xop);
return "";
}
else if (i == 0)
xop[0] = GEN_INT (4 * (words-1));
xop[1] = operands[1];
xop[2] = gen_rtx_REG (SImode, REGNO (operands[2]) + 1);
- output_asm_insn ("{cal %1,4(%1)|addi %1,%1,4}\n\t{lsi|lswi} %2,%1,%0\n\t{l|lwz} %1,-4(%1)", xop);
+ output_asm_insn ("addi %1,%1,4\n\tlswi %2,%1,%0\n\tlwz %1,-4(%1)", xop);
return "";
}
else
xop[0] = GEN_INT (j * 4);
xop[1] = operands[1];
xop[2] = gen_rtx_REG (SImode, REGNO (operands[2]) + j);
- output_asm_insn ("{l|lwz} %2,%0(%1)", xop);
+ output_asm_insn ("lwz %2,%0(%1)", xop);
}
xop[0] = GEN_INT (i * 4);
xop[1] = operands[1];
- output_asm_insn ("{l|lwz} %1,%0(%1)", xop);
+ output_asm_insn ("lwz %1,%0(%1)", xop);
return "";
}
}
- return "{lsi|lswi} %2,%1,%N0";
+ return "lswi %2,%1,%N0";
}
\f
if (rclass == GENERAL_REGS || rclass == BASE_REGS)
{
if (!legitimate_indirect_address_p (addr, false)
- && !rs6000_legitimate_offset_address_p (TImode, addr, false))
+ && !rs6000_legitimate_offset_address_p (TImode, addr,
+ false, true))
{
sri->icode = icode;
/* account for splitting the loads, and converting the
&& MEM_P (x)
&& GET_MODE_SIZE (GET_MODE (x)) >= UNITS_PER_WORD)
{
- rtx addr = XEXP (x, 0);
-
- if (GET_CODE (addr) == PRE_MODIFY)
- addr = XEXP (addr, 1);
- else if (GET_CODE (addr) == LO_SUM
- && GET_CODE (XEXP (addr, 0)) == REG
- && GET_CODE (XEXP (addr, 1)) == CONST)
- addr = XEXP (XEXP (addr, 1), 0);
+ rtx off = address_offset (XEXP (x, 0));
+ unsigned int extra = GET_MODE_SIZE (GET_MODE (x)) - UNITS_PER_WORD;
- if (GET_CODE (addr) == PLUS
- && GET_CODE (XEXP (addr, 1)) == CONST_INT
- && (INTVAL (XEXP (addr, 1)) & 3) != 0)
+ if (off != NULL_RTX
+ && (INTVAL (off) & 3) != 0
+ && (unsigned HOST_WIDE_INT) INTVAL (off) + 0x8000 < 0x10000 - extra)
{
if (in_p)
sri->icode = CODE_FOR_reload_di_load;
else
default_p = true;
}
+ else if (!TARGET_POWERPC64
+ && rs6000_reload_register_type (rclass) == GPR_REGISTER_TYPE
+ && MEM_P (x)
+ && GET_MODE_SIZE (GET_MODE (x)) > UNITS_PER_WORD)
+ {
+ rtx off = address_offset (XEXP (x, 0));
+ unsigned int extra = GET_MODE_SIZE (GET_MODE (x)) - UNITS_PER_WORD;
+
+ /* We need a secondary reload only when our legitimate_address_p
+ says the address is good (as otherwise the entire address
+ will be reloaded). So for mode sizes of 8 and 16 this will
+ be when the offset is in the ranges [0x7ffc,0x7fff] and
+ [0x7ff4,0x7ff7] respectively. Note that the address we see
+ here may have been manipulated by legitimize_reload_address. */
+ if (off != NULL_RTX
+ && ((unsigned HOST_WIDE_INT) INTVAL (off) - (0x8000 - extra)
+ < UNITS_PER_WORD))
+ {
+ if (in_p)
+ sri->icode = CODE_FOR_reload_si_load;
+ else
+ sri->icode = CODE_FOR_reload_si_store;
+ sri->extra_cost = 2;
+ ret = NO_REGS;
+ }
+ else
+ default_p = true;
+ }
else
default_p = true;
}
if (GET_CODE (addr) == PLUS
- && (!rs6000_legitimate_offset_address_p (TImode, addr, false)
- || and_op2 != NULL_RTX))
+ && (and_op2 != NULL_RTX
+ || !rs6000_legitimate_offset_address_p (TImode, addr,
+ false, true)))
{
addr_op1 = XEXP (addr, 0);
addr_op2 = XEXP (addr, 1);
scratch_or_premodify = scratch;
}
else if (!legitimate_indirect_address_p (addr, false)
- && !rs6000_legitimate_offset_address_p (TImode, addr, false))
+ && !rs6000_legitimate_offset_address_p (TImode, addr,
+ false, true))
{
if (TARGET_DEBUG_ADDR)
{
&& GET_MODE_SIZE (mode) == 8
&& and_op2 == NULL_RTX
&& scratch_or_premodify == scratch
- && rs6000_legitimate_offset_address_p (mode, addr, false)))
+ && rs6000_legitimate_offset_address_p (mode, addr, false, false)))
;
else if (GET_CODE (addr) == PLUS)
}
/* Convert reloads involving 64-bit gprs and misaligned offset
- addressing to use indirect addressing. */
+ addressing, or multiple 32-bit gprs and offsets that are too large,
+ to use indirect addressing. */
void
-rs6000_secondary_reload_ppc64 (rtx reg, rtx mem, rtx scratch, bool store_p)
+rs6000_secondary_reload_gpr (rtx reg, rtx mem, rtx scratch, bool store_p)
{
int regno = true_regnum (reg);
enum reg_class rclass;
if (TARGET_DEBUG_ADDR)
{
- fprintf (stderr, "\nrs6000_secondary_reload_ppc64, type = %s\n",
+ fprintf (stderr, "\nrs6000_secondary_reload_gpr, type = %s\n",
store_p ? "store" : "load");
fprintf (stderr, "reg:\n");
debug_rtx (reg);
print_operand (FILE *file, rtx x, int code)
{
int i;
- HOST_WIDE_INT val;
unsigned HOST_WIDE_INT uval;
switch (code)
case 'D':
/* Like 'J' but get to the GT bit only. */
- gcc_assert (GET_CODE (x) == REG);
+ gcc_assert (REG_P (x));
/* Bit 1 is GT bit. */
i = 4 * (REGNO (x) - CR0_REGNO) + 1;
case 'L':
/* Write second word of DImode or DFmode reference. Works on register
or non-indexed memory only. */
- if (GET_CODE (x) == REG)
+ if (REG_P (x))
fputs (reg_names[REGNO (x) + 1], file);
- else if (GET_CODE (x) == MEM)
+ else if (MEM_P (x))
{
/* Handle possible auto-increment. Since it is pre-increment and
we have already done it, we can just use an offset of word. */
case 't':
/* Like 'J' but get to the OVERFLOW/UNORDERED bit. */
- gcc_assert (GET_CODE (x) == REG && GET_MODE (x) == CCmode);
+ gcc_assert (REG_P (x) && GET_MODE (x) == CCmode);
/* Bit 3 is OV bit. */
i = 4 * (REGNO (x) - CR0_REGNO) + 3;
&& REGNO (x) != CTR_REGNO))
output_operand_lossage ("invalid %%T value");
else if (REGNO (x) == LR_REGNO)
- fputs (TARGET_NEW_MNEMONICS ? "lr" : "r", file);
+ fputs ("lr", file);
else
fputs ("ctr", file);
return;
case 'U':
/* Print `u' if this has an auto-increment or auto-decrement. */
- if (GET_CODE (x) == MEM
+ if (MEM_P (x)
&& (GET_CODE (XEXP (x, 0)) == PRE_INC
|| GET_CODE (XEXP (x, 0)) == PRE_DEC
|| GET_CODE (XEXP (x, 0)) == PRE_MODIFY))
case 'W':
/* MB value for a PowerPC64 rldic operand. */
- val = (GET_CODE (x) == CONST_INT
- ? INTVAL (x) : CONST_DOUBLE_HIGH (x));
-
- if (val < 0)
- i = -1;
- else
- for (i = 0; i < HOST_BITS_PER_WIDE_INT; i++)
- if ((val <<= 1) < 0)
- break;
+ i = clz_hwi (GET_CODE (x) == CONST_INT
+ ? INTVAL (x) : CONST_DOUBLE_HIGH (x));
#if HOST_BITS_PER_WIDE_INT == 32
- if (GET_CODE (x) == CONST_INT && i >= 0)
+ if (GET_CODE (x) == CONST_INT && i > 0)
i += 32; /* zero-extend high-part was all 0's */
else if (GET_CODE (x) == CONST_DOUBLE && i == 32)
- {
- val = CONST_DOUBLE_LOW (x);
-
- gcc_assert (val);
- if (val < 0)
- --i;
- else
- for ( ; i < 64; i++)
- if ((val <<= 1) < 0)
- break;
- }
+ i = clz_hwi (CONST_DOUBLE_LOW (x)) + 32;
#endif
- fprintf (file, "%d", i + 1);
+ fprintf (file, "%d", i);
return;
case 'x':
return;
case 'X':
- if (GET_CODE (x) == MEM
+ if (MEM_P (x)
&& (legitimate_indexed_address_p (XEXP (x, 0), 0)
|| (GET_CODE (XEXP (x, 0)) == PRE_MODIFY
&& legitimate_indexed_address_p (XEXP (XEXP (x, 0), 1), 0))))
case 'Y':
/* Like 'L', for third word of TImode */
- if (GET_CODE (x) == REG)
+ if (REG_P (x))
fputs (reg_names[REGNO (x) + 2], file);
- else if (GET_CODE (x) == MEM)
+ else if (MEM_P (x))
{
if (GET_CODE (XEXP (x, 0)) == PRE_INC
|| GET_CODE (XEXP (x, 0)) == PRE_DEC)
case 'Z':
/* Like 'L', for last word of TImode. */
- if (GET_CODE (x) == REG)
+ if (REG_P (x))
fputs (reg_names[REGNO (x) + 3], file);
- else if (GET_CODE (x) == MEM)
+ else if (MEM_P (x))
{
if (GET_CODE (XEXP (x, 0)) == PRE_INC
|| GET_CODE (XEXP (x, 0)) == PRE_DEC)
{
rtx tmp;
- gcc_assert (GET_CODE (x) == MEM);
+ gcc_assert (MEM_P (x));
tmp = XEXP (x, 0);
|| GET_MODE (x) == TImode))
{
/* Handle [reg]. */
- if (GET_CODE (tmp) == REG)
+ if (REG_P (tmp))
{
fprintf (file, "0(%s)", reg_names[REGNO (tmp)]);
break;
{
int x;
- gcc_assert (GET_CODE (XEXP (tmp, 0)) == REG);
+ gcc_assert (REG_P (XEXP (tmp, 0)));
x = INTVAL (XEXP (tmp, 1));
fprintf (file, "%d(%s)", x, reg_names[REGNO (XEXP (tmp, 0))]);
else if (VECTOR_MEM_VSX_P (GET_MODE (x))
&& GET_CODE (tmp) == PRE_MODIFY)
tmp = XEXP (tmp, 1);
- if (GET_CODE (tmp) == REG)
+ if (REG_P (tmp))
fprintf (file, "0,%s", reg_names[REGNO (tmp)]);
else
{
}
case 0:
- if (GET_CODE (x) == REG)
+ if (REG_P (x))
fprintf (file, "%s", reg_names[REGNO (x)]);
- else if (GET_CODE (x) == MEM)
+ else if (MEM_P (x))
{
/* We need to handle PRE_INC and PRE_DEC here, since we need to
know the width from the mode. */
}
else
{
- if (toc_relative_expr_p (x))
+ if (toc_relative_expr_p (x, false))
/* This hack along with a corresponding hack in
rs6000_output_addr_const_extra arranges to output addends
where the assembler expects to find them. eg.
- (const (plus (unspec [symbol_ref ("x") tocrel]) 4))
+ (plus (unspec [(symbol_ref ("x")) (reg 2)] tocrel) 4)
without this hack would be output as "x@toc+4". We
want "x+4@toc". */
- output_addr_const (file, tocrel_base);
+ output_addr_const (file, CONST_CAST_RTX (tocrel_base));
else
output_addr_const (file, x);
}
void
print_operand_address (FILE *file, rtx x)
{
- if (GET_CODE (x) == REG)
+ if (REG_P (x))
fprintf (file, "0(%s)", reg_names[ REGNO (x) ]);
else if (GET_CODE (x) == SYMBOL_REF || GET_CODE (x) == CONST
|| GET_CODE (x) == LABEL_REF)
else
gcc_assert (!TARGET_TOC);
}
- else if (GET_CODE (x) == PLUS && GET_CODE (XEXP (x, 1)) == REG)
+ else if (GET_CODE (x) == PLUS && REG_P (XEXP (x, 0))
+ && REG_P (XEXP (x, 1)))
{
- gcc_assert (REG_P (XEXP (x, 0)));
if (REGNO (XEXP (x, 0)) == 0)
fprintf (file, "%s,%s", reg_names[ REGNO (XEXP (x, 1)) ],
reg_names[ REGNO (XEXP (x, 0)) ]);
fprintf (file, "%s,%s", reg_names[ REGNO (XEXP (x, 0)) ],
reg_names[ REGNO (XEXP (x, 1)) ]);
}
- else if (GET_CODE (x) == PLUS && GET_CODE (XEXP (x, 1)) == CONST_INT)
+ else if (GET_CODE (x) == PLUS && REG_P (XEXP (x, 0))
+ && GET_CODE (XEXP (x, 1)) == CONST_INT)
fprintf (file, HOST_WIDE_INT_PRINT_DEC "(%s)",
INTVAL (XEXP (x, 1)), reg_names[ REGNO (XEXP (x, 0)) ]);
#if TARGET_MACHO
- else if (GET_CODE (x) == LO_SUM && GET_CODE (XEXP (x, 0)) == REG
+ else if (GET_CODE (x) == LO_SUM && REG_P (XEXP (x, 0))
&& CONSTANT_P (XEXP (x, 1)))
{
fprintf (file, "lo16(");
fprintf (file, ")(%s)", reg_names[ REGNO (XEXP (x, 0)) ]);
}
#endif
- else if (legitimate_constant_pool_address_p (x, QImode, true))
+#if TARGET_ELF
+ else if (GET_CODE (x) == LO_SUM && REG_P (XEXP (x, 0))
+ && CONSTANT_P (XEXP (x, 1)))
+ {
+ output_addr_const (file, XEXP (x, 1));
+ fprintf (file, "@l(%s)", reg_names[ REGNO (XEXP (x, 0)) ]);
+ }
+#endif
+ else if (toc_relative_expr_p (x, false))
{
/* This hack along with a corresponding hack in
rs6000_output_addr_const_extra arranges to output addends
where the assembler expects to find them. eg.
(lo_sum (reg 9)
- . (const (plus (unspec [symbol_ref ("x") tocrel]) 8)))
+ . (plus (unspec [(symbol_ref ("x")) (reg 2)] tocrel) 8))
without this hack would be output as "x@toc+8@l(9)". We
want "x+8@toc@l(9)". */
- output_addr_const (file, tocrel_base);
+ output_addr_const (file, CONST_CAST_RTX (tocrel_base));
if (GET_CODE (x) == LO_SUM)
- fprintf (file, "@l(%s)", reg_names[ REGNO (XEXP (x, 0)) ]);
+ fprintf (file, "@l(%s)", reg_names[REGNO (XEXP (x, 0))]);
else
- fprintf (file, "(%s)", reg_names[REGNO (XEXP (x, 0))]);
- }
-#if TARGET_ELF
- else if (GET_CODE (x) == LO_SUM && GET_CODE (XEXP (x, 0)) == REG
- && CONSTANT_P (XEXP (x, 1)))
- {
- output_addr_const (file, XEXP (x, 1));
- fprintf (file, "@l(%s)", reg_names[ REGNO (XEXP (x, 0)) ]);
+ fprintf (file, "(%s)", reg_names[REGNO (XVECEXP (tocrel_base, 0, 1))]);
}
-#endif
else
gcc_unreachable ();
}
switch (XINT (x, 1))
{
case UNSPEC_TOCREL:
- gcc_assert (GET_CODE (XVECEXP (x, 0, 0)) == SYMBOL_REF);
+ gcc_checking_assert (GET_CODE (XVECEXP (x, 0, 0)) == SYMBOL_REF
+ && REG_P (XVECEXP (x, 0, 1))
+ && REGNO (XVECEXP (x, 0, 1)) == TOC_REGISTER);
output_addr_const (file, XVECEXP (x, 0, 0));
if (x == tocrel_base && tocrel_offset != const0_rtx)
{
if (INTVAL (tocrel_offset) >= 0)
fprintf (file, "+");
- output_addr_const (file, tocrel_offset);
+ output_addr_const (file, CONST_CAST_RTX (tocrel_offset));
}
if (!TARGET_AIX || (TARGET_ELF && TARGET_MINIMAL_TOC))
{
else
comp_mode = CCmode;
+ /* If we have an unsigned compare, make sure we don't have a signed value as
+ an immediate. */
+ if (comp_mode == CCUNSmode && GET_CODE (op1) == CONST_INT
+ && INTVAL (op1) < 0)
+ {
+ op0 = copy_rtx_if_shared (op0);
+ op1 = force_reg (GET_MODE (op0), op1);
+ cmp = gen_rtx_fmt_ee (code, GET_MODE (cmp), op0, op1);
+ }
+
/* First, the compare. */
compare_result = gen_reg_rtx (comp_mode);
gcc_unreachable ();
}
- /* Maybe we have a guess as to how likely the branch is.
- The old mnemonics don't have a way to specify this information. */
+ /* Maybe we have a guess as to how likely the branch is. */
pred = "";
note = find_reg_note (insn, REG_BR_PROB, NULL_RTX);
if (note != NULL_RTX)
}
if (label == NULL)
- s += sprintf (s, "{b%sr|b%slr%s} ", ccode, ccode, pred);
+ s += sprintf (s, "b%slr%s ", ccode, pred);
else
- s += sprintf (s, "{b%s|b%s%s} ", ccode, ccode, pred);
+ s += sprintf (s, "b%s%s ", ccode, pred);
/* We need to escape any '%' characters in the reg_names string.
Assume they'd only be the first character.... */
if (GET_MODE (false_cond) != result_mode)
return 0;
+ /* Don't allow using floating point comparisons for integer results for
+ now. */
+ if (FLOAT_MODE_P (compare_mode) && !FLOAT_MODE_P (result_mode))
+ return 0;
+
/* First, work out if the hardware can do this at all, or
if it's too slow.... */
if (!FLOAT_MODE_P (compare_mode))
/* Expand barriers before and after a load_locked/store_cond sequence. */
-static void
-rs6000_pre_atomic_barrier (enum memmodel model)
+static rtx
+rs6000_pre_atomic_barrier (rtx mem, enum memmodel model)
{
+ rtx addr = XEXP (mem, 0);
+ int strict_p = (reload_in_progress || reload_completed);
+
+ if (!legitimate_indirect_address_p (addr, strict_p)
+ && !legitimate_indexed_address_p (addr, strict_p))
+ {
+ addr = force_reg (Pmode, addr);
+ mem = replace_equiv_address_nv (mem, addr);
+ }
+
switch (model)
{
case MEMMODEL_RELAXED:
default:
gcc_unreachable ();
}
+ return mem;
}
static void
else if (reg_overlap_mentioned_p (retval, oldval))
oldval = copy_to_reg (oldval);
- rs6000_pre_atomic_barrier (mod_s);
+ mem = rs6000_pre_atomic_barrier (mem, mod_s);
label1 = NULL_RTX;
if (!is_weak)
mode = SImode;
}
- rs6000_pre_atomic_barrier (model);
+ mem = rs6000_pre_atomic_barrier (mem, model);
label = gen_rtx_LABEL_REF (VOIDmode, gen_label_rtx ());
emit_label (XEXP (label, 0));
mode = SImode;
}
- rs6000_pre_atomic_barrier (model);
+ mem = rs6000_pre_atomic_barrier (mem, model);
label = gen_label_rtx ();
emit_label (label);
emit_insn (gen_add3_insn (breg, breg, delta_rtx));
src = replace_equiv_address (src, breg);
}
- else if (! rs6000_offsettable_memref_p (src))
+ else if (! rs6000_offsettable_memref_p (src, reg_mode))
{
if (GET_CODE (XEXP (src, 0)) == PRE_MODIFY)
{
emit_insn (gen_add3_insn (breg, breg, delta_rtx));
dst = replace_equiv_address (dst, breg);
}
- else if (!rs6000_offsettable_memref_p (dst)
+ else if (!rs6000_offsettable_memref_p (dst, reg_mode)
&& GET_CODE (XEXP (dst, 0)) != LO_SUM)
{
if (GET_CODE (XEXP (dst, 0)) == PRE_MODIFY)
}
}
else if (GET_CODE (XEXP (dst, 0)) != LO_SUM)
- gcc_assert (rs6000_offsettable_memref_p (dst));
+ gcc_assert (rs6000_offsettable_memref_p (dst, reg_mode));
}
for (i = 0; i < nregs; i++)
/* This page contains routines that are used to determine what the
function prologue and epilogue code will do and write them out. */
+static inline bool
+save_reg_p (int r)
+{
+ return !call_used_regs[r] && df_regs_ever_live_p (r);
+}
+
/* Return the first fixed-point register that is required to be
saved. 32 if none. */
/* Find lowest numbered live register. */
for (first_reg = 13; first_reg <= 31; first_reg++)
- if (df_regs_ever_live_p (first_reg)
- && (! call_used_regs[first_reg]
- || (first_reg == RS6000_PIC_OFFSET_TABLE_REGNUM
- && ((DEFAULT_ABI == ABI_V4 && flag_pic != 0)
- || (DEFAULT_ABI == ABI_DARWIN && flag_pic)
- || (TARGET_TOC && TARGET_MINIMAL_TOC)))))
+ if (save_reg_p (first_reg))
break;
+ if (first_reg > RS6000_PIC_OFFSET_TABLE_REGNUM
+ && ((DEFAULT_ABI == ABI_V4 && flag_pic != 0)
+ || (DEFAULT_ABI == ABI_DARWIN && flag_pic)
+ || (TARGET_TOC && TARGET_MINIMAL_TOC))
+ && df_regs_ever_live_p (RS6000_PIC_OFFSET_TABLE_REGNUM))
+ first_reg = RS6000_PIC_OFFSET_TABLE_REGNUM;
+
#if TARGET_MACHO
if (flag_pic
&& crtl->uses_pic_offset_table
/* Find lowest numbered live register. */
for (first_reg = 14 + 32; first_reg <= 63; first_reg++)
- if (df_regs_ever_live_p (first_reg))
+ if (save_reg_p (first_reg))
break;
return first_reg;
/* Find lowest numbered live register. */
for (i = FIRST_ALTIVEC_REGNO + 20; i <= LAST_ALTIVEC_REGNO; ++i)
- if (df_regs_ever_live_p (i))
+ if (save_reg_p (i))
break;
return i;
REST_INLINE_VRS = 0x200
};
-#ifndef V_SAVE_INLINE
-#define V_SAVE_INLINE(FIRST_REG) 1
-#endif
-
static int
rs6000_savres_strategy (rs6000_stack_t *info,
bool using_static_chain_p)
| SAVE_INLINE_VRS | REST_INLINE_VRS);
if (info->first_fp_reg_save == 64
- || FP_SAVE_INLINE (info->first_fp_reg_save)
/* The out-of-line FP routines use double-precision stores;
we can't use those routines if we don't have such stores. */
|| (TARGET_HARD_FLOAT && !TARGET_DOUBLE_FLOAT)
strategy |= SAVE_INLINE_FPRS | REST_INLINE_FPRS;
if (info->first_gp_reg_save == 32
- || GP_SAVE_INLINE (info->first_gp_reg_save)
|| (!(strategy & SAVRES_MULTIPLE)
&& global_regs_p (info->first_gp_reg_save, 32)))
strategy |= SAVE_INLINE_GPRS | REST_INLINE_GPRS;
if (info->first_altivec_reg_save == LAST_ALTIVEC_REGNO + 1
- || V_SAVE_INLINE (info->first_altivec_reg_save)
|| global_regs_p (info->first_altivec_reg_save, LAST_ALTIVEC_REGNO + 1))
strategy |= SAVE_INLINE_VRS | REST_INLINE_VRS;
+ /* Define cutoff for using out-of-line functions to save registers. */
+ if (DEFAULT_ABI == ABI_V4 || TARGET_ELF)
+ {
+ if (!optimize_size)
+ {
+ strategy |= SAVE_INLINE_FPRS | REST_INLINE_FPRS;
+ strategy |= SAVE_INLINE_GPRS | REST_INLINE_GPRS;
+ strategy |= SAVE_INLINE_VRS | REST_INLINE_VRS;
+ }
+ else
+ {
+ /* Prefer out-of-line restore if it will exit. */
+ if (info->first_fp_reg_save > 61)
+ strategy |= SAVE_INLINE_FPRS;
+ if (info->first_gp_reg_save > 29)
+ {
+ if (info->first_fp_reg_save == 64)
+ strategy |= SAVE_INLINE_GPRS;
+ else
+ strategy |= SAVE_INLINE_GPRS | REST_INLINE_GPRS;
+ }
+ if (info->first_altivec_reg_save == LAST_ALTIVEC_REGNO)
+ strategy |= SAVE_INLINE_VRS | REST_INLINE_VRS;
+ }
+ }
+ else if (DEFAULT_ABI == ABI_DARWIN)
+ {
+ if (info->first_fp_reg_save > 60)
+ strategy |= SAVE_INLINE_FPRS | REST_INLINE_FPRS;
+ if (info->first_gp_reg_save > 29)
+ strategy |= SAVE_INLINE_GPRS | REST_INLINE_GPRS;
+ strategy |= SAVE_INLINE_VRS | REST_INLINE_VRS;
+ }
+ else
+ {
+ gcc_checking_assert (DEFAULT_ABI == ABI_AIX);
+ if (info->first_fp_reg_save > 61)
+ strategy |= SAVE_INLINE_FPRS | REST_INLINE_FPRS;
+ strategy |= SAVE_INLINE_GPRS | REST_INLINE_GPRS;
+ strategy |= SAVE_INLINE_VRS | REST_INLINE_VRS;
+ }
+
/* Don't bother to try to save things out-of-line if r11 is occupied
by the static chain. It would require too much fiddling and the
static chain is rarely used anyway. FPRs are saved w.r.t the stack
pointer on Darwin, and AIX uses r1 or r12. */
if (using_static_chain_p && DEFAULT_ABI != ABI_AIX)
- strategy |= ((DEFAULT_ABI == ABI_DARWIN
- ? 0 : SAVE_INLINE_FPRS | REST_INLINE_FPRS)
+ strategy |= ((DEFAULT_ABI == ABI_DARWIN ? 0 : SAVE_INLINE_FPRS)
| SAVE_INLINE_GPRS
| SAVE_INLINE_VRS | REST_INLINE_VRS);
+ /* We can only use the out-of-line routines to restore if we've
+ saved all the registers from first_fp_reg_save in the prologue.
+ Otherwise, we risk loading garbage. */
+ if ((strategy & (SAVE_INLINE_FPRS | REST_INLINE_FPRS)) == SAVE_INLINE_FPRS)
+ {
+ int i;
+
+ for (i = info->first_fp_reg_save; i < 64; i++)
+ if (!save_reg_p (i))
+ {
+ strategy |= REST_INLINE_FPRS;
+ break;
+ }
+ }
+
/* If we are going to use store multiple, then don't even bother
with the out-of-line routines, since the store-multiple
instruction will always be smaller. */
|| info->first_fp_reg_save != 64))
strategy |= REST_INLINE_GPRS;
- /* We can only use load multiple or the out-of-line routines to
- restore if we've used store multiple or out-of-line routines
- in the prologue, i.e. if we've saved all the registers from
- first_gp_reg_save. Otherwise, we risk loading garbage. */
- if ((strategy & (SAVE_INLINE_GPRS | SAVRES_MULTIPLE)) == SAVE_INLINE_GPRS)
- strategy |= REST_INLINE_GPRS;
-
/* Saving CR interferes with the exit routines used on the SPE, so
just punt here. */
if (TARGET_SPE_ABI
&& info->cr_save_p)
strategy |= REST_INLINE_GPRS;
-#if defined (POWERPC_LINUX) || defined (POWERPC_FREEBSD)
- if (TARGET_64BIT)
+ /* We can only use load multiple or the out-of-line routines to
+ restore if we've used store multiple or out-of-line routines
+ in the prologue, i.e. if we've saved all the registers from
+ first_gp_reg_save. Otherwise, we risk loading garbage. */
+ if ((strategy & (SAVE_INLINE_GPRS | REST_INLINE_GPRS | SAVRES_MULTIPLE))
+ == SAVE_INLINE_GPRS)
+ {
+ int i;
+
+ for (i = info->first_gp_reg_save; i < 32; i++)
+ if (!save_reg_p (i))
+ {
+ strategy |= REST_INLINE_GPRS;
+ break;
+ }
+ }
+
+ if (TARGET_ELF && TARGET_64BIT)
{
if (!(strategy & SAVE_INLINE_FPRS))
strategy |= SAVE_NOINLINE_FPRS_SAVES_LR;
&& info->first_fp_reg_save == 64)
strategy |= SAVE_NOINLINE_GPRS_SAVES_LR;
}
-#else
- if (TARGET_AIX && !(strategy & REST_INLINE_FPRS))
+ else if (TARGET_AIX && !(strategy & REST_INLINE_FPRS))
strategy |= REST_NOINLINE_FPRS_DOESNT_RESTORE_LR;
-#endif
+
if (TARGET_MACHO && !(strategy & SAVE_INLINE_FPRS))
strategy |= SAVE_NOINLINE_FPRS_SAVES_LR;
- info_ptr->first_altivec_reg_save);
/* Does this function call anything? */
- info_ptr->calls_p = (! current_function_is_leaf
+ info_ptr->calls_p = (! crtl->is_leaf
|| cfun->machine->ra_needs_full_frame);
/* Determine if we need to save the condition code registers. */
rtx
create_TOC_reference (rtx symbol, rtx largetoc_reg)
{
- rtx tocrel, tocreg;
+ rtx tocrel, tocreg, hi;
if (TARGET_DEBUG_ADDR)
{
if (!can_create_pseudo_p ())
df_set_regs_ever_live (TOC_REGISTER, true);
- tocrel = gen_rtx_CONST (Pmode,
- gen_rtx_UNSPEC (Pmode, gen_rtvec (1, symbol),
- UNSPEC_TOCREL));
tocreg = gen_rtx_REG (Pmode, TOC_REGISTER);
- if (TARGET_CMODEL != CMODEL_SMALL)
+ tocrel = gen_rtx_UNSPEC (Pmode, gen_rtvec (2, symbol, tocreg), UNSPEC_TOCREL);
+ if (TARGET_CMODEL == CMODEL_SMALL || can_create_pseudo_p ())
+ return tocrel;
+
+ hi = gen_rtx_HIGH (Pmode, copy_rtx (tocrel));
+ if (largetoc_reg != NULL)
{
- rtx hi = gen_rtx_CONST (Pmode,
- gen_rtx_PLUS (Pmode, tocreg,
- gen_rtx_HIGH (Pmode, tocrel)));
- if (largetoc_reg != NULL)
- {
- emit_move_insn (largetoc_reg, hi);
- hi = largetoc_reg;
- }
- return gen_rtx_LO_SUM (Pmode, hi, copy_rtx (tocrel));
+ emit_move_insn (largetoc_reg, hi);
+ hi = largetoc_reg;
}
- else
- return gen_rtx_PLUS (Pmode, tocreg, tocrel);
+ return gen_rtx_LO_SUM (Pmode, hi, tocrel);
}
/* Issue assembly directives that create a reference to the given DWARF
rs6000_aix_asm_output_dwarf_table_ref (char * frame_table_label)
{
fprintf (asm_out_file, "\t.ref %s\n",
- TARGET_STRIP_NAME_ENCODING (frame_table_label));
+ (* targetm.strip_name_encoding) (frame_table_label));
}
\f
/* This ties together stack memory (MEM with an alias set of frame_alias_set)
xops[0] = reg1;
xops[1] = reg2;
if (TARGET_64BIT)
- output_asm_insn ("{cmp|cmpd} 0,%0,%1", xops);
+ output_asm_insn ("cmpd 0,%0,%1", xops);
else
- output_asm_insn ("{cmp|cmpw} 0,%0,%1", xops);
+ output_asm_insn ("cmpw 0,%0,%1", xops);
fputs ("\tbeq 0,", asm_out_file);
assemble_name_raw (asm_out_file, end_lab);
/* TEST_ADDR = TEST_ADDR + PROBE_INTERVAL. */
xops[1] = GEN_INT (-PROBE_INTERVAL);
- output_asm_insn ("{cal %0,%1(%0)|addi %0,%0,%1}", xops);
+ output_asm_insn ("addi %0,%0,%1", xops);
/* Probe at TEST_ADDR and branch. */
xops[1] = gen_rtx_REG (Pmode, 0);
- output_asm_insn ("{st|stw} %1,0(%0)", xops);
+ output_asm_insn ("stw %1,0(%0)", xops);
fprintf (asm_out_file, "\tb ");
assemble_name_raw (asm_out_file, loop_lab);
fputc ('\n', asm_out_file);
emit_frame_save (rtx frame_reg, enum machine_mode mode,
unsigned int regno, int offset, HOST_WIDE_INT frame_reg_to_sp)
{
- rtx reg, insn, mem, addr;
+ rtx reg, insn;
/* Some cases that need register indexed addressing. */
gcc_checking_assert (!((TARGET_ALTIVEC_ABI && ALTIVEC_VECTOR_MODE (mode))
&& !SPE_CONST_OFFSET_OK (offset))));
reg = gen_rtx_REG (mode, regno);
- addr = gen_rtx_PLUS (Pmode, frame_reg, GEN_INT (offset));
- mem = gen_frame_mem (mode, addr);
- insn = emit_move_insn (mem, reg);
+ insn = emit_insn (gen_frame_store (reg, frame_reg, offset));
return rs6000_frame_related (insn, frame_reg, frame_reg_to_sp,
NULL_RTX, NULL_RTX);
}
int_rtx = GEN_INT (offset);
- if ((TARGET_SPE_ABI && SPE_VECTOR_MODE (mode))
+ if ((TARGET_SPE_ABI && SPE_VECTOR_MODE (mode) && !SPE_CONST_OFFSET_OK (offset))
|| (TARGET_E500_DOUBLE && mode == DFmode))
{
offset_rtx = gen_rtx_REG (Pmode, FIXED_SCRATCH);
return NULL_RTX;
}
+/* Return the register number used as a pointer by out-of-line
+ save/restore functions. */
+
static inline unsigned
ptr_regno_for_savres (int sel)
{
{
int i;
for (i = 0; i < 64 - info->first_fp_reg_save; i++)
- if (df_regs_ever_live_p (info->first_fp_reg_save + i)
- && ! call_used_regs[info->first_fp_reg_save + i])
+ if (save_reg_p (info->first_fp_reg_save + i))
emit_frame_save (frame_reg_rtx,
(TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT
? DFmode : SFmode),
int sel = SAVRES_SAVE | SAVRES_VR;
unsigned ptr_regno = ptr_regno_for_savres (sel);
+ if (using_static_chain_p
+ && ptr_regno == STATIC_CHAIN_REGNUM)
+ ptr_regno = 12;
if (REGNO (frame_reg_rtx) != ptr_regno)
START_USE (ptr_regno);
ptr_reg = gen_rtx_REG (Pmode, ptr_regno);
&& TARGET_ALTIVEC_VRSAVE
&& info->vrsave_mask != 0)
{
- rtx reg, mem, vrsave;
+ rtx reg, vrsave;
int offset;
int save_regno;
- /* Get VRSAVE onto a GPR. Note that ABI_V4 might be using r12
- as frame_reg_rtx and r11 as the static chain pointer for
- nested functions. */
+ /* Get VRSAVE onto a GPR. Note that ABI_V4 and ABI_DARWIN might
+ be using r12 as frame_reg_rtx and r11 as the static chain
+ pointer for nested functions. */
save_regno = 12;
if (DEFAULT_ABI == ABI_AIX && !using_static_chain_p)
save_regno = 11;
/* Save VRSAVE. */
offset = info->vrsave_save_offset + frame_off;
- mem = gen_frame_mem (SImode,
- gen_rtx_PLUS (Pmode, frame_reg_rtx,
- GEN_INT (offset)));
- insn = emit_move_insn (mem, reg);
+ insn = emit_insn (gen_frame_store (reg, frame_reg_rtx, offset));
/* Include the registers in the mask. */
emit_insn (gen_iorsi3 (reg, reg, GEN_INT ((int) info->vrsave_mask)));
linux-unwind.h frob_update_context. */
if (rs6000_save_toc_in_prologue_p ())
{
- rtx addr = gen_rtx_PLUS (Pmode, sp_reg_rtx, GEN_INT (5 * reg_size));
- rtx mem = gen_frame_mem (reg_mode, addr);
- emit_move_insn (mem, gen_rtx_REG (reg_mode, TOC_REGNUM));
+ rtx reg = gen_rtx_REG (reg_mode, TOC_REGNUM);
+ emit_insn (gen_frame_store (reg, sp_reg_rtx, 5 * reg_size));
}
}
}
}
- /* Write .extern for AIX common mode routines, if needed. */
- if (! TARGET_POWER && ! TARGET_POWERPC && ! common_mode_defined)
- {
- fputs ("\t.extern __mulh\n", file);
- fputs ("\t.extern __mull\n", file);
- fputs ("\t.extern __divss\n", file);
- fputs ("\t.extern __divus\n", file);
- fputs ("\t.extern __quoss\n", file);
- fputs ("\t.extern __quous\n", file);
- common_mode_defined = 1;
- }
-
rs6000_pic_labelno++;
}
if (using_mfcr_multiple)
{
for (i = 0; i < 8; i++)
- if (df_regs_ever_live_p (CR0_REGNO+i) && ! call_used_regs[CR0_REGNO+i])
+ if (save_reg_p (CR0_REGNO + i))
count++;
gcc_assert (count);
}
ndx = 0;
for (i = 0; i < 8; i++)
- if (df_regs_ever_live_p (CR0_REGNO+i) && ! call_used_regs[CR0_REGNO+i])
+ if (save_reg_p (CR0_REGNO + i))
{
rtvec r = rtvec_alloc (2);
RTVEC_ELT (r, 0) = reg;
RTVEC_ELT (r, 1) = GEN_INT (1 << (7-i));
RTVEC_ELT (p, ndx) =
- gen_rtx_SET (VOIDmode, gen_rtx_REG (CCmode, CR0_REGNO+i),
+ gen_rtx_SET (VOIDmode, gen_rtx_REG (CCmode, CR0_REGNO + i),
gen_rtx_UNSPEC (CCmode, r, UNSPEC_MOVESI_TO_CR));
ndx++;
}
}
else
for (i = 0; i < 8; i++)
- if (df_regs_ever_live_p (CR0_REGNO+i) && ! call_used_regs[CR0_REGNO+i])
- emit_insn (gen_movsi_to_cr_one (gen_rtx_REG (CCmode, CR0_REGNO+i),
+ if (save_reg_p (CR0_REGNO + i))
+ emit_insn (gen_movsi_to_cr_one (gen_rtx_REG (CCmode, CR0_REGNO + i),
reg));
if (!exit_func && (DEFAULT_ABI == ABI_V4 || flag_shrink_wrap))
|| (DEFAULT_ABI != ABI_V4
&& offset_below_red_zone_p (info->vrsave_save_offset))))
{
- rtx addr, mem, reg;
+ rtx reg;
if (frame_reg_rtx == sp_reg_rtx)
{
frame_reg_rtx = hard_frame_pointer_rtx;
}
- addr = gen_rtx_PLUS (Pmode, frame_reg_rtx,
- GEN_INT (info->vrsave_save_offset + frame_off));
- mem = gen_frame_mem (SImode, addr);
reg = gen_rtx_REG (SImode, 12);
- emit_move_insn (reg, mem);
+ emit_insn (gen_frame_load (reg, frame_reg_rtx,
+ info->vrsave_save_offset + frame_off));
emit_insn (generate_set_vrsave (reg, info, 1));
}
&& (DEFAULT_ABI == ABI_V4
|| !offset_below_red_zone_p (info->vrsave_save_offset)))
{
- rtx addr, mem, reg;
+ rtx reg;
- addr = gen_rtx_PLUS (Pmode, frame_reg_rtx,
- GEN_INT (info->vrsave_save_offset + frame_off));
- mem = gen_frame_mem (SImode, addr);
reg = gen_rtx_REG (SImode, 12);
- emit_move_insn (reg, mem);
+ emit_insn (gen_frame_load (reg, frame_reg_rtx,
+ info->vrsave_save_offset + frame_off));
emit_insn (generate_set_vrsave (reg, info, 1));
}
if (TARGET_AIX)
{
- rtx addr = gen_rtx_PLUS (Pmode, frame_reg_rtx,
- GEN_INT (frame_off + 5 * reg_size));
- rtx mem = gen_frame_mem (reg_mode, addr);
-
- emit_move_insn (gen_rtx_REG (reg_mode, 2), mem);
+ rtx reg = gen_rtx_REG (reg_mode, 2);
+ emit_insn (gen_frame_load (reg, frame_reg_rtx,
+ frame_off + 5 * reg_size));
}
for (i = 0; ; ++i)
if (regno == INVALID_REGNUM)
break;
+ /* Note: possible use of r0 here to address SPE regs. */
mem = gen_frame_mem_offset (reg_mode, frame_reg_rtx,
info->ehrd_offset + frame_off
+ reg_size * (int) i);
{
for (i = 0; i < 32 - info->first_gp_reg_save; i++)
if (rs6000_reg_live_or_pic_offset_p (info->first_gp_reg_save + i))
- {
- rtx addr = gen_rtx_PLUS (Pmode, frame_reg_rtx,
- GEN_INT (info->gp_save_offset
- + frame_off
- + reg_size * i));
- rtx mem = gen_frame_mem (reg_mode, addr);
- rtx reg = gen_rtx_REG (reg_mode, info->first_gp_reg_save + i);
-
- emit_move_insn (reg, mem);
- }
+ emit_insn (gen_frame_load
+ (gen_rtx_REG (reg_mode, info->first_gp_reg_save + i),
+ frame_reg_rtx,
+ info->gp_save_offset + frame_off + reg_size * i));
}
if (DEFAULT_ABI == ABI_V4 || flag_shrink_wrap)
/* Restore fpr's if we need to do it without calling a function. */
if (restoring_FPRs_inline)
for (i = 0; i < 64 - info->first_fp_reg_save; i++)
- if ((df_regs_ever_live_p (info->first_fp_reg_save + i)
- && !call_used_regs[info->first_fp_reg_save + i]))
+ if (save_reg_p (info->first_fp_reg_save + i))
{
- rtx addr, mem, reg;
- addr = gen_rtx_PLUS (Pmode, frame_reg_rtx,
- GEN_INT (info->fp_save_offset
- + frame_off
- + 8 * i));
- mem = gen_frame_mem ((TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT
- ? DFmode : SFmode), addr);
- reg = gen_rtx_REG ((TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT
- ? DFmode : SFmode),
- info->first_fp_reg_save + i);
-
- emit_move_insn (reg, mem);
+ rtx reg = gen_rtx_REG ((TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT
+ ? DFmode : SFmode),
+ info->first_fp_reg_save + i);
+ emit_insn (gen_frame_load (reg, frame_reg_rtx,
+ info->fp_save_offset + frame_off + 8 * i));
if (DEFAULT_ABI == ABI_V4 || flag_shrink_wrap)
cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg, cfa_restores);
}
fprintf (file, "\tmflr %s\n", reg_names[0]);
if (NO_PROFILE_COUNTERS)
{
- asm_fprintf (file, "\t{st|stw} %s,4(%s)\n",
+ asm_fprintf (file, "\tstw %s,4(%s)\n",
reg_names[0], reg_names[1]);
}
else if (TARGET_SECURE_PLT && flag_pic)
}
else
asm_fprintf (file, "\tbcl 20,31,1f\n1:\n");
- asm_fprintf (file, "\t{st|stw} %s,4(%s)\n",
+ asm_fprintf (file, "\tstw %s,4(%s)\n",
reg_names[0], reg_names[1]);
asm_fprintf (file, "\tmflr %s\n", reg_names[12]);
- asm_fprintf (file, "\t{cau|addis} %s,%s,",
+ asm_fprintf (file, "\taddis %s,%s,",
reg_names[12], reg_names[12]);
assemble_name (file, buf);
- asm_fprintf (file, "-1b@ha\n\t{cal|la} %s,", reg_names[0]);
+ asm_fprintf (file, "-1b@ha\n\tla %s,", reg_names[0]);
assemble_name (file, buf);
asm_fprintf (file, "-1b@l(%s)\n", reg_names[12]);
}
else if (flag_pic == 1)
{
fputs ("\tbl _GLOBAL_OFFSET_TABLE_@local-4\n", file);
- asm_fprintf (file, "\t{st|stw} %s,4(%s)\n",
+ asm_fprintf (file, "\tstw %s,4(%s)\n",
reg_names[0], reg_names[1]);
asm_fprintf (file, "\tmflr %s\n", reg_names[12]);
- asm_fprintf (file, "\t{l|lwz} %s,", reg_names[0]);
+ asm_fprintf (file, "\tlwz %s,", reg_names[0]);
assemble_name (file, buf);
asm_fprintf (file, "@got(%s)\n", reg_names[12]);
}
else if (flag_pic > 1)
{
- asm_fprintf (file, "\t{st|stw} %s,4(%s)\n",
+ asm_fprintf (file, "\tstw %s,4(%s)\n",
reg_names[0], reg_names[1]);
/* Now, we need to get the address of the label. */
if (TARGET_LINK_STACK)
fputs ("-.\n1:", file);
asm_fprintf (file, "\tmflr %s\n", reg_names[11]);
}
- asm_fprintf (file, "\t{l|lwz} %s,0(%s)\n",
+ asm_fprintf (file, "\tlwz %s,0(%s)\n",
reg_names[0], reg_names[11]);
- asm_fprintf (file, "\t{cax|add} %s,%s,%s\n",
+ asm_fprintf (file, "\tadd %s,%s,%s\n",
reg_names[0], reg_names[0], reg_names[11]);
}
else
{
- asm_fprintf (file, "\t{liu|lis} %s,", reg_names[12]);
+ asm_fprintf (file, "\tlis %s,", reg_names[12]);
assemble_name (file, buf);
fputs ("@ha\n", file);
- asm_fprintf (file, "\t{st|stw} %s,4(%s)\n",
+ asm_fprintf (file, "\tstw %s,4(%s)\n",
reg_names[0], reg_names[1]);
- asm_fprintf (file, "\t{cal|la} %s,", reg_names[0]);
+ asm_fprintf (file, "\tla %s,", reg_names[0]);
assemble_name (file, buf);
asm_fprintf (file, "@l(%s)\n", reg_names[12]);
}
scheduling pass will not know about this latency since
the mtctr instruction, which has the latency associated
to it, will be generated by reload. */
- return TARGET_POWER ? 5 : 4;
+ return 4;
case TYPE_BRANCH:
/* Leave some extra cycles between a compare and its
dependent branch, to inhibit expensive mispredicts. */
|| rs6000_cpu_attr == CPU_PPC750
|| rs6000_cpu_attr == CPU_PPC7400
|| rs6000_cpu_attr == CPU_PPC7450
+ || rs6000_cpu_attr == CPU_PPCE5500
+ || rs6000_cpu_attr == CPU_PPCE6500
|| rs6000_cpu_attr == CPU_POWER4
|| rs6000_cpu_attr == CPU_POWER5
|| rs6000_cpu_attr == CPU_POWER7
return false;
}
-/* The function returns true if the target storage location of
- out_insn is adjacent to the target storage location of in_insn */
-/* Return 1 if memory locations are adjacent. */
+/* Try to determine base/offset/size parts of the given MEM.
+ Return true if successful, false if all the values couldn't
+ be determined.
+
+ This function only looks for REG or REG+CONST address forms.
+ REG+REG address form will return false. */
static bool
-adjacent_mem_locations (rtx insn1, rtx insn2)
+get_memref_parts (rtx mem, rtx *base, HOST_WIDE_INT *offset,
+ HOST_WIDE_INT *size)
{
+ rtx addr_rtx;
+ if (MEM_SIZE_KNOWN_P (mem))
+ *size = MEM_SIZE (mem);
+ else
+ return false;
- rtx a = get_store_dest (PATTERN (insn1));
- rtx b = get_store_dest (PATTERN (insn2));
+ if (GET_CODE (XEXP (mem, 0)) == PRE_MODIFY)
+ addr_rtx = XEXP (XEXP (mem, 0), 1);
+ else
+ addr_rtx = XEXP (mem, 0);
- if ((GET_CODE (XEXP (a, 0)) == REG
- || (GET_CODE (XEXP (a, 0)) == PLUS
- && GET_CODE (XEXP (XEXP (a, 0), 1)) == CONST_INT))
- && (GET_CODE (XEXP (b, 0)) == REG
- || (GET_CODE (XEXP (b, 0)) == PLUS
- && GET_CODE (XEXP (XEXP (b, 0), 1)) == CONST_INT)))
+ if (GET_CODE (addr_rtx) == REG)
+ {
+ *base = addr_rtx;
+ *offset = 0;
+ }
+ else if (GET_CODE (addr_rtx) == PLUS
+ && CONST_INT_P (XEXP (addr_rtx, 1)))
{
- HOST_WIDE_INT val0 = 0, val1 = 0, val_diff;
- rtx reg0, reg1;
+ *base = XEXP (addr_rtx, 0);
+ *offset = INTVAL (XEXP (addr_rtx, 1));
+ }
+ else
+ return false;
- if (GET_CODE (XEXP (a, 0)) == PLUS)
- {
- reg0 = XEXP (XEXP (a, 0), 0);
- val0 = INTVAL (XEXP (XEXP (a, 0), 1));
- }
- else
- reg0 = XEXP (a, 0);
+ return true;
+}
- if (GET_CODE (XEXP (b, 0)) == PLUS)
- {
- reg1 = XEXP (XEXP (b, 0), 0);
- val1 = INTVAL (XEXP (XEXP (b, 0), 1));
- }
- else
- reg1 = XEXP (b, 0);
+/* Return true if the target storage location of MEM1 is adjacent
+ to the target storage location of MEM2. */
+
+static bool
+adjacent_mem_locations (rtx mem1, rtx mem2)
+{
+ rtx reg1, reg2;
+ HOST_WIDE_INT off1, size1, off2, size2;
- val_diff = val1 - val0;
+ if (get_memref_parts (mem1, &reg1, &off1, &size1)
+ && get_memref_parts (mem2, &reg2, &off2, &size2))
+ return ((REGNO (reg1) == REGNO (reg2))
+ && ((off1 + size1 == off2)
+ || (off2 + size2 == off1)));
- return ((REGNO (reg0) == REGNO (reg1))
- && ((MEM_SIZE_KNOWN_P (a) && val_diff == MEM_SIZE (a))
- || (MEM_SIZE_KNOWN_P (b) && val_diff == -MEM_SIZE (b))));
- }
+ return false;
+}
+
+/* This function returns true if it can be determined that the two MEM
+ locations overlap by at least 1 byte based on base reg/offset/size. */
+
+static bool
+mem_locations_overlap (rtx mem1, rtx mem2)
+{
+ rtx reg1, reg2;
+ HOST_WIDE_INT off1, size1, off2, size2;
+
+ if (get_memref_parts (mem1, &reg1, &off1, &size1)
+ && get_memref_parts (mem2, &reg2, &off2, &size2))
+ return ((REGNO (reg1) == REGNO (reg2))
+ && (((off1 <= off2) && (off1 + size1 > off2))
+ || ((off2 <= off1) && (off2 + size2 > off1))));
return false;
}
static int
rs6000_adjust_priority (rtx insn ATTRIBUTE_UNUSED, int priority)
{
+ rtx load_mem, str_mem;
/* On machines (like the 750) which have asymmetric integer units,
where one integer unit can do multiply and divides and the other
can't, reduce the priority of multiply/divide so it is scheduled
}
if (rs6000_cpu == PROCESSOR_POWER6
- && ((load_store_pendulum == -2 && is_load_insn (insn))
- || (load_store_pendulum == 2 && is_store_insn (insn))))
+ && ((load_store_pendulum == -2 && is_load_insn (insn, &load_mem))
+ || (load_store_pendulum == 2 && is_store_insn (insn, &str_mem))))
/* Attach highest priority to insn if the scheduler has just issued two
stores and this instruction is a load, or two loads and this instruction
is a store. Power6 wants loads and stores scheduled alternately
return 1;
switch (rs6000_cpu_attr) {
- case CPU_RIOS1: /* ? */
case CPU_RS64A:
case CPU_PPC601: /* ? */
case CPU_PPC7450:
case CPU_PPC750:
case CPU_PPC7400:
case CPU_PPC8540:
+ case CPU_PPC8548:
case CPU_CELL:
case CPU_PPCE300C2:
case CPU_PPCE300C3:
case CPU_PPCE500MC:
case CPU_PPCE500MC64:
+ case CPU_PPCE5500:
+ case CPU_PPCE6500:
case CPU_TITAN:
return 2;
- case CPU_RIOS2:
case CPU_PPC476:
case CPU_PPC604:
case CPU_PPC604E:
static int
rs6000_use_sched_lookahead (void)
{
- if (rs6000_cpu_attr == CPU_PPC8540)
- return 4;
- if (rs6000_cpu_attr == CPU_CELL)
- return (reload_completed ? 8 : 0);
- return 0;
+ switch (rs6000_cpu_attr)
+ {
+ case CPU_PPC8540:
+ case CPU_PPC8548:
+ return 4;
+
+ case CPU_CELL:
+ return (reload_completed ? 8 : 0);
+
+ default:
+ return 0;
+ }
}
/* We are choosing insn from the ready queue. Return nonzero if INSN can be chosen. */
return 1;
}
-/* Determine is PAT refers to memory. */
+/* Determine if PAT refers to memory. If so, set MEM_REF to the MEM rtx
+ and return true. */
static bool
-is_mem_ref (rtx pat)
+find_mem_ref (rtx pat, rtx *mem_ref)
{
const char * fmt;
int i, j;
- bool ret = false;
/* stack_tie does not produce any real memory traffic. */
if (tie_operand (pat, VOIDmode))
return false;
if (GET_CODE (pat) == MEM)
- return true;
+ {
+ *mem_ref = pat;
+ return true;
+ }
/* Recursively process the pattern. */
fmt = GET_RTX_FORMAT (GET_CODE (pat));
- for (i = GET_RTX_LENGTH (GET_CODE (pat)) - 1; i >= 0 && !ret; i--)
+ for (i = GET_RTX_LENGTH (GET_CODE (pat)) - 1; i >= 0; i--)
{
if (fmt[i] == 'e')
- ret |= is_mem_ref (XEXP (pat, i));
+ {
+ if (find_mem_ref (XEXP (pat, i), mem_ref))
+ return true;
+ }
else if (fmt[i] == 'E')
for (j = XVECLEN (pat, i) - 1; j >= 0; j--)
- ret |= is_mem_ref (XVECEXP (pat, i, j));
+ {
+ if (find_mem_ref (XVECEXP (pat, i, j), mem_ref))
+ return true;
+ }
}
- return ret;
+ return false;
}
/* Determine if PAT is a PATTERN of a load insn. */
static bool
-is_load_insn1 (rtx pat)
+is_load_insn1 (rtx pat, rtx *load_mem)
{
if (!pat || pat == NULL_RTX)
return false;
if (GET_CODE (pat) == SET)
- return is_mem_ref (SET_SRC (pat));
+ return find_mem_ref (SET_SRC (pat), load_mem);
if (GET_CODE (pat) == PARALLEL)
{
int i;
for (i = 0; i < XVECLEN (pat, 0); i++)
- if (is_load_insn1 (XVECEXP (pat, 0, i)))
+ if (is_load_insn1 (XVECEXP (pat, 0, i), load_mem))
return true;
}
/* Determine if INSN loads from memory. */
static bool
-is_load_insn (rtx insn)
+is_load_insn (rtx insn, rtx *load_mem)
{
if (!insn || !INSN_P (insn))
return false;
if (GET_CODE (insn) == CALL_INSN)
return false;
- return is_load_insn1 (PATTERN (insn));
+ return is_load_insn1 (PATTERN (insn), load_mem);
}
/* Determine if PAT is a PATTERN of a store insn. */
static bool
-is_store_insn1 (rtx pat)
+is_store_insn1 (rtx pat, rtx *str_mem)
{
if (!pat || pat == NULL_RTX)
return false;
if (GET_CODE (pat) == SET)
- return is_mem_ref (SET_DEST (pat));
+ return find_mem_ref (SET_DEST (pat), str_mem);
if (GET_CODE (pat) == PARALLEL)
{
int i;
for (i = 0; i < XVECLEN (pat, 0); i++)
- if (is_store_insn1 (XVECEXP (pat, 0, i)))
+ if (is_store_insn1 (XVECEXP (pat, 0, i), str_mem))
return true;
}
/* Determine if INSN stores to memory. */
static bool
-is_store_insn (rtx insn)
+is_store_insn (rtx insn, rtx *str_mem)
{
if (!insn || !INSN_P (insn))
return false;
- return is_store_insn1 (PATTERN (insn));
-}
-
-/* Return the dest of a store insn. */
-
-static rtx
-get_store_dest (rtx pat)
-{
- gcc_assert (is_store_insn1 (pat));
-
- if (GET_CODE (pat) == SET)
- return SET_DEST (pat);
- else if (GET_CODE (pat) == PARALLEL)
- {
- int i;
-
- for (i = 0; i < XVECLEN (pat, 0); i++)
- {
- rtx inner_pat = XVECEXP (pat, 0, i);
- if (GET_CODE (inner_pat) == SET
- && is_mem_ref (SET_DEST (inner_pat)))
- return inner_pat;
- }
- }
- /* We shouldn't get here, because we should have either a simple
- store insn or a store with update which are covered above. */
- gcc_unreachable();
+ return is_store_insn1 (PATTERN (insn), str_mem);
}
/* Returns whether the dependence between INSN and NEXT is considered
{
rtx insn;
rtx next;
+ rtx load_mem, str_mem;
/* If the flag is not enabled - no dependence is considered costly;
allow all dependent insns in the same group.
next = DEP_CON (dep);
if (rs6000_sched_costly_dep == store_to_load_dep_costly
- && is_load_insn (next)
- && is_store_insn (insn))
+ && is_load_insn (next, &load_mem)
+ && is_store_insn (insn, &str_mem))
/* Prevent load after store in the same group. */
return true;
if (rs6000_sched_costly_dep == true_store_to_load_dep_costly
- && is_load_insn (next)
- && is_store_insn (insn)
- && DEP_TYPE (dep) == REG_DEP_TRUE)
+ && is_load_insn (next, &load_mem)
+ && is_store_insn (insn, &str_mem)
+ && DEP_TYPE (dep) == REG_DEP_TRUE
+ && mem_locations_overlap (str_mem, load_mem))
/* Prevent load after store in the same group if it is a true
dependence. */
return true;
{
int pos;
int i;
- rtx tmp;
+ rtx tmp, load_mem, str_mem;
- if (is_store_insn (last_scheduled_insn))
+ if (is_store_insn (last_scheduled_insn, &str_mem))
/* Issuing a store, swing the load_store_pendulum to the left */
load_store_pendulum--;
- else if (is_load_insn (last_scheduled_insn))
+ else if (is_load_insn (last_scheduled_insn, &load_mem))
/* Issuing a load, swing the load_store_pendulum to the right */
load_store_pendulum++;
else
while (pos >= 0)
{
- if (is_load_insn (ready[pos]))
+ if (is_load_insn (ready[pos], &load_mem))
{
/* Found a load. Move it to the head of the ready list,
and adjust it's priority so that it is more likely to
while (pos >= 0)
{
- if (is_load_insn (ready[pos])
+ if (is_load_insn (ready[pos], &load_mem)
&& !sel_sched_p ()
&& INSN_PRIORITY_KNOWN (ready[pos]))
{
while (pos >= 0)
{
- if (is_store_insn (ready[pos]))
+ if (is_store_insn (ready[pos], &str_mem))
{
+ rtx str_mem2;
/* Maintain the index of the first store found on the
list */
if (first_store_pos == -1)
first_store_pos = pos;
- if (is_store_insn (last_scheduled_insn)
- && adjacent_mem_locations (last_scheduled_insn,ready[pos]))
+ if (is_store_insn (last_scheduled_insn, &str_mem2)
+ && adjacent_mem_locations (str_mem, str_mem2))
{
/* Found an adjacent store. Move it to the head of the
ready list, and adjust it's priority so that it is
while (pos >= 0)
{
- if (is_store_insn (ready[pos])
+ if (is_store_insn (ready[pos], &str_mem)
&& !sel_sched_p ()
&& INSN_PRIORITY_KNOWN (ready[pos]))
{
if (!insn)
continue;
- FOR_EACH_DEP (insn, SD_LIST_FORW, sd_it, dep)
+ FOR_EACH_DEP (insn, SD_LIST_RES_FORW, sd_it, dep)
{
rtx next = DEP_CON (dep);
if (can_issue_more && !is_branch_slot_insn (next_insn))
can_issue_more--;
- while (can_issue_more > 0)
+ /* Power6 and Power7 have special group ending nop. */
+ if (rs6000_cpu_attr == CPU_POWER6 || rs6000_cpu_attr == CPU_POWER7)
{
- nop = gen_nop ();
+ nop = gen_group_ending_nop ();
emit_insn_before (nop, next_insn);
- can_issue_more--;
+ can_issue_more = 0;
}
+ else
+ while (can_issue_more > 0)
+ {
+ nop = gen_nop ();
+ emit_insn_before (nop, next_insn);
+ can_issue_more--;
+ }
*group_end = true;
return 0;
function descriptor name. This saves a lot of overriding code to
read the prefixes. */
+static void rs6000_elf_encode_section_info (tree, rtx, int) ATTRIBUTE_UNUSED;
static void
rs6000_elf_encode_section_info (tree decl, rtx rtl, int first)
{
while (!VEC_empty (branch_island, branch_islands))
{
- branch_island *bi = VEC_last (branch_island, branch_islands);
+ branch_island *bi = &VEC_last (branch_island, branch_islands);
const char *label = IDENTIFIER_POINTER (bi->label_name);
const char *name = IDENTIFIER_POINTER (bi->function_name);
char name_buf[512];
This differs from default_named_section_asm_out_constructor in
that we have special handling for -mrelocatable. */
+static void rs6000_elf_asm_out_constructor (rtx, int) ATTRIBUTE_UNUSED;
static void
rs6000_elf_asm_out_constructor (rtx symbol, int priority)
{
assemble_integer (symbol, POINTER_SIZE / BITS_PER_UNIT, POINTER_SIZE, 1);
}
+static void rs6000_elf_asm_out_destructor (rtx, int) ATTRIBUTE_UNUSED;
static void
rs6000_elf_asm_out_destructor (rtx symbol, int priority)
{
ASM_OUTPUT_LABEL (file, name);
}
+static void rs6000_elf_file_end (void) ATTRIBUTE_UNUSED;
static void
rs6000_elf_file_end (void)
{
/* Debug form of ADDRESS_COST that is selected if -mdebug=cost. */
static int
-rs6000_debug_address_cost (rtx x, bool speed)
+rs6000_debug_address_cost (rtx x, enum machine_mode mode,
+ addr_space_t as, bool speed)
{
- int ret = TARGET_ADDRESS_COST (x, speed);
+ int ret = TARGET_ADDRESS_COST (x, mode, as, speed);
On RS/6000 an integer value is in r3 and a floating-point value is in
fp1, unless -msoft-float. */
-rtx
+static rtx
rs6000_function_value (const_tree valtype,
const_tree fn_decl_or_type ATTRIBUTE_UNUSED,
bool outgoing ATTRIBUTE_UNUSED)
We need r30 if -mminimal-toc was specified, and there are constant pool
references. */
-bool
+static bool
rs6000_can_eliminate (const int from, const int to)
{
return (from == ARG_POINTER_REGNUM && to == STACK_POINTER_REGNUM
{
if (regno <= 63 || write_symbols != DWARF2_DEBUG)
return regno;
- if (regno == MQ_REGNO)
- return 100;
if (regno == LR_REGNO)
return 108;
if (regno == CTR_REGNO)
#ifdef MASK_STRICT_ALIGN
{ "strict-align", MASK_STRICT_ALIGN, false, false },
#endif
- { "power", MASK_POWER, false, false },
- { "power2", MASK_POWER2, false, false },
- { "powerpc", MASK_POWERPC, false, false },
{ "soft-float", MASK_SOFT_FLOAT, false, false },
{ "string", MASK_STRING, false, false },
};
{ "frsqrte", RS6000_BTM_FRSQRTE, false, false },
{ "frsqrtes", RS6000_BTM_FRSQRTES, false, false },
{ "popcntd", RS6000_BTM_POPCNTD, false, false },
- { "powerpc", RS6000_BTM_POWERPC, false, false },
{ "cell", RS6000_BTM_CELL, false, false },
};
bool offsettable_p,
bool reg_reg_p)
{
- rtx stack = assign_stack_temp (mode, GET_MODE_SIZE (mode), 0);
+ rtx stack = assign_stack_temp (mode, GET_MODE_SIZE (mode));
rtx addr = XEXP (stack, 0);
int strict_p = (reload_in_progress || reload_completed);
if (!legitimate_indirect_address_p (addr, strict_p))
{
if (offsettable_p
- && !rs6000_legitimate_offset_address_p (mode, addr, strict_p))
+ && !rs6000_legitimate_offset_address_p (mode, addr, strict_p, true))
stack = replace_equiv_address (stack, copy_addr_to_reg (addr));
else if (reg_reg_p && !legitimate_indexed_address_p (addr, strict_p))
/* This function emits the simple thunk routine that is used to preserve
the link stack on the 476 cpu. */
+static void rs6000_code_end (void) ATTRIBUTE_UNUSED;
static void
rs6000_code_end (void)
{