/* Subroutines used for code generation on IBM RS/6000.
- Copyright (C) 1991-2018 Free Software Foundation, Inc.
+ Copyright (C) 1991-2019 Free Software Foundation, Inc.
Contributed by Richard Kenner (kenner@vlsi1.ultra.nyu.edu)
This file is part of GCC.
#include "case-cfn-macros.h"
#include "ppc-auxv.h"
#include "tree-ssa-propagate.h"
+#include "tree-vrp.h"
+#include "tree-ssanames.h"
/* This file should be included last. */
#include "target-def.h"
static void macho_branch_islands (void);
static tree get_prev_label (tree);
#endif
-static rtx rs6000_legitimize_reload_address (rtx, machine_mode, int, int,
- int, int *);
-static rtx rs6000_debug_legitimize_reload_address (rtx, machine_mode, int,
- int, int, int *);
static bool rs6000_mode_dependent_address (const_rtx);
static bool rs6000_debug_mode_dependent_address (const_rtx);
static bool rs6000_offsettable_memref_p (rtx, machine_mode, bool);
static bool rs6000_save_toc_in_prologue_p (void);
static rtx rs6000_internal_arg_pointer (void);
-rtx (*rs6000_legitimize_reload_address_ptr) (rtx, machine_mode, int, int,
- int, int *)
- = rs6000_legitimize_reload_address;
-
/* Indirect hook for the mode-dependent-address test, so that a debug
   tracing variant (rs6000_debug_mode_dependent_address) can be installed
   at option-processing time.  */
static bool (*rs6000_mode_dependent_address_ptr) (const_rtx)
  = rs6000_mode_dependent_address;
/* Default register names. */
/* Default register names, indexed by hard register number: 32 GPRs,
   32 FPRs, 32 VRs, then LR, CTR, CA, AP, the 8 CR fields, and finally
   VRSAVE, VSCR and the soft frame pointer (111 entries).  The diff
   markers that had been pasted into this initializer are removed.  */
char rs6000_reg_names[][8] =
{
  /* GPRs */
  "0", "1", "2", "3", "4", "5", "6", "7",
  "8", "9", "10", "11", "12", "13", "14", "15",
  "16", "17", "18", "19", "20", "21", "22", "23",
  "24", "25", "26", "27", "28", "29", "30", "31",
  /* FPRs */
  "0", "1", "2", "3", "4", "5", "6", "7",
  "8", "9", "10", "11", "12", "13", "14", "15",
  "16", "17", "18", "19", "20", "21", "22", "23",
  "24", "25", "26", "27", "28", "29", "30", "31",
  /* VRs */
  "0", "1", "2", "3", "4", "5", "6", "7",
  "8", "9", "10", "11", "12", "13", "14", "15",
  "16", "17", "18", "19", "20", "21", "22", "23",
  "24", "25", "26", "27", "28", "29", "30", "31",
  /* lr ctr ca ap */
  "lr", "ctr", "ca", "ap",
  /* cr0..cr7 */
  "0", "1", "2", "3", "4", "5", "6", "7",
  /* vrsave vscr sfp */
  "vrsave", "vscr", "sfp",
};
#ifdef TARGET_REGNAMES
/* Alternate register names used when TARGET_REGNAMES is in effect; must
   parallel rs6000_reg_names entry for entry (111 entries).  The diff
   markers that had been pasted into this initializer are removed.  */
static const char alt_reg_names[][8] =
{
  /* GPRs */
  "%r0", "%r1", "%r2", "%r3", "%r4", "%r5", "%r6", "%r7",
  "%r8", "%r9", "%r10", "%r11", "%r12", "%r13", "%r14", "%r15",
  "%r16", "%r17", "%r18", "%r19", "%r20", "%r21", "%r22", "%r23",
  "%r24", "%r25", "%r26", "%r27", "%r28", "%r29", "%r30", "%r31",
  /* FPRs */
  "%f0", "%f1", "%f2", "%f3", "%f4", "%f5", "%f6", "%f7",
  "%f8", "%f9", "%f10", "%f11", "%f12", "%f13", "%f14", "%f15",
  "%f16", "%f17", "%f18", "%f19", "%f20", "%f21", "%f22", "%f23",
  "%f24", "%f25", "%f26", "%f27", "%f28", "%f29", "%f30", "%f31",
  /* VRs */
  "%v0", "%v1", "%v2", "%v3", "%v4", "%v5", "%v6", "%v7",
  "%v8", "%v9", "%v10", "%v11", "%v12", "%v13", "%v14", "%v15",
  "%v16", "%v17", "%v18", "%v19", "%v20", "%v21", "%v22", "%v23",
  "%v24", "%v25", "%v26", "%v27", "%v28", "%v29", "%v30", "%v31",
  /* lr ctr ca ap */
  "lr", "ctr", "ca", "ap",
  /* cr0..cr7 */
  "%cr0", "%cr1", "%cr2", "%cr3", "%cr4", "%cr5", "%cr6", "%cr7",
  /* vrsave vscr sfp */
  "vrsave", "vscr", "sfp",
};
#endif
#define TARGET_REGISTER_MOVE_COST rs6000_register_move_cost
#undef TARGET_MEMORY_MOVE_COST
#define TARGET_MEMORY_MOVE_COST rs6000_memory_move_cost
+#undef TARGET_IRA_CHANGE_PSEUDO_ALLOCNO_CLASS
+#define TARGET_IRA_CHANGE_PSEUDO_ALLOCNO_CLASS \
+ rs6000_ira_change_pseudo_allocno_class
#undef TARGET_CANNOT_COPY_INSN_P
#define TARGET_CANNOT_COPY_INSN_P rs6000_cannot_copy_insn_p
#undef TARGET_RTX_COSTS
/* Implement TARGET_HARD_REGNO_CALL_PART_CLOBBERED. */
static bool
-rs6000_hard_regno_call_part_clobbered (unsigned int regno, machine_mode mode)
+rs6000_hard_regno_call_part_clobbered (rtx_insn *insn ATTRIBUTE_UNUSED,
+ unsigned int regno, machine_mode mode)
{
if (TARGET_32BIT
&& TARGET_POWERPC64
"f reg_class = %s\n"
"v reg_class = %s\n"
"wa reg_class = %s\n"
- "wb reg_class = %s\n"
"wd reg_class = %s\n"
"we reg_class = %s\n"
"wf reg_class = %s\n"
- "wg reg_class = %s\n"
- "wh reg_class = %s\n"
"wi reg_class = %s\n"
- "wj reg_class = %s\n"
- "wk reg_class = %s\n"
- "wl reg_class = %s\n"
- "wm reg_class = %s\n"
- "wo reg_class = %s\n"
"wp reg_class = %s\n"
"wq reg_class = %s\n"
"wr reg_class = %s\n"
"ws reg_class = %s\n"
"wt reg_class = %s\n"
- "wu reg_class = %s\n"
"wv reg_class = %s\n"
"ww reg_class = %s\n"
"wx reg_class = %s\n"
- "wy reg_class = %s\n"
- "wz reg_class = %s\n"
"wA reg_class = %s\n"
- "wH reg_class = %s\n"
- "wI reg_class = %s\n"
- "wJ reg_class = %s\n"
- "wK reg_class = %s\n"
"\n",
reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_d]],
reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_f]],
reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_v]],
reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wa]],
- reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wb]],
reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wd]],
reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_we]],
reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wf]],
- reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wg]],
- reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wh]],
reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wi]],
- reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wj]],
- reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wk]],
- reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wl]],
- reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wm]],
- reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wo]],
reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wp]],
reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wq]],
reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wr]],
reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_ws]],
reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wt]],
- reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wu]],
reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wv]],
reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_ww]],
reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wx]],
- reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wy]],
- reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wz]],
- reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wA]],
- reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wH]],
- reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wI]],
- reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wJ]],
- reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wK]]);
+ reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wA]]);
nl = "\n";
for (m = 0; m < NUM_MACHINE_MODES; ++m)
for (r = 32; r < 64; ++r)
rs6000_regno_regclass[r] = FLOAT_REGS;
- for (r = 64; r < FIRST_PSEUDO_REGISTER; ++r)
+ for (r = 64; HARD_REGISTER_NUM_P (r); ++r)
rs6000_regno_regclass[r] = NO_REGS;
for (r = FIRST_ALTIVEC_REGNO; r <= LAST_ALTIVEC_REGNO; ++r)
rs6000_regno_regclass[CA_REGNO] = NO_REGS;
rs6000_regno_regclass[VRSAVE_REGNO] = VRSAVE_REGS;
rs6000_regno_regclass[VSCR_REGNO] = VRSAVE_REGS;
- rs6000_regno_regclass[TFHAR_REGNO] = SPR_REGS;
- rs6000_regno_regclass[TFIAR_REGNO] = SPR_REGS;
- rs6000_regno_regclass[TEXASR_REGNO] = SPR_REGS;
rs6000_regno_regclass[ARG_POINTER_REGNUM] = BASE_REGS;
rs6000_regno_regclass[FRAME_POINTER_REGNUM] = BASE_REGS;
below. */
gcc_assert ((int)VECTOR_NONE == 0);
memset ((void *) &rs6000_vector_unit[0], '\0', sizeof (rs6000_vector_unit));
- memset ((void *) &rs6000_vector_mem[0], '\0', sizeof (rs6000_vector_unit));
+ memset ((void *) &rs6000_vector_mem[0], '\0', sizeof (rs6000_vector_mem));
gcc_assert ((int)CODE_FOR_nothing == 0);
memset ((void *) ®_addr[0], '\0', sizeof (reg_addr));
wc - Reserved to represent individual CR bits (used in LLVM).
wd - Preferred register class for V2DFmode.
wf - Preferred register class for V4SFmode.
- wg - Float register for power6x move insns.
- wh - FP register for direct move instructions.
wi - FP or VSX register to hold 64-bit integers for VSX insns.
- wj - FP or VSX register to hold 64-bit integers for direct moves.
- wk - FP or VSX register to hold 64-bit doubles for direct moves.
- wl - Float register if we can do 32-bit signed int loads.
- wm - VSX register for ISA 2.07 direct move operations.
wn - always NO_REGS.
wr - GPR if 64-bit mode is permitted.
ws - Register class to do ISA 2.06 DF operations.
wt - VSX register for TImode in VSX registers.
- wu - Altivec register for ISA 2.07 VSX SF/SI load/stores.
wv - Altivec register for ISA 2.06 VSX DF/DI load/stores.
ww - Register class to do SF conversions in with VSX operations.
- wx - Float register if we can do 32-bit int stores.
- wy - Register class to do ISA 2.07 SF operations.
- wz - Float register if we can do 32-bit unsigned int loads.
- wH - Altivec register if SImode is allowed in VSX registers.
- wI - VSX register if SImode is allowed in VSX registers.
- wJ - VSX register if QImode/HImode are allowed in VSX registers.
- wK - Altivec register if QImode/HImode are allowed in VSX registers. */
+ wx - Float register if we can do 32-bit int stores. */
if (TARGET_HARD_FLOAT)
{
if (TARGET_ALTIVEC)
rs6000_constraints[RS6000_CONSTRAINT_v] = ALTIVEC_REGS;
- if (TARGET_MFPGPR) /* DFmode */
- rs6000_constraints[RS6000_CONSTRAINT_wg] = FLOAT_REGS;
-
- if (TARGET_LFIWAX)
- rs6000_constraints[RS6000_CONSTRAINT_wl] = FLOAT_REGS; /* DImode */
-
- if (TARGET_DIRECT_MOVE)
- {
- rs6000_constraints[RS6000_CONSTRAINT_wh] = FLOAT_REGS;
- rs6000_constraints[RS6000_CONSTRAINT_wj] /* DImode */
- = rs6000_constraints[RS6000_CONSTRAINT_wi];
- rs6000_constraints[RS6000_CONSTRAINT_wk] /* DFmode */
- = rs6000_constraints[RS6000_CONSTRAINT_ws];
- rs6000_constraints[RS6000_CONSTRAINT_wm] = VSX_REGS;
- }
-
if (TARGET_POWERPC64)
{
rs6000_constraints[RS6000_CONSTRAINT_wr] = GENERAL_REGS;
}
if (TARGET_P8_VECTOR) /* SFmode */
- {
- rs6000_constraints[RS6000_CONSTRAINT_wu] = ALTIVEC_REGS;
- rs6000_constraints[RS6000_CONSTRAINT_wy] = VSX_REGS;
- rs6000_constraints[RS6000_CONSTRAINT_ww] = VSX_REGS;
- }
+ rs6000_constraints[RS6000_CONSTRAINT_ww] = VSX_REGS;
else if (TARGET_VSX)
rs6000_constraints[RS6000_CONSTRAINT_ww] = FLOAT_REGS;
if (TARGET_STFIWX)
rs6000_constraints[RS6000_CONSTRAINT_wx] = FLOAT_REGS; /* DImode */
- if (TARGET_LFIWZX)
- rs6000_constraints[RS6000_CONSTRAINT_wz] = FLOAT_REGS; /* DImode */
-
if (TARGET_FLOAT128_TYPE)
{
rs6000_constraints[RS6000_CONSTRAINT_wq] = VSX_REGS; /* KFmode */
rs6000_constraints[RS6000_CONSTRAINT_wp] = VSX_REGS; /* TFmode */
}
- if (TARGET_P9_VECTOR)
- {
- /* Support for new D-form instructions. */
- rs6000_constraints[RS6000_CONSTRAINT_wb] = ALTIVEC_REGS;
-
- /* Support for ISA 3.0 (power9) vectors. */
- rs6000_constraints[RS6000_CONSTRAINT_wo] = VSX_REGS;
- }
-
/* Support for new direct moves (ISA 3.0 + 64bit). */
if (TARGET_DIRECT_MOVE_128)
rs6000_constraints[RS6000_CONSTRAINT_we] = VSX_REGS;
- /* Support small integers in VSX registers. */
- if (TARGET_P8_VECTOR)
- {
- rs6000_constraints[RS6000_CONSTRAINT_wH] = ALTIVEC_REGS;
- rs6000_constraints[RS6000_CONSTRAINT_wI] = FLOAT_REGS;
- if (TARGET_P9_VECTOR)
- {
- rs6000_constraints[RS6000_CONSTRAINT_wJ] = FLOAT_REGS;
- rs6000_constraints[RS6000_CONSTRAINT_wK] = ALTIVEC_REGS;
- }
- }
-
/* Set up the reload helper and direct move functions. */
if (TARGET_VSX || TARGET_ALTIVEC)
{
}
/* Precalculate HARD_REGNO_NREGS. */
- for (r = 0; r < FIRST_PSEUDO_REGISTER; ++r)
+ for (r = 0; HARD_REGISTER_NUM_P (r); ++r)
for (m = 0; m < NUM_MACHINE_MODES; ++m)
rs6000_hard_regno_nregs[m][r]
- = rs6000_hard_regno_nregs_internal (r, (machine_mode)m);
+ = rs6000_hard_regno_nregs_internal (r, (machine_mode) m);
/* Precalculate TARGET_HARD_REGNO_MODE_OK. */
- for (r = 0; r < FIRST_PSEUDO_REGISTER; ++r)
+ for (r = 0; HARD_REGISTER_NUM_P (r); ++r)
for (m = 0; m < NUM_MACHINE_MODES; ++m)
- if (rs6000_hard_regno_mode_ok_uncached (r, (machine_mode)m))
- rs6000_hard_regno_mode_ok_p[m][r] = true;
+ rs6000_hard_regno_mode_ok_p[m][r]
+ = rs6000_hard_regno_mode_ok_uncached (r, (machine_mode) m);
/* Precalculate CLASS_MAX_NREGS sizes. */
for (c = 0; c < LIM_REG_CLASSES; ++c)
/* Don't override by the processor default if given explicitly. */
set_masks &= ~rs6000_isa_flags_explicit;
+ if (global_init_p && rs6000_dejagnu_cpu_index >= 0)
+ rs6000_cpu_index = rs6000_dejagnu_cpu_index;
+
/* Process the -mcpu=<xxx> and -mtune=<xxx> argument. If the user changed
the cpu in a target attribute or pragma, but did not specify a tuning
option, use the cpu for the tuning option rather than the option specified
if (!TARGET_HARD_FLOAT)
{
if (rs6000_isa_flags_explicit & OPTION_MASK_VSX)
- msg = N_("-mvsx requires hardware floating point");
+ msg = N_("%<-mvsx%> requires hardware floating point");
else
{
rs6000_isa_flags &= ~ OPTION_MASK_VSX;
}
}
else if (TARGET_AVOID_XFORM > 0)
- msg = N_("-mvsx needs indexed addressing");
+ msg = N_("%<-mvsx%> needs indexed addressing");
else if (!TARGET_ALTIVEC && (rs6000_isa_flags_explicit
& OPTION_MASK_ALTIVEC))
{
if (rs6000_isa_flags_explicit & OPTION_MASK_VSX)
- msg = N_("-mvsx and -mno-altivec are incompatible");
+ msg = N_("%<-mvsx%> and %<-mno-altivec%> are incompatible");
else
- msg = N_("-mno-altivec disables vsx");
+ msg = N_("%<-mno-altivec%> disables vsx");
}
if (msg)
if ((TARGET_QUAD_MEMORY || TARGET_QUAD_MEMORY_ATOMIC) && !TARGET_POWERPC64)
{
if ((rs6000_isa_flags_explicit & OPTION_MASK_QUAD_MEMORY) != 0)
- warning (0, N_("-mquad-memory requires 64-bit mode"));
+ warning (0, N_("%<-mquad-memory%> requires 64-bit mode"));
if ((rs6000_isa_flags_explicit & OPTION_MASK_QUAD_MEMORY_ATOMIC) != 0)
- warning (0, N_("-mquad-memory-atomic requires 64-bit mode"));
+ warning (0, N_("%<-mquad-memory-atomic%> requires 64-bit mode"));
rs6000_isa_flags &= ~(OPTION_MASK_QUAD_MEMORY
| OPTION_MASK_QUAD_MEMORY_ATOMIC);
if (TARGET_QUAD_MEMORY && !WORDS_BIG_ENDIAN)
{
if ((rs6000_isa_flags_explicit & OPTION_MASK_QUAD_MEMORY) != 0)
- warning (0, N_("-mquad-memory is not available in little endian "
+ warning (0, N_("%<-mquad-memory%> is not available in little endian "
"mode"));
rs6000_isa_flags &= ~OPTION_MASK_QUAD_MEMORY;
if (main_target_opt != NULL
&& (main_target_opt->x_rs6000_long_double_type_size
!= default_long_double_size))
- error ("target attribute or pragma changes long double size");
+ error ("target attribute or pragma changes %<long double%> size");
else
rs6000_long_double_type_size = default_long_double_size;
}
{
warned_change_long_double = true;
if (TARGET_IEEEQUAD)
- warning (OPT_Wpsabi, "Using IEEE extended precision long double");
+ warning (OPT_Wpsabi, "Using IEEE extended precision "
+ "%<long double%>");
else
- warning (OPT_Wpsabi, "Using IBM extended precision long double");
+ warning (OPT_Wpsabi, "Using IBM extended precision "
+ "%<long double%>");
}
}
}
if (!TARGET_VSX)
{
if ((rs6000_isa_flags_explicit & OPTION_MASK_FLOAT128_KEYWORD) != 0)
- error ("%qs requires VSX support", "-mfloat128");
+ error ("%qs requires VSX support", "%<-mfloat128%>");
TARGET_FLOAT128_TYPE = 0;
rs6000_isa_flags &= ~(OPTION_MASK_FLOAT128_KEYWORD
else if (!TARGET_FLOAT128_TYPE)
{
TARGET_FLOAT128_TYPE = 1;
- warning (0, "The -mfloat128 option may not be fully supported");
+ warning (0, "The %<-mfloat128%> option may not be fully supported");
}
}
&& (rs6000_isa_flags & ISA_3_0_MASKS_IEEE) != ISA_3_0_MASKS_IEEE)
{
if ((rs6000_isa_flags_explicit & OPTION_MASK_FLOAT128_HW) != 0)
- error ("%qs requires full ISA 3.0 support", "-mfloat128-hardware");
+ error ("%qs requires full ISA 3.0 support", "%<-mfloat128-hardware%>");
rs6000_isa_flags &= ~OPTION_MASK_FLOAT128_HW;
}
if (TARGET_FLOAT128_HW && !TARGET_64BIT)
{
if ((rs6000_isa_flags_explicit & OPTION_MASK_FLOAT128_HW) != 0)
- error ("%qs requires %qs", "-mfloat128-hardware", "-m64");
+ error ("%qs requires %qs", "%<-mfloat128-hardware%>", "-m64");
rs6000_isa_flags &= ~OPTION_MASK_FLOAT128_HW;
}
+ /* -mpcrel requires prefixed load/store addressing. */
+ if (TARGET_PCREL && !TARGET_PREFIXED_ADDR)
+ {
+ if ((rs6000_isa_flags_explicit & OPTION_MASK_PCREL) != 0)
+ error ("%qs requires %qs", "-mpcrel", "-mprefixed-addr");
+
+ rs6000_isa_flags &= ~OPTION_MASK_PCREL;
+ }
+
+ /* -mprefixed-addr (and hence -mpcrel) requires -mcpu=future. */
+ if (TARGET_PREFIXED_ADDR && !TARGET_FUTURE)
+ {
+ if ((rs6000_isa_flags_explicit & OPTION_MASK_PCREL) != 0)
+ error ("%qs requires %qs", "-mprefixed-addr", "-mcpu=future");
+
+ rs6000_isa_flags &= ~(OPTION_MASK_PCREL | OPTION_MASK_PREFIXED_ADDR);
+ }
+
/* Print the options after updating the defaults. */
if (TARGET_DEBUG_REG || TARGET_DEBUG_TARGET)
rs6000_print_isa_options (stderr, 0, "after defaults", rs6000_isa_flags);
= rs6000_debug_can_change_mode_class;
rs6000_preferred_reload_class_ptr
= rs6000_debug_preferred_reload_class;
- rs6000_legitimize_reload_address_ptr
- = rs6000_debug_legitimize_reload_address;
rs6000_mode_dependent_address_ptr
= rs6000_debug_mode_dependent_address;
}
&& rs6000_tune != PROCESSOR_POWER7
&& rs6000_tune != PROCESSOR_POWER8
&& rs6000_tune != PROCESSOR_POWER9
+ && rs6000_tune != PROCESSOR_FUTURE
&& rs6000_tune != PROCESSOR_PPCA2
&& rs6000_tune != PROCESSOR_CELL
&& rs6000_tune != PROCESSOR_PPC476);
|| rs6000_tune == PROCESSOR_POWER7
|| rs6000_tune == PROCESSOR_POWER8
|| rs6000_tune == PROCESSOR_POWER9
+ || rs6000_tune == PROCESSOR_FUTURE
|| rs6000_tune == PROCESSOR_PPCE500MC
|| rs6000_tune == PROCESSOR_PPCE500MC64
|| rs6000_tune == PROCESSOR_PPCE5500
break;
case PROCESSOR_POWER9:
+ case PROCESSOR_FUTURE:
rs6000_cost = &power9_cost;
break;
/* Default CPU string for rs6000*_file_start functions. */
static const char *rs6000_default_cpu;
#ifdef USING_ELFOS_H
/* Cached name used for the .machine pseudo-op, computed once from the ISA
   flags at asm-file start.  */
static const char *rs6000_machine;

/* Return the .machine name corresponding to the current ISA flags, by
   testing for option bits that are new in each successive ISA level,
   from newest to oldest.  */
static const char *
rs6000_machine_from_flags (void)
{
  if ((rs6000_isa_flags & (ISA_FUTURE_MASKS_SERVER & ~ISA_3_0_MASKS_SERVER))
      != 0)
    return "future";
  if ((rs6000_isa_flags & (ISA_3_0_MASKS_SERVER & ~ISA_2_7_MASKS_SERVER)) != 0)
    return "power9";
  if ((rs6000_isa_flags & (ISA_2_7_MASKS_SERVER & ~ISA_2_6_MASKS_SERVER)) != 0)
    return "power8";
  if ((rs6000_isa_flags & (ISA_2_6_MASKS_SERVER & ~ISA_2_5_MASKS_SERVER)) != 0)
    return "power7";
  if ((rs6000_isa_flags & (ISA_2_5_MASKS_SERVER & ~ISA_2_4_MASKS)) != 0)
    return "power6";
  if ((rs6000_isa_flags & (ISA_2_4_MASKS & ~ISA_2_1_MASKS)) != 0)
    return "power5";
  if ((rs6000_isa_flags & ISA_2_1_MASKS) != 0)
    return "power4";
  if ((rs6000_isa_flags & OPTION_MASK_POWERPC64) != 0)
    return "ppc64";
  return "ppc";
}

/* Emit a .machine directive for the cached machine name.  */
static void
emit_asm_machine (void)
{
  fprintf (asm_out_file, "\t.machine %s\n", rs6000_machine);
}
#endif

+
/* Do anything needed at the start of the asm file. */
static void
}
#ifdef USING_ELFOS_H
+ rs6000_machine = rs6000_machine_from_flags ();
if (!(rs6000_default_cpu && rs6000_default_cpu[0])
&& !global_options_set.x_rs6000_cpu_index)
- {
- fputs ("\t.machine ", asm_out_file);
- if ((rs6000_isa_flags & OPTION_MASK_MODULO) != 0)
- fputs ("power9\n", asm_out_file);
- else if ((rs6000_isa_flags & OPTION_MASK_DIRECT_MOVE) != 0)
- fputs ("power8\n", asm_out_file);
- else if ((rs6000_isa_flags & OPTION_MASK_POPCNTD) != 0)
- fputs ("power7\n", asm_out_file);
- else if ((rs6000_isa_flags & OPTION_MASK_CMPB) != 0)
- fputs ("power6\n", asm_out_file);
- else if ((rs6000_isa_flags & OPTION_MASK_POPCNTB) != 0)
- fputs ("power5\n", asm_out_file);
- else if ((rs6000_isa_flags & OPTION_MASK_MFCRF) != 0)
- fputs ("power4\n", asm_out_file);
- else if ((rs6000_isa_flags & OPTION_MASK_POWERPC64) != 0)
- fputs ("ppc64\n", asm_out_file);
- else
- fputs ("ppc\n", asm_out_file);
- }
+ emit_asm_machine ();
#endif
if (DEFAULT_ABI == ABI_ELFv2)
else if (mode == V2DImode)
{
- if (GET_CODE (CONST_VECTOR_ELT (op, 0)) != CONST_INT
- || GET_CODE (CONST_VECTOR_ELT (op, 1)) != CONST_INT)
+ if (!CONST_INT_P (CONST_VECTOR_ELT (op, 0))
+ || !CONST_INT_P (CONST_VECTOR_ELT (op, 1)))
return false;
if (zero_constant (op, mode))
default:
break;
case E_V1TImode:
- gcc_assert (INTVAL (elt) == 0 && inner_mode == TImode);
emit_move_insn (target, gen_lowpart (TImode, vec));
break;
case E_V2DFmode:
switch (mode)
{
+ case E_V1TImode:
+ emit_move_insn (target, gen_lowpart (TImode, vec));
+ return;
+
case E_V2DFmode:
emit_insn (gen_vsx_extract_v2df_var (target, vec, elt));
return;
}
}
- gcc_assert (CONST_INT_P (elt));
-
/* Allocate mode-sized buffer. */
mem = assign_stack_temp (mode, GET_MODE_SIZE (mode));
emit_move_insn (mem, vec);
+ if (CONST_INT_P (elt))
+ {
+ int modulo_elt = INTVAL (elt) % GET_MODE_NUNITS (mode);
- /* Add offset to field within buffer matching vector element. */
- mem = adjust_address_nv (mem, inner_mode,
- INTVAL (elt) * GET_MODE_SIZE (inner_mode));
-
- emit_move_insn (target, adjust_address_nv (mem, inner_mode, 0));
-}
-
-/* Helper function to return the register number of a RTX. */
-static inline int
-regno_or_subregno (rtx op)
-{
- if (REG_P (op))
- return REGNO (op);
- else if (SUBREG_P (op))
- return subreg_regno (op);
+ /* Add offset to field within buffer matching vector element. */
+ mem = adjust_address_nv (mem, inner_mode,
+ modulo_elt * GET_MODE_SIZE (inner_mode));
+ emit_move_insn (target, adjust_address_nv (mem, inner_mode, 0));
+ }
else
- gcc_unreachable ();
+ {
+ unsigned int ele_size = GET_MODE_SIZE (inner_mode);
+ rtx num_ele_m1 = GEN_INT (GET_MODE_NUNITS (mode) - 1);
+ rtx new_addr = gen_reg_rtx (Pmode);
+
+ elt = gen_rtx_AND (Pmode, elt, num_ele_m1);
+ if (ele_size > 1)
+ elt = gen_rtx_MULT (Pmode, elt, GEN_INT (ele_size));
+ new_addr = gen_rtx_PLUS (Pmode, XEXP (mem, 0), elt);
+ new_addr = change_address (mem, inner_mode, new_addr);
+ emit_move_insn (target, new_addr);
+ }
}
/* Adjust a memory address (MEM) of a vector type to point to a scalar field
{
rtx op1 = XEXP (new_addr, 1);
addr_mask_type addr_mask;
- int scalar_regno = regno_or_subregno (scalar_reg);
+ unsigned int scalar_regno = reg_or_subregno (scalar_reg);
- gcc_assert (scalar_regno < FIRST_PSEUDO_REGISTER);
+ gcc_assert (HARD_REGISTER_NUM_P (scalar_regno));
if (INT_REGNO_P (scalar_regno))
addr_mask = reg_addr[scalar_mode].addr_mask[RELOAD_REG_GPR];
rtx tmp_altivec)
{
machine_mode mode = GET_MODE (src);
- machine_mode scalar_mode = GET_MODE (dest);
+ machine_mode scalar_mode = GET_MODE_INNER (GET_MODE (src));
unsigned scalar_size = GET_MODE_SIZE (scalar_mode);
int byte_shift = exact_log2 (scalar_size);
systems. */
if (MEM_P (src))
{
+ int num_elements = GET_MODE_NUNITS (mode);
+ rtx num_ele_m1 = GEN_INT (num_elements - 1);
+
+ emit_insn (gen_anddi3 (element, element, num_ele_m1));
gcc_assert (REG_P (tmp_gpr));
emit_move_insn (dest, rs6000_adjust_vec_address (dest, src, element,
tmp_gpr, scalar_mode));
else if (REG_P (src) || SUBREG_P (src))
{
- int bit_shift = byte_shift + 3;
+ int num_elements = GET_MODE_NUNITS (mode);
+ int bits_in_element = mode_to_bits (GET_MODE_INNER (mode));
+ int bit_shift = 7 - exact_log2 (num_elements);
rtx element2;
- int dest_regno = regno_or_subregno (dest);
- int src_regno = regno_or_subregno (src);
- int element_regno = regno_or_subregno (element);
+ unsigned int dest_regno = reg_or_subregno (dest);
+ unsigned int src_regno = reg_or_subregno (src);
+ unsigned int element_regno = reg_or_subregno (element);
gcc_assert (REG_P (tmp_gpr));
{
if (!BYTES_BIG_ENDIAN)
{
- rtx num_ele_m1 = GEN_INT (GET_MODE_NUNITS (mode) - 1);
+ rtx num_ele_m1 = GEN_INT (num_elements - 1);
emit_insn (gen_anddi3 (tmp_gpr, element, num_ele_m1));
emit_insn (gen_subdi3 (tmp_gpr, num_ele_m1, tmp_gpr));
emit_insn (gen_vsx_vslo_v2di (tmp_altivec_di, src_v2di,
tmp_altivec));
emit_move_insn (tmp_gpr_di, tmp_altivec_di);
- emit_insn (gen_ashrdi3 (tmp_gpr_di, tmp_gpr_di,
- GEN_INT (64 - (8 * scalar_size))));
+ emit_insn (gen_lshrdi3 (tmp_gpr_di, tmp_gpr_di,
+ GEN_INT (64 - bits_in_element)));
return;
}
if (DEFAULT_ABI != ABI_V4)
return 0;
- if (GET_CODE (op) == SYMBOL_REF)
+ if (SYMBOL_REF_P (op))
sym_ref = op;
else if (GET_CODE (op) != CONST
|| GET_CODE (XEXP (op, 0)) != PLUS
- || GET_CODE (XEXP (XEXP (op, 0), 0)) != SYMBOL_REF
- || GET_CODE (XEXP (XEXP (op, 0), 1)) != CONST_INT)
+ || !SYMBOL_REF_P (XEXP (XEXP (op, 0), 0))
+ || !CONST_INT_P (XEXP (XEXP (op, 0), 1)))
return 0;
else
/* Return true if OP0 and OP1 are a register pair that a direct move
   instruction (GPR <-> VSX register) can handle.  */
bool
direct_move_p (rtx op0, rtx op1)
{
  if (!REG_P (op0) || !REG_P (op1))
    return false;

  if (!TARGET_DIRECT_MOVE)
    return false;

  /* Direct moves only operate on hard registers.  */
  int regno0 = REGNO (op0);
  int regno1 = REGNO (op1);
  if (!HARD_REGISTER_NUM_P (regno0) || !HARD_REGISTER_NUM_P (regno1))
    return false;

  /* One side must be a GPR and the other a VSX register, in either
     direction.  */
  if (INT_REGNO_P (regno0) && VSX_REGNO_P (regno1))
    return true;

  if (VSX_REGNO_P (regno0) && INT_REGNO_P (regno1))
    return true;

  return false;
}
Accept direct, indexed, offset, lo_sum and tocref. Since this is
a constraint function we know the operand has satisfied a suitable
- memory predicate. Also accept some odd rtl generated by reload
- (see rs6000_legitimize_reload_address for various forms). It is
- important that reload rtl be accepted by appropriate constraints
- but not by the operand predicate.
+ memory predicate.
Offsetting a lo_sum should not be allowed, except where we know by
- alignment that a 32k boundary is not crossed, but see the ???
- comment in rs6000_legitimize_reload_address. Note that by
+ alignment that a 32k boundary is not crossed. Note that by
"offsetting" here we mean a further offset to access parts of the
MEM. It's fine to have a lo_sum where the inner address is offset
from a sym, since the same sym+offset will appear in the high part
{
int regnum;
- if (GET_CODE (op) == REG)
+ if (REG_P (op))
regnum = REGNO (op);
else if (GET_CODE (op) == PLUS
- && GET_CODE (XEXP (op, 0)) == REG
- && GET_CODE (XEXP (op, 1)) == CONST_INT)
+ && REG_P (XEXP (op, 0))
+ && CONST_INT_P (XEXP (op, 1)))
regnum = REGNO (XEXP (op, 0));
else
tree decl;
unsigned HOST_WIDE_INT dsize, dalign, lsb, mask;
- if (GET_CODE (op) != SYMBOL_REF)
+ if (!SYMBOL_REF_P (op))
return false;
/* ISA 3.0 vector d-form addressing is restricted, don't allow
rtx base, offset;
split_const (op, &base, &offset);
- return (GET_CODE (base) == SYMBOL_REF
+ return (SYMBOL_REF_P (base)
&& CONSTANT_POOL_ADDRESS_P (base)
&& ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (get_pool_constant (base), Pmode));
}
{
return (DEFAULT_ABI == ABI_V4
&& !flag_pic && !TARGET_TOC
- && (GET_CODE (x) == SYMBOL_REF || GET_CODE (x) == CONST)
+ && (SYMBOL_REF_P (x) || GET_CODE (x) == CONST)
&& small_data_operand (x, mode));
}
return virtual_stack_registers_memory_p (x);
if (legitimate_constant_pool_address_p (x, mode, strict || lra_in_progress))
return true;
- if (GET_CODE (XEXP (x, 1)) != CONST_INT)
+ if (!CONST_INT_P (XEXP (x, 1)))
return false;
offset = INTVAL (XEXP (x, 1));
/* Return true if X is a simple base-register indirect address, i.e. a
   register valid as a base under the given STRICTness.  */
bool
legitimate_indirect_address_p (rtx x, int strict)
{
  return REG_P (x) && INT_REG_OK_FOR_BASE_P (x, strict);
}
bool
macho_lo_sum_memory_operand (rtx x, machine_mode mode)
{
if (!TARGET_MACHO || !flag_pic
- || mode != SImode || GET_CODE (x) != MEM)
+ || mode != SImode || !MEM_P (x))
return false;
x = XEXP (x, 0);
if (GET_CODE (x) != LO_SUM)
return false;
- if (GET_CODE (XEXP (x, 0)) != REG)
+ if (!REG_P (XEXP (x, 0)))
return false;
if (!INT_REG_OK_FOR_BASE_P (XEXP (x, 0), 0))
return false;
{
if (GET_CODE (x) != LO_SUM)
return false;
- if (GET_CODE (XEXP (x, 0)) != REG)
+ if (!REG_P (XEXP (x, 0)))
return false;
if (!INT_REG_OK_FOR_BASE_P (XEXP (x, 0), strict))
return false;
recognizes some LO_SUM addresses as valid although this
function says opposite. In most cases, LRA through different
transformations can generate correct code for address reloads.
- It can not manage only some LO_SUM cases. So we need to add
- code analogous to one in rs6000_legitimize_reload_address for
- LOW_SUM here saying that some addresses are still valid. */
+ It cannot manage only some LO_SUM cases. So we need to add
+ code here saying that some addresses are still valid. */
large_toc_ok = (lra_in_progress && TARGET_CMODEL != CMODEL_SMALL
&& small_toc_ref (x, VOIDmode));
if (TARGET_TOC && ! large_toc_ok)
else
return force_reg (Pmode, x);
}
- if (GET_CODE (x) == SYMBOL_REF)
+ if (SYMBOL_REF_P (x))
{
enum tls_model model = SYMBOL_REF_TLS_MODEL (x);
if (model != 0)
}
if (GET_CODE (x) == PLUS
- && GET_CODE (XEXP (x, 0)) == REG
- && GET_CODE (XEXP (x, 1)) == CONST_INT
+ && REG_P (XEXP (x, 0))
+ && CONST_INT_P (XEXP (x, 1))
&& ((unsigned HOST_WIDE_INT) (INTVAL (XEXP (x, 1)) + 0x8000)
>= 0x10000 - extra))
{
return plus_constant (Pmode, sum, low_int);
}
else if (GET_CODE (x) == PLUS
- && GET_CODE (XEXP (x, 0)) == REG
- && GET_CODE (XEXP (x, 1)) != CONST_INT
+ && REG_P (XEXP (x, 0))
+ && !CONST_INT_P (XEXP (x, 1))
&& GET_MODE_NUNITS (mode) == 1
&& (GET_MODE_SIZE (mode) <= UNITS_PER_WORD
|| (/* ??? Assume floating point reg based on mode? */
)
&& TARGET_32BIT
&& TARGET_NO_TOC
- && ! flag_pic
- && GET_CODE (x) != CONST_INT
- && GET_CODE (x) != CONST_WIDE_INT
- && GET_CODE (x) != CONST_DOUBLE
+ && !flag_pic
+ && !CONST_INT_P (x)
+ && !CONST_WIDE_INT_P (x)
+ && !CONST_DOUBLE_P (x)
&& CONSTANT_P (x)
&& GET_MODE_NUNITS (mode) == 1
&& (GET_MODE_SIZE (mode) <= UNITS_PER_WORD
return gen_rtx_LO_SUM (Pmode, reg, x);
}
else if (TARGET_TOC
- && GET_CODE (x) == SYMBOL_REF
+ && SYMBOL_REF_P (x)
&& constant_pool_expr_p (x)
&& ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (get_pool_constant (x), Pmode))
return create_TOC_reference (x, NULL_RTX);
output_addr_const (file, x);
if (TARGET_ELF)
fputs ("@dtprel+0x8000", file);
- else if (TARGET_XCOFF && GET_CODE (x) == SYMBOL_REF)
+ else if (TARGET_XCOFF && SYMBOL_REF_P (x))
{
switch (SYMBOL_REF_TLS_MODEL (x))
{
/* Return true if X is a SYMBOL_REF that refers to a real thread-local
   symbol, i.e. its SYMBOL_REF_TLS_MODEL is at least TLS_MODEL_REAL.  */

static bool
rs6000_real_tls_symbol_ref_p (rtx x)
{
  if (!SYMBOL_REF_P (x))
    return false;
  return SYMBOL_REF_TLS_MODEL (x) >= TLS_MODEL_REAL;
}
{
rtx x, y, offset;
+ if (GET_CODE (orig_x) == UNSPEC && XINT (orig_x, 1) == UNSPEC_FUSION_GPR)
+ orig_x = XVECEXP (orig_x, 0, 0);
+
orig_x = delegitimize_mem_from_attrs (orig_x);
+
x = orig_x;
if (MEM_P (x))
x = XEXP (x, 0);
y = x;
- if (TARGET_CMODEL != CMODEL_SMALL
- && GET_CODE (y) == LO_SUM)
+ if (TARGET_CMODEL != CMODEL_SMALL && GET_CODE (y) == LO_SUM)
y = XEXP (y, 1);
offset = NULL_RTX;
y = XEXP (y, 0);
}
- if (GET_CODE (y) == UNSPEC
- && XINT (y, 1) == UNSPEC_TOCREL)
+ if (GET_CODE (y) == UNSPEC && XINT (y, 1) == UNSPEC_TOCREL)
{
y = XVECEXP (y, 0, 0);
/* Do not associate thread-local symbols with the original
constant pool symbol. */
if (TARGET_XCOFF
- && GET_CODE (y) == SYMBOL_REF
+ && SYMBOL_REF_P (y)
&& CONSTANT_POOL_ADDRESS_P (y)
&& rs6000_real_tls_symbol_ref_p (get_pool_constant (y)))
return orig_x;
&& GET_CODE (XEXP (orig_x, 1)) == CONST)
{
y = XEXP (XEXP (orig_x, 1), 0);
- if (GET_CODE (y) == UNSPEC
- && XINT (y, 1) == UNSPEC_MACHOPIC_OFFSET)
+ if (GET_CODE (y) == UNSPEC && XINT (y, 1) == UNSPEC_MACHOPIC_OFFSET)
return XVECEXP (y, 0, 0);
}
{
if (GET_CODE (x) == UNSPEC)
return true;
- if (GET_CODE (x) == SYMBOL_REF
+ if (SYMBOL_REF_P (x)
&& CONSTANT_POOL_ADDRESS_P (x))
{
rtx c = get_pool_constant (x);
return dest;
}
-/* Mess with a call, to make it look like the tls_gdld insns when
- !TARGET_TLS_MARKERS. These insns have an extra unspec to
- differentiate them from standard calls, because they need to emit
- the arg setup insns as well as the actual call. That keeps the
- arg setup insns immediately adjacent to the branch and link. */
+/* Output arg setup instructions for a !TARGET_TLS_MARKERS
+ __tls_get_addr call. */
-static void
-edit_tls_call_insn (rtx arg)
-{
- rtx call_insn = last_call_insn ();
- if (!TARGET_TLS_MARKERS)
- {
- rtx patt = PATTERN (call_insn);
- gcc_assert (GET_CODE (patt) == PARALLEL);
- rtvec orig = XVEC (patt, 0);
- rtvec v = rtvec_alloc (GET_NUM_ELEM (orig) + 1);
- gcc_assert (GET_NUM_ELEM (orig) > 0);
- /* The (set (..) (call (mem ..))). */
- RTVEC_ELT (v, 0) = RTVEC_ELT (orig, 0);
- /* The extra unspec. */
- RTVEC_ELT (v, 1) = arg;
- /* All other assorted call pattern pieces. */
- for (int i = 1; i < GET_NUM_ELEM (orig); i++)
- RTVEC_ELT (v, i + 1) = RTVEC_ELT (orig, i);
- XVEC (patt, 0) = v;
- }
- if (DEFAULT_ABI == ABI_V4 && TARGET_SECURE_PLT && flag_pic)
- use_reg (&CALL_INSN_FUNCTION_USAGE (call_insn),
- pic_offset_table_rtx);
+void
+rs6000_output_tlsargs (rtx *operands)
+{
+ /* Set up operands for output_asm_insn, without modifying OPERANDS. */
+ rtx op[3];
+
+ /* The set dest of the call, ie. r3, which is also the first arg reg. */
+ op[0] = operands[0];
+ /* The TLS symbol from global_tlsarg stashed as CALL operand 2. */
+ op[1] = XVECEXP (operands[2], 0, 0);
+ if (XINT (operands[2], 1) == UNSPEC_TLSGD)
+ {
+ /* The GOT register. */
+ op[2] = XVECEXP (operands[2], 0, 1);
+ if (TARGET_CMODEL != CMODEL_SMALL)
+ output_asm_insn ("addis %0,%2,%1@got@tlsgd@ha\n\t"
+ "addi %0,%0,%1@got@tlsgd@l", op);
+ else
+ output_asm_insn ("addi %0,%2,%1@got@tlsgd", op);
+ }
+ else if (XINT (operands[2], 1) == UNSPEC_TLSLD)
+ {
+ if (TARGET_CMODEL != CMODEL_SMALL)
+ output_asm_insn ("addis %0,%1,%&@got@tlsld@ha\n\t"
+ "addi %0,%0,%&@got@tlsld@l", op);
+ else
+ output_asm_insn ("addi %0,%1,%&@got@tlsld", op);
+ }
+ else
+ gcc_unreachable ();
}
/* Passes the tls arg value for global dynamic and local dynamic
{
rtx arg = gen_rtx_UNSPEC (Pmode, gen_rtvec (2, addr, got),
UNSPEC_TLSGD);
+ tga = rs6000_tls_get_addr ();
global_tlsarg = arg;
- rtx argreg = const0_rtx;
if (TARGET_TLS_MARKERS)
{
- argreg = gen_rtx_REG (Pmode, 3);
+ rtx argreg = gen_rtx_REG (Pmode, 3);
emit_insn (gen_rtx_SET (argreg, arg));
+ emit_library_call_value (tga, dest, LCT_CONST, Pmode,
+ argreg, Pmode);
}
-
- tga = rs6000_tls_get_addr ();
- emit_library_call_value (tga, dest, LCT_CONST, Pmode,
- argreg, Pmode);
+ else
+ emit_library_call_value (tga, dest, LCT_CONST, Pmode);
global_tlsarg = NULL_RTX;
- edit_tls_call_insn (arg);
+ /* Make a note so that the result of this call can be CSEd. */
+ rtvec vec = gen_rtvec (1, copy_rtx (arg));
+ rtx uns = gen_rtx_UNSPEC (Pmode, vec, UNSPEC_TLS_GET_ADDR);
+ set_unique_reg_note (get_last_insn (), REG_EQUAL, uns);
}
else if (model == TLS_MODEL_LOCAL_DYNAMIC)
{
- rtx arg = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, got),
- UNSPEC_TLSLD);
+ rtx arg = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, got), UNSPEC_TLSLD);
+ tga = rs6000_tls_get_addr ();
+ tmp1 = gen_reg_rtx (Pmode);
global_tlsarg = arg;
- rtx argreg = const0_rtx;
if (TARGET_TLS_MARKERS)
{
- argreg = gen_rtx_REG (Pmode, 3);
+ rtx argreg = gen_rtx_REG (Pmode, 3);
emit_insn (gen_rtx_SET (argreg, arg));
+ emit_library_call_value (tga, tmp1, LCT_CONST, Pmode,
+ argreg, Pmode);
}
-
- tga = rs6000_tls_get_addr ();
- tmp1 = gen_reg_rtx (Pmode);
- emit_library_call_value (tga, tmp1, LCT_CONST, Pmode,
- argreg, Pmode);
+ else
+ emit_library_call_value (tga, tmp1, LCT_CONST, Pmode);
global_tlsarg = NULL_RTX;
- edit_tls_call_insn (arg);
+ /* Make a note so that the result of this call can be CSEd. */
+ rtvec vec = gen_rtvec (1, copy_rtx (arg));
+ rtx uns = gen_rtx_UNSPEC (Pmode, vec, UNSPEC_TLS_GET_ADDR);
+ set_unique_reg_note (get_last_insn (), REG_EQUAL, uns);
if (rs6000_tls_size == 16)
{
/* A TLS symbol in the TOC cannot contain a sum. */
if (GET_CODE (x) == CONST
&& GET_CODE (XEXP (x, 0)) == PLUS
- && GET_CODE (XEXP (XEXP (x, 0), 0)) == SYMBOL_REF
+ && SYMBOL_REF_P (XEXP (XEXP (x, 0), 0))
&& SYMBOL_REF_TLS_MODEL (XEXP (XEXP (x, 0), 0)) != 0)
return true;
&& GET_MODE_SIZE (mode) <= POWERPC64_TOC_POINTER_ALIGNMENT));
}
-/* Our implementation of LEGITIMIZE_RELOAD_ADDRESS. Returns a value to
- replace the input X, or the original X if no replacement is called for.
- The output parameter *WIN is 1 if the calling macro should goto WIN,
- 0 if it should not.
-
- For RS/6000, we wish to handle large displacements off a base
- register by splitting the addend across an addiu/addis and the mem insn.
- This cuts number of extra insns needed from 3 to 1.
-
- On Darwin, we use this to generate code for floating point constants.
- A movsf_low is generated so we wind up with 2 instructions rather than 3.
- The Darwin code is inside #if TARGET_MACHO because only then are the
- machopic_* functions defined. */
-static rtx
-rs6000_legitimize_reload_address (rtx x, machine_mode mode,
- int opnum, int type,
- int ind_levels ATTRIBUTE_UNUSED, int *win)
-{
- bool reg_offset_p = reg_offset_addressing_ok_p (mode);
- bool quad_offset_p = mode_supports_dq_form (mode);
-
- /* Nasty hack for vsx_splat_v2df/v2di load from mem, which takes a
- DFmode/DImode MEM. Ditto for ISA 3.0 vsx_splat_v4sf/v4si. */
- if (reg_offset_p
- && opnum == 1
- && ((mode == DFmode && recog_data.operand_mode[0] == V2DFmode)
- || (mode == DImode && recog_data.operand_mode[0] == V2DImode)
- || (mode == SFmode && recog_data.operand_mode[0] == V4SFmode
- && TARGET_P9_VECTOR)
- || (mode == SImode && recog_data.operand_mode[0] == V4SImode
- && TARGET_P9_VECTOR)))
- reg_offset_p = false;
-
- /* We must recognize output that we have already generated ourselves. */
- if (GET_CODE (x) == PLUS
- && GET_CODE (XEXP (x, 0)) == PLUS
- && GET_CODE (XEXP (XEXP (x, 0), 0)) == REG
- && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT
- && GET_CODE (XEXP (x, 1)) == CONST_INT)
- {
- if (TARGET_DEBUG_ADDR)
- {
- fprintf (stderr, "\nlegitimize_reload_address push_reload #1:\n");
- debug_rtx (x);
- }
- push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
- BASE_REG_CLASS, GET_MODE (x), VOIDmode, 0, 0,
- opnum, (enum reload_type) type);
- *win = 1;
- return x;
- }
-
- /* Likewise for (lo_sum (high ...) ...) output we have generated. */
- if (GET_CODE (x) == LO_SUM
- && GET_CODE (XEXP (x, 0)) == HIGH)
- {
- if (TARGET_DEBUG_ADDR)
- {
- fprintf (stderr, "\nlegitimize_reload_address push_reload #2:\n");
- debug_rtx (x);
- }
- push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
- BASE_REG_CLASS, Pmode, VOIDmode, 0, 0,
- opnum, (enum reload_type) type);
- *win = 1;
- return x;
- }
-
-#if TARGET_MACHO
- if (DEFAULT_ABI == ABI_DARWIN && flag_pic
- && GET_CODE (x) == LO_SUM
- && GET_CODE (XEXP (x, 0)) == PLUS
- && XEXP (XEXP (x, 0), 0) == pic_offset_table_rtx
- && GET_CODE (XEXP (XEXP (x, 0), 1)) == HIGH
- && XEXP (XEXP (XEXP (x, 0), 1), 0) == XEXP (x, 1)
- && machopic_operand_p (XEXP (x, 1)))
- {
- /* Result of previous invocation of this function on Darwin
- floating point constant. */
- push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
- BASE_REG_CLASS, Pmode, VOIDmode, 0, 0,
- opnum, (enum reload_type) type);
- *win = 1;
- return x;
- }
-#endif
-
- if (TARGET_CMODEL != CMODEL_SMALL
- && reg_offset_p
- && !quad_offset_p
- && small_toc_ref (x, VOIDmode))
- {
- rtx hi = gen_rtx_HIGH (Pmode, copy_rtx (x));
- x = gen_rtx_LO_SUM (Pmode, hi, x);
- if (TARGET_DEBUG_ADDR)
- {
- fprintf (stderr, "\nlegitimize_reload_address push_reload #3:\n");
- debug_rtx (x);
- }
- push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
- BASE_REG_CLASS, Pmode, VOIDmode, 0, 0,
- opnum, (enum reload_type) type);
- *win = 1;
- return x;
- }
-
- if (GET_CODE (x) == PLUS
- && REG_P (XEXP (x, 0))
- && REGNO (XEXP (x, 0)) < FIRST_PSEUDO_REGISTER
- && INT_REG_OK_FOR_BASE_P (XEXP (x, 0), 1)
- && CONST_INT_P (XEXP (x, 1))
- && reg_offset_p
- && (quad_offset_p || !VECTOR_MODE_P (mode) || VECTOR_MEM_NONE_P (mode)))
- {
- HOST_WIDE_INT val = INTVAL (XEXP (x, 1));
- HOST_WIDE_INT low = ((val & 0xffff) ^ 0x8000) - 0x8000;
- HOST_WIDE_INT high
- = (((val - low) & 0xffffffff) ^ 0x80000000) - 0x80000000;
-
- /* Check for 32-bit overflow or quad addresses with one of the
- four least significant bits set. */
- if (high + low != val
- || (quad_offset_p && (low & 0xf)))
- {
- *win = 0;
- return x;
- }
-
- /* Reload the high part into a base reg; leave the low part
- in the mem directly. */
-
- x = gen_rtx_PLUS (GET_MODE (x),
- gen_rtx_PLUS (GET_MODE (x), XEXP (x, 0),
- GEN_INT (high)),
- GEN_INT (low));
-
- if (TARGET_DEBUG_ADDR)
- {
- fprintf (stderr, "\nlegitimize_reload_address push_reload #4:\n");
- debug_rtx (x);
- }
- push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
- BASE_REG_CLASS, GET_MODE (x), VOIDmode, 0, 0,
- opnum, (enum reload_type) type);
- *win = 1;
- return x;
- }
-
- if (GET_CODE (x) == SYMBOL_REF
- && reg_offset_p
- && !quad_offset_p
- && (!VECTOR_MODE_P (mode) || VECTOR_MEM_NONE_P (mode))
-#if TARGET_MACHO
- && DEFAULT_ABI == ABI_DARWIN
- && (flag_pic || MACHO_DYNAMIC_NO_PIC_P)
- && machopic_symbol_defined_p (x)
-#else
- && DEFAULT_ABI == ABI_V4
- && !flag_pic
-#endif
- /* Don't do this for TFmode or TDmode, since the result isn't offsettable.
- The same goes for DImode without 64-bit gprs and DFmode and DDmode
- without fprs.
- ??? Assume floating point reg based on mode? This assumption is
- violated by eg. powerpc-linux -m32 compile of gcc.dg/pr28796-2.c
- where reload ends up doing a DFmode load of a constant from
- mem using two gprs. Unfortunately, at this point reload
- hasn't yet selected regs so poking around in reload data
- won't help and even if we could figure out the regs reliably,
- we'd still want to allow this transformation when the mem is
- naturally aligned. Since we say the address is good here, we
- can't disable offsets from LO_SUMs in mem_operand_gpr.
- FIXME: Allow offset from lo_sum for other modes too, when
- mem is sufficiently aligned.
-
- Also disallow this if the type can go in VMX/Altivec registers, since
- those registers do not have d-form (reg+offset) address modes. */
- && !reg_addr[mode].scalar_in_vmx_p
- && mode != TFmode
- && mode != TDmode
- && mode != IFmode
- && mode != KFmode
- && (mode != TImode || !TARGET_VSX)
- && mode != PTImode
- && (mode != DImode || TARGET_POWERPC64)
- && ((mode != DFmode && mode != DDmode) || TARGET_POWERPC64
- || TARGET_HARD_FLOAT))
- {
-#if TARGET_MACHO
- if (flag_pic)
- {
- rtx offset = machopic_gen_offset (x);
- x = gen_rtx_LO_SUM (GET_MODE (x),
- gen_rtx_PLUS (Pmode, pic_offset_table_rtx,
- gen_rtx_HIGH (Pmode, offset)), offset);
- }
- else
-#endif
- x = gen_rtx_LO_SUM (GET_MODE (x),
- gen_rtx_HIGH (Pmode, x), x);
-
- if (TARGET_DEBUG_ADDR)
- {
- fprintf (stderr, "\nlegitimize_reload_address push_reload #5:\n");
- debug_rtx (x);
- }
- push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
- BASE_REG_CLASS, Pmode, VOIDmode, 0, 0,
- opnum, (enum reload_type) type);
- *win = 1;
- return x;
- }
-
- /* Reload an offset address wrapped by an AND that represents the
- masking of the lower bits. Strip the outer AND and let reload
- convert the offset address into an indirect address. For VSX,
- force reload to create the address with an AND in a separate
- register, because we can't guarantee an altivec register will
- be used. */
- if (VECTOR_MEM_ALTIVEC_P (mode)
- && GET_CODE (x) == AND
- && GET_CODE (XEXP (x, 0)) == PLUS
- && GET_CODE (XEXP (XEXP (x, 0), 0)) == REG
- && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT
- && GET_CODE (XEXP (x, 1)) == CONST_INT
- && INTVAL (XEXP (x, 1)) == -16)
- {
- x = XEXP (x, 0);
- *win = 1;
- return x;
- }
-
- if (TARGET_TOC
- && reg_offset_p
- && !quad_offset_p
- && GET_CODE (x) == SYMBOL_REF
- && use_toc_relative_ref (x, mode))
- {
- x = create_TOC_reference (x, NULL_RTX);
- if (TARGET_CMODEL != CMODEL_SMALL)
- {
- if (TARGET_DEBUG_ADDR)
- {
- fprintf (stderr, "\nlegitimize_reload_address push_reload #6:\n");
- debug_rtx (x);
- }
- push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
- BASE_REG_CLASS, Pmode, VOIDmode, 0, 0,
- opnum, (enum reload_type) type);
- }
- *win = 1;
- return x;
- }
- *win = 0;
- return x;
-}
-
-/* Debug version of rs6000_legitimize_reload_address. */
-static rtx
-rs6000_debug_legitimize_reload_address (rtx x, machine_mode mode,
- int opnum, int type,
- int ind_levels, int *win)
-{
- rtx ret = rs6000_legitimize_reload_address (x, mode, opnum, type,
- ind_levels, win);
- fprintf (stderr,
- "\nrs6000_legitimize_reload_address: mode = %s, opnum = %d, "
- "type = %d, ind_levels = %d, win = %d, original addr:\n",
- GET_MODE_NAME (mode), opnum, type, ind_levels, *win);
- debug_rtx (x);
-
- if (x == ret)
- fprintf (stderr, "Same address returned\n");
- else if (!ret)
- fprintf (stderr, "NULL returned\n");
- else
- {
- fprintf (stderr, "New address:\n");
- debug_rtx (ret);
- }
-
- return ret;
-}
-
/* TARGET_LEGITIMATE_ADDRESS_P recognizes an RTL expression
that is a valid memory address for an instruction.
The MODE argument is the machine mode for the MEM expression
/* If this is an unaligned stvx/ldvx type address, discard the outer AND. */
if (VECTOR_MEM_ALTIVEC_P (mode)
&& GET_CODE (x) == AND
- && GET_CODE (XEXP (x, 1)) == CONST_INT
+ && CONST_INT_P (XEXP (x, 1))
&& INTVAL (XEXP (x, 1)) == -16)
x = XEXP (x, 0);
if (! reg_ok_strict
&& reg_offset_p
&& GET_CODE (x) == PLUS
- && GET_CODE (XEXP (x, 0)) == REG
+ && REG_P (XEXP (x, 0))
&& (XEXP (x, 0) == virtual_stack_vars_rtx
|| XEXP (x, 0) == arg_pointer_rtx)
- && GET_CODE (XEXP (x, 1)) == CONST_INT)
+ && CONST_INT_P (XEXP (x, 1)))
return 1;
if (rs6000_legitimate_offset_address_p (mode, x, reg_ok_strict, false))
return 1;
been rejected as illegitimate. */
if (XEXP (addr, 0) != virtual_stack_vars_rtx
&& XEXP (addr, 0) != arg_pointer_rtx
- && GET_CODE (XEXP (addr, 1)) == CONST_INT)
+ && CONST_INT_P (XEXP (addr, 1)))
{
unsigned HOST_WIDE_INT val = INTVAL (XEXP (addr, 1));
return val + 0x8000 >= 0x10000 - (TARGET_POWERPC64 ? 8 : 12);
{
case PROCESSOR_POWER8:
case PROCESSOR_POWER9:
+ case PROCESSOR_FUTURE:
if (DECIMAL_FLOAT_MODE_P (mode))
return 1;
if (VECTOR_MODE_P (mode))
if (TARGET_DEBUG_TARGET)
fprintf (stderr, "rs6000_conditional_register_usage called\n");
- /* Set MQ register fixed (already call_used) so that it will not be
- allocated. */
- fixed_regs[64] = 1;
-
/* 64-bit AIX and Linux reserve GPR13 for thread-private data. */
if (TARGET_64BIT)
fixed_regs[13] = call_used_regs[13]
static void
rs6000_eliminate_indexed_memrefs (rtx operands[2])
{
- if (GET_CODE (operands[0]) == MEM
- && GET_CODE (XEXP (operands[0], 0)) != REG
+ if (MEM_P (operands[0])
+ && !REG_P (XEXP (operands[0], 0))
&& ! legitimate_constant_pool_address_p (XEXP (operands[0], 0),
GET_MODE (operands[0]), false))
operands[0]
= replace_equiv_address (operands[0],
copy_addr_to_reg (XEXP (operands[0], 0)));
- if (GET_CODE (operands[1]) == MEM
- && GET_CODE (XEXP (operands[1], 0)) != REG
+ if (MEM_P (operands[1])
+ && !REG_P (XEXP (operands[1], 0))
&& ! legitimate_constant_pool_address_p (XEXP (operands[1], 0),
GET_MODE (operands[1]), false))
operands[1]
if (MEM_P (source))
{
- gcc_assert (REG_P (dest) || GET_CODE (dest) == SUBREG);
+ gcc_assert (REG_P (dest) || SUBREG_P (dest));
rs6000_emit_le_vsx_load (dest, source, mode);
}
else
static bool
rs6000_emit_move_si_sf_subreg (rtx dest, rtx source, machine_mode mode)
{
- if (TARGET_DIRECT_MOVE_64BIT && !lra_in_progress && !reload_completed
+ if (TARGET_DIRECT_MOVE_64BIT && !reload_completed
&& (!SUBREG_P (dest) || !sf_subreg_operand (dest, mode))
&& SUBREG_P (source) && sf_subreg_operand (source, mode))
{
/* Check if GCC is setting up a block move that will end up using FP
registers as temporaries. We must make sure this is acceptable. */
- if (GET_CODE (operands[0]) == MEM
- && GET_CODE (operands[1]) == MEM
+ if (MEM_P (operands[0])
+ && MEM_P (operands[1])
&& mode == DImode
&& (rs6000_slow_unaligned_access (DImode, MEM_ALIGN (operands[0]))
|| rs6000_slow_unaligned_access (DImode, MEM_ALIGN (operands[1])))
return;
}
- if (can_create_pseudo_p () && GET_CODE (operands[0]) == MEM
+ if (can_create_pseudo_p () && MEM_P (operands[0])
&& !gpc_reg_operand (operands[1], mode))
operands[1] = force_reg (mode, operands[1]);
tmp = XEXP (XEXP (tmp, 0), 0);
}
- gcc_assert (GET_CODE (tmp) == SYMBOL_REF);
+ gcc_assert (SYMBOL_REF_P (tmp));
model = SYMBOL_REF_TLS_MODEL (tmp);
gcc_assert (model != 0);
p1:SD) if p1 is not of floating point class and p0 is spilled as
we can have no analogous movsd_store for this. */
if (lra_in_progress && mode == DDmode
- && REG_P (operands[0]) && REGNO (operands[0]) >= FIRST_PSEUDO_REGISTER
+ && REG_P (operands[0]) && !HARD_REGISTER_P (operands[0])
&& reg_preferred_class (REGNO (operands[0])) == NO_REGS
- && GET_CODE (operands[1]) == SUBREG && REG_P (SUBREG_REG (operands[1]))
+ && SUBREG_P (operands[1]) && REG_P (SUBREG_REG (operands[1]))
&& GET_MODE (SUBREG_REG (operands[1])) == SDmode)
{
enum reg_class cl;
int regno = REGNO (SUBREG_REG (operands[1]));
- if (regno >= FIRST_PSEUDO_REGISTER)
+ if (!HARD_REGISTER_NUM_P (regno))
{
cl = reg_preferred_class (regno);
regno = reg_renumber[regno];
}
if (lra_in_progress
&& mode == SDmode
- && REG_P (operands[0]) && REGNO (operands[0]) >= FIRST_PSEUDO_REGISTER
+ && REG_P (operands[0]) && !HARD_REGISTER_P (operands[0])
&& reg_preferred_class (REGNO (operands[0])) == NO_REGS
&& (REG_P (operands[1])
- || (GET_CODE (operands[1]) == SUBREG
- && REG_P (SUBREG_REG (operands[1])))))
+ || (SUBREG_P (operands[1]) && REG_P (SUBREG_REG (operands[1])))))
{
- int regno = REGNO (GET_CODE (operands[1]) == SUBREG
- ? SUBREG_REG (operands[1]) : operands[1]);
+ int regno = reg_or_subregno (operands[1]);
enum reg_class cl;
- if (regno >= FIRST_PSEUDO_REGISTER)
+ if (!HARD_REGISTER_NUM_P (regno))
{
cl = reg_preferred_class (regno);
gcc_assert (cl != NO_REGS);
p:DD)) if p0 is not of floating point class and p1 is spilled as
we can have no analogous movsd_load for this. */
if (lra_in_progress && mode == DDmode
- && GET_CODE (operands[0]) == SUBREG && REG_P (SUBREG_REG (operands[0]))
+ && SUBREG_P (operands[0]) && REG_P (SUBREG_REG (operands[0]))
&& GET_MODE (SUBREG_REG (operands[0])) == SDmode
- && REG_P (operands[1]) && REGNO (operands[1]) >= FIRST_PSEUDO_REGISTER
+ && REG_P (operands[1]) && !HARD_REGISTER_P (operands[1])
&& reg_preferred_class (REGNO (operands[1])) == NO_REGS)
{
enum reg_class cl;
int regno = REGNO (SUBREG_REG (operands[0]));
- if (regno >= FIRST_PSEUDO_REGISTER)
+ if (!HARD_REGISTER_NUM_P (regno))
{
cl = reg_preferred_class (regno);
regno = reg_renumber[regno];
if (lra_in_progress
&& mode == SDmode
&& (REG_P (operands[0])
- || (GET_CODE (operands[0]) == SUBREG
- && REG_P (SUBREG_REG (operands[0]))))
- && REG_P (operands[1]) && REGNO (operands[1]) >= FIRST_PSEUDO_REGISTER
+ || (SUBREG_P (operands[0]) && REG_P (SUBREG_REG (operands[0]))))
+ && REG_P (operands[1]) && !HARD_REGISTER_P (operands[1])
&& reg_preferred_class (REGNO (operands[1])) == NO_REGS)
{
- int regno = REGNO (GET_CODE (operands[0]) == SUBREG
- ? SUBREG_REG (operands[0]) : operands[0]);
+ int regno = reg_or_subregno (operands[0]);
enum reg_class cl;
- if (regno >= FIRST_PSEUDO_REGISTER)
+ if (!HARD_REGISTER_NUM_P (regno))
{
cl = reg_preferred_class (regno);
gcc_assert (cl != NO_REGS);
case E_HImode:
case E_QImode:
if (CONSTANT_P (operands[1])
- && GET_CODE (operands[1]) != CONST_INT)
+ && !CONST_INT_P (operands[1]))
operands[1] = force_const_mem (mode, operands[1]);
break;
if (TARGET_ELF
&& mode == Pmode
&& DEFAULT_ABI == ABI_V4
- && (GET_CODE (operands[1]) == SYMBOL_REF
+ && (SYMBOL_REF_P (operands[1])
|| GET_CODE (operands[1]) == CONST)
&& small_data_operand (operands[1], mode))
{
&& mode == Pmode
&& CONSTANT_P (operands[1])
&& GET_CODE (operands[1]) != HIGH
- && GET_CODE (operands[1]) != CONST_INT)
+ && !CONST_INT_P (operands[1]))
{
rtx target = (!can_create_pseudo_p ()
? operands[0]
/* If this is a function address on -mcall-aixdesc,
convert it to the address of the descriptor. */
if (DEFAULT_ABI == ABI_AIX
- && GET_CODE (operands[1]) == SYMBOL_REF
+ && SYMBOL_REF_P (operands[1])
&& XSTR (operands[1], 0)[0] == '.')
{
const char *name = XSTR (operands[1], 0);
and we have put it in the TOC, we just need to make a TOC-relative
reference to it. */
if (TARGET_TOC
- && GET_CODE (operands[1]) == SYMBOL_REF
+ && SYMBOL_REF_P (operands[1])
&& use_toc_relative_ref (operands[1], mode))
operands[1] = create_TOC_reference (operands[1], operands[0]);
else if (mode == Pmode
&& GET_CODE (XEXP (operands[1], 0)) == PLUS
&& add_operand (XEXP (XEXP (operands[1], 0), 1), mode)
&& (GET_CODE (XEXP (XEXP (operands[1], 0), 0)) == LABEL_REF
- || GET_CODE (XEXP (XEXP (operands[1], 0), 0)) == SYMBOL_REF)
+ || SYMBOL_REF_P (XEXP (XEXP (operands[1], 0), 0)))
&& ! side_effects_p (operands[0]))
{
rtx sym =
operands[1] = force_const_mem (mode, operands[1]);
if (TARGET_TOC
- && GET_CODE (XEXP (operands[1], 0)) == SYMBOL_REF
+ && SYMBOL_REF_P (XEXP (operands[1], 0))
&& use_toc_relative_ref (XEXP (operands[1], 0), mode))
{
rtx tocref = create_TOC_reference (XEXP (operands[1], 0),
/* Above, we may have called force_const_mem which may have returned
an invalid address. If we can, fix this up; otherwise, reload will
have to deal with it. */
- if (GET_CODE (operands[1]) == MEM)
+ if (MEM_P (operands[1]))
operands[1] = validize_mem (operands[1]);
emit_insn (gen_rtx_SET (operands[0], operands[1]));
cum->fregno++;
if (USE_FP_FOR_ARG_P (cum, elt_mode)
- && !(TARGET_AIX && !TARGET_ELF && AGGREGATE_TYPE_P (type)))
+ && !(TARGET_AIX && !TARGET_ELF
+ && type != NULL && AGGREGATE_TYPE_P (type)))
{
rtx rvec[GP_ARG_NUM_REG + AGGR_ARG_NUM_REG + 1];
rtx r, off;
{
warned = true;
inform (input_location,
- "the ABI of passing homogeneous float aggregates"
+ "the ABI of passing homogeneous %<float%> aggregates"
" has changed in GCC 5");
}
}
align_words = rs6000_parm_start (mode, type, cum->words);
if (USE_FP_FOR_ARG_P (cum, elt_mode)
- && !(TARGET_AIX && !TARGET_ELF && AGGREGATE_TYPE_P (type)))
+ && !(TARGET_AIX && !TARGET_ELF
+ && type != NULL && AGGREGATE_TYPE_P (type)))
{
unsigned long n_fpreg = (GET_MODE_SIZE (elt_mode) + 7) >> 3;
{
rtx reg_save_area
= assign_stack_local (BLKmode, gpr_size + fpr_size, 64);
- gcc_assert (GET_CODE (reg_save_area) == MEM);
+ gcc_assert (MEM_P (reg_save_area));
reg_save_area = XEXP (reg_save_area, 0);
if (GET_CODE (reg_save_area) == PLUS)
{
gcc_assert (XEXP (reg_save_area, 0)
== virtual_stack_vars_rtx);
- gcc_assert (GET_CODE (XEXP (reg_save_area, 1)) == CONST_INT);
+ gcc_assert (CONST_INT_P (XEXP (reg_save_area, 1)));
offset += INTVAL (XEXP (reg_save_area, 1));
}
else
if (icode == CODE_FOR_rs6000_mffsl
&& rs6000_isa_flags & OPTION_MASK_SOFT_FLOAT)
{
- error ("__builtin_mffsl() not supported with -msoft-float");
+ error ("%<__builtin_mffsl%> not supported with %<-msoft-float%>");
return const0_rtx;
}
if (arg0 == error_mark_node || arg1 == error_mark_node)
return const0_rtx;
- if (GET_CODE (op0) != CONST_INT
+ if (!CONST_INT_P (op0)
|| INTVAL (op0) > 255
|| INTVAL (op0) < 0)
{
if (rs6000_isa_flags & OPTION_MASK_SOFT_FLOAT)
{
- error ("__builtin_mtfsb0 and __builtin_mtfsb1 not supported with -msoft-float");
+ error ("%<__builtin_mtfsb0%> and %<__builtin_mtfsb1%> not supported with "
+ "%<-msoft-float%>");
return const0_rtx;
}
if (rs6000_isa_flags & OPTION_MASK_SOFT_FLOAT)
{
- error ("__builtin_set_fpscr_rn not supported with -msoft-float");
+ error ("%<__builtin_set_fpscr_rn%> not supported with %<-msoft-float%>");
return const0_rtx;
}
compile time if the argument is a variable. The least significant two
bits of the argument, regardless of type, are used to set the rounding
mode. All other bits are ignored. */
- if (GET_CODE (op0) == CONST_INT && !const_0_to_3_operand(op0, VOIDmode))
+ if (CONST_INT_P (op0) && !const_0_to_3_operand(op0, VOIDmode))
{
error ("Argument must be a value between 0 and 3.");
return const0_rtx;
if (TARGET_32BIT)
/* Builtin not supported in 32-bit mode. */
fatal_error (input_location,
- "__builtin_set_fpscr_drn is not supported in 32-bit mode.");
+ "%<__builtin_set_fpscr_drn%> is not supported "
+ "in 32-bit mode");
if (rs6000_isa_flags & OPTION_MASK_SOFT_FLOAT)
{
- error ("__builtin_set_fpscr_drn not supported with -msoft-float");
+ error ("%<__builtin_set_fpscr_drn%> not supported with %<-msoft-float%>");
return const0_rtx;
}
compile time if the argument is a variable. The least significant two
bits of the argument, regardless of type, are used to set the rounding
mode. All other bits are ignored. */
- if (GET_CODE (op0) == CONST_INT && !const_0_to_7_operand(op0, VOIDmode))
+ if (CONST_INT_P (op0) && !const_0_to_7_operand(op0, VOIDmode))
{
error ("Argument must be a value between 0 and 7.");
return const0_rtx;
|| icode == CODE_FOR_altivec_vspltisw)
{
/* Only allow 5-bit *signed* literals. */
- if (GET_CODE (op0) != CONST_INT
+ if (!CONST_INT_P (op0)
|| INTVAL (op0) > 15
|| INTVAL (op0) < -16)
{
return TEXASRU_SPR;
}
-/* Return the appropriate SPR regno associated with the given builtin. */
-static inline HOST_WIDE_INT
-htm_spr_regno (enum rs6000_builtins code)
-{
- if (code == HTM_BUILTIN_GET_TFHAR
- || code == HTM_BUILTIN_SET_TFHAR)
- return TFHAR_REGNO;
- else if (code == HTM_BUILTIN_GET_TFIAR
- || code == HTM_BUILTIN_SET_TFIAR)
- return TFIAR_REGNO;
- gcc_assert (code == HTM_BUILTIN_GET_TEXASR
- || code == HTM_BUILTIN_SET_TEXASR
- || code == HTM_BUILTIN_GET_TEXASRU
- || code == HTM_BUILTIN_SET_TEXASRU);
- return TEXASR_REGNO;
-}
-
/* Return the correct ICODE value depending on whether we are
setting or reading the HTM SPRs. */
static inline enum insn_code
{
machine_mode mode = (TARGET_POWERPC64) ? DImode : SImode;
op[nopnds++] = gen_rtx_CONST_INT (mode, htm_spr_num (fcode));
- op[nopnds++] = gen_rtx_REG (mode, htm_spr_regno (fcode));
}
/* If this builtin accesses a CR, then pass in a scratch
CR as the last operand. */
if (!(attr & RS6000_BTC_VOID))
expected_nopnds += 1;
if (uses_spr)
- expected_nopnds += 2;
+ expected_nopnds += 1;
gcc_assert (nopnds == expected_nopnds
&& nopnds <= MAX_HTM_OPERANDS);
if (TREE_CODE (arg2) != INTEGER_CST
|| wi::geu_p (wi::to_wide (arg2), 16))
{
- error ("argument 3 must be in the range 0..15");
+ error ("argument 3 must be in the range [0, 15]");
return CONST0_RTX (tmode);
}
}
if (!tree_fits_uhwi_p (arg)
|| (elt = tree_to_uhwi (arg), elt > max))
{
- error ("selector must be an integer constant in the range 0..%wi", max);
+ error ("selector must be an integer constant in the range [0, %wi]", max);
return 0;
}
op0 = expand_normal (arg0);
op1 = expand_normal (arg1);
- /* Call get_element_number to validate arg1 if it is a constant. */
if (TREE_CODE (arg1) == INTEGER_CST)
- (void) get_element_number (TREE_TYPE (arg0), arg1);
+ {
+ unsigned HOST_WIDE_INT elt;
+ unsigned HOST_WIDE_INT size = TYPE_VECTOR_SUBPARTS (TREE_TYPE (arg0));
+ unsigned int truncated_selector;
+ /* Even if !tree_fits_uhwi_p (arg1)), TREE_INT_CST_LOW (arg0)
+ returns low-order bits of INTEGER_CST for modulo indexing. */
+ elt = TREE_INT_CST_LOW (arg1);
+ truncated_selector = elt % size;
+ op1 = GEN_INT (truncated_selector);
+ }
tmode = TYPE_MODE (TREE_TYPE (TREE_TYPE (arg0)));
mode0 = TYPE_MODE (TREE_TYPE (arg0));
if (TREE_CODE (arg1) != INTEGER_CST || TREE_INT_CST_LOW (arg1) > 12)
{
- error ("second argument to %qs must be 0..12", "vec_vextract4b");
+ error ("second argument to %qs must be [0, 12]", "vec_vextract4b");
return expand_call (exp, target, false);
}
break;
if (TREE_CODE (arg2) != INTEGER_CST || TREE_INT_CST_LOW (arg2) > 12)
{
- error ("third argument to %qs must be 0..12", "vec_vinsert4b");
+ error ("third argument to %qs must be [0, 12]", "vec_vinsert4b");
return expand_call (exp, target, false);
}
break;
error ("builtin function %qs requires ISA 3.0 IEEE 128-bit floating point",
name);
else if ((fnmask & RS6000_BTM_FLOAT128) != 0)
- error ("builtin function %qs requires the %qs option", name, "-mfloat128");
+ error ("builtin function %qs requires the %qs option", name,
+ "%<-mfloat128%>");
else if ((fnmask & (RS6000_BTM_POPCNTD | RS6000_BTM_POWERPC64))
== (RS6000_BTM_POPCNTD | RS6000_BTM_POWERPC64))
error ("builtin function %qs requires the %qs (or newer), and "
case ALTIVEC_BUILTIN_VSRAH:
case ALTIVEC_BUILTIN_VSRAW:
case P8V_BUILTIN_VSRAD:
- arg0 = gimple_call_arg (stmt, 0);
- arg1 = gimple_call_arg (stmt, 1);
- lhs = gimple_call_lhs (stmt);
- g = gimple_build_assign (lhs, RSHIFT_EXPR, arg0, arg1);
- gimple_set_location (g, gimple_location (stmt));
- gsi_replace (gsi, g, true);
- return true;
+ {
+ arg0 = gimple_call_arg (stmt, 0);
+ arg1 = gimple_call_arg (stmt, 1);
+ lhs = gimple_call_lhs (stmt);
+ tree arg1_type = TREE_TYPE (arg1);
+ tree unsigned_arg1_type = unsigned_type_for (TREE_TYPE (arg1));
+ tree unsigned_element_type = unsigned_type_for (TREE_TYPE (arg1_type));
+ location_t loc = gimple_location (stmt);
+ /* Force arg1 into the range valid matching the arg0 type. */
+ /* Build a vector consisting of the max valid bit-size values. */
+ int n_elts = VECTOR_CST_NELTS (arg1);
+ tree element_size = build_int_cst (unsigned_element_type,
+ 128 / n_elts);
+ tree_vector_builder elts (unsigned_arg1_type, n_elts, 1);
+ for (int i = 0; i < n_elts; i++)
+ elts.safe_push (element_size);
+ tree modulo_tree = elts.build ();
+ /* Modulo the provided shift value against that vector. */
+ gimple_seq stmts = NULL;
+ tree unsigned_arg1 = gimple_build (&stmts, VIEW_CONVERT_EXPR,
+ unsigned_arg1_type, arg1);
+ tree new_arg1 = gimple_build (&stmts, loc, TRUNC_MOD_EXPR,
+ unsigned_arg1_type, unsigned_arg1,
+ modulo_tree);
+ gsi_insert_seq_before (gsi, stmts, GSI_SAME_STMT);
+ /* And finally, do the shift. */
+ g = gimple_build_assign (lhs, RSHIFT_EXPR, arg0, new_arg1);
+ gimple_set_location (g, loc);
+ gsi_replace (gsi, g, true);
+ return true;
+ }
/* Flavors of vector shift left.
builtin_altivec_vsl{b,h,w} -> vsl{b,h,w}. */
case ALTIVEC_BUILTIN_VSLB:
arg0 = gimple_call_arg (stmt, 0);
arg1 = gimple_call_arg (stmt, 1);
lhs = gimple_call_lhs (stmt);
+ tree arg1_type = TREE_TYPE (arg1);
+ tree unsigned_arg1_type = unsigned_type_for (TREE_TYPE (arg1));
+ tree unsigned_element_type = unsigned_type_for (TREE_TYPE (arg1_type));
+ location_t loc = gimple_location (stmt);
gimple_seq stmts = NULL;
/* Convert arg0 to unsigned. */
tree arg0_unsigned
= gimple_build (&stmts, VIEW_CONVERT_EXPR,
unsigned_type_for (TREE_TYPE (arg0)), arg0);
+ /* Force arg1 into the range valid matching the arg0 type. */
+ /* Build a vector consisting of the max valid bit-size values. */
+ int n_elts = VECTOR_CST_NELTS (arg1);
+ tree element_size = build_int_cst (unsigned_element_type,
+ 128 / n_elts);
+ tree_vector_builder elts (unsigned_arg1_type, n_elts, 1);
+ for (int i = 0; i < n_elts; i++)
+ elts.safe_push (element_size);
+ tree modulo_tree = elts.build ();
+ /* Modulo the provided shift value against that vector. */
+ tree unsigned_arg1 = gimple_build (&stmts, VIEW_CONVERT_EXPR,
+ unsigned_arg1_type, arg1);
+ tree new_arg1 = gimple_build (&stmts, loc, TRUNC_MOD_EXPR,
+ unsigned_arg1_type, unsigned_arg1,
+ modulo_tree);
+ /* Do the shift. */
tree res
= gimple_build (&stmts, RSHIFT_EXPR,
- TREE_TYPE (arg0_unsigned), arg0_unsigned, arg1);
+ TREE_TYPE (arg0_unsigned), arg0_unsigned, new_arg1);
/* Convert result back to the lhs type. */
res = gimple_build (&stmts, VIEW_CONVERT_EXPR, TREE_TYPE (lhs), res);
gsi_insert_seq_before (gsi, stmts, GSI_SAME_STMT);
arg1_type, temp_addr,
build_int_cst (arg1_type, -16));
gsi_insert_seq_before (gsi, stmts, GSI_SAME_STMT);
+ if (!is_gimple_mem_ref_addr (aligned_addr))
+ {
+ tree t = make_ssa_name (TREE_TYPE (aligned_addr));
+ gimple *g = gimple_build_assign (t, aligned_addr);
+ gsi_insert_before (gsi, g, GSI_SAME_STMT);
+ aligned_addr = t;
+ }
/* Use the build2 helper to set up the mem_ref. The MEM_REF could also
take an offset, but since we've already incorporated the offset
above, here we just pass in a zero. */
arg2_type, temp_addr,
build_int_cst (arg2_type, -16));
gsi_insert_seq_before (gsi, stmts, GSI_SAME_STMT);
+ if (!is_gimple_mem_ref_addr (aligned_addr))
+ {
+ tree t = make_ssa_name (TREE_TYPE (aligned_addr));
+ gimple *g = gimple_build_assign (t, aligned_addr);
+ gsi_insert_before (gsi, g, GSI_SAME_STMT);
+ aligned_addr = t;
+ }
/* The desired gimple result should be similar to:
MEM[(__vector floatD.1407 *)_1] = vf1D.2697; */
gimple *g
tree temp_addr = gimple_build (&stmts, loc, POINTER_PLUS_EXPR,
arg1_type, arg1, temp_offset);
gsi_insert_seq_before (gsi, stmts, GSI_SAME_STMT);
+ if (!is_gimple_mem_ref_addr (temp_addr))
+ {
+ tree t = make_ssa_name (TREE_TYPE (temp_addr));
+ gimple *g = gimple_build_assign (t, temp_addr);
+ gsi_insert_before (gsi, g, GSI_SAME_STMT);
+ temp_addr = t;
+ }
/* Use the build2 helper to set up the mem_ref. The MEM_REF could also
take an offset, but since we've already incorporated the offset
above, here we just pass in a zero. */
tree temp_addr = gimple_build (&stmts, loc, POINTER_PLUS_EXPR,
arg2_type, arg2, temp_offset);
gsi_insert_seq_before (gsi, stmts, GSI_SAME_STMT);
+ if (!is_gimple_mem_ref_addr (temp_addr))
+ {
+ tree t = make_ssa_name (TREE_TYPE (temp_addr));
+ gimple *g = gimple_build_assign (t, temp_addr);
+ gsi_insert_before (gsi, g, GSI_SAME_STMT);
+ temp_addr = t;
+ }
gimple *g;
g = gimple_build_assign (build2 (MEM_REF, align_stype, temp_addr,
build_int_cst (arg2_type, 0)), arg0);
case ALTIVEC_BUILTIN_VSPLTISH:
case ALTIVEC_BUILTIN_VSPLTISW:
{
- int size;
- if (fn_code == ALTIVEC_BUILTIN_VSPLTISB)
- size = 8;
- else if (fn_code == ALTIVEC_BUILTIN_VSPLTISH)
- size = 16;
- else
- size = 32;
-
arg0 = gimple_call_arg (stmt, 0);
lhs = gimple_call_lhs (stmt);
/* Only fold the vec_splat_*() if the lower bits of arg 0 is a
5-bit signed constant in range -16 to +15. */
if (TREE_CODE (arg0) != INTEGER_CST
- || !IN_RANGE (sext_hwi (TREE_INT_CST_LOW (arg0), size),
- -16, 15))
+ || !IN_RANGE (TREE_INT_CST_LOW (arg0), -16, 15))
return false;
gimple_seq stmts = NULL;
location_t loc = gimple_location (stmt);
{
/* unsigned 1 argument functions. */
case CRYPTO_BUILTIN_VSBOX:
+ case CRYPTO_BUILTIN_VSBOX_BE:
case P8V_BUILTIN_VGBBD:
case MISC_BUILTIN_CDTBCD:
case MISC_BUILTIN_CBCDTD:
case ALTIVEC_BUILTIN_VMULOUH:
case P8V_BUILTIN_VMULOUW:
case CRYPTO_BUILTIN_VCIPHER:
+ case CRYPTO_BUILTIN_VCIPHER_BE:
case CRYPTO_BUILTIN_VCIPHERLAST:
+ case CRYPTO_BUILTIN_VCIPHERLAST_BE:
case CRYPTO_BUILTIN_VNCIPHER:
+ case CRYPTO_BUILTIN_VNCIPHER_BE:
case CRYPTO_BUILTIN_VNCIPHERLAST:
+ case CRYPTO_BUILTIN_VNCIPHERLAST_BE:
case CRYPTO_BUILTIN_VPMSUMB:
case CRYPTO_BUILTIN_VPMSUMH:
case CRYPTO_BUILTIN_VPMSUMW:
registers_ok_for_quad_peep (rtx reg1, rtx reg2)
{
/* We might have been passed a SUBREG. */
- if (GET_CODE (reg1) != REG || GET_CODE (reg2) != REG)
+ if (!REG_P (reg1) || !REG_P (reg2))
return 0;
/* We might have been passed non floating point registers. */
if (GET_CODE (addr1) == PLUS)
{
/* If not a REG, return zero. */
- if (GET_CODE (XEXP (addr1, 0)) != REG)
+ if (!REG_P (XEXP (addr1, 0)))
return 0;
else
{
reg1 = REGNO (XEXP (addr1, 0));
/* The offset must be constant! */
- if (GET_CODE (XEXP (addr1, 1)) != CONST_INT)
+ if (!CONST_INT_P (XEXP (addr1, 1)))
return 0;
offset1 = INTVAL (XEXP (addr1, 1));
}
}
- else if (GET_CODE (addr1) != REG)
+ else if (!REG_P (addr1))
return 0;
else
{
if (GET_CODE (addr2) == PLUS)
{
/* If not a REG, return zero. */
- if (GET_CODE (XEXP (addr2, 0)) != REG)
+ if (!REG_P (XEXP (addr2, 0)))
return 0;
else
{
reg2 = REGNO (XEXP (addr2, 0));
/* The offset must be constant. */
- if (GET_CODE (XEXP (addr2, 1)) != CONST_INT)
+ if (!CONST_INT_P (XEXP (addr2, 1)))
return 0;
offset2 = INTVAL (XEXP (addr2, 1));
}
}
- else if (GET_CODE (addr2) != REG)
+ else if (!REG_P (addr2))
return 0;
else
{
HOST_WIDE_INT regno;
enum reg_class rclass;
- if (GET_CODE (reg) == SUBREG)
+ if (SUBREG_P (reg))
reg = SUBREG_REG (reg);
if (!REG_P (reg))
return NO_REG_TYPE;
regno = REGNO (reg);
- if (regno >= FIRST_PSEUDO_REGISTER)
+ if (!HARD_REGISTER_NUM_P (regno))
{
if (!lra_in_progress && !reload_completed)
return PSEUDO_REG_TYPE;
regno = true_regnum (reg);
- if (regno < 0 || regno >= FIRST_PSEUDO_REGISTER)
+ if (regno < 0 || !HARD_REGISTER_NUM_P (regno))
return PSEUDO_REG_TYPE;
}
case AND:
and_arg = XEXP (addr, 0);
if (GET_MODE_SIZE (mode) != 16
- || GET_CODE (XEXP (addr, 1)) != CONST_INT
+ || !CONST_INT_P (XEXP (addr, 1))
|| INTVAL (XEXP (addr, 1)) != -16)
{
fail_msg = "bad Altivec AND #1";
return true;
}
- /* Power6+: MFTGPR or MFFGPR. */
- else if (TARGET_MFPGPR && TARGET_POWERPC64 && size == 8
- && ((to_type == GPR_REG_TYPE && from_type == FPR_REG_TYPE)
- || (to_type == FPR_REG_TYPE && from_type == GPR_REG_TYPE)))
- return true;
-
/* Move to/from SPR. */
else if ((size == 4 || (TARGET_POWERPC64 && size == 8))
&& ((to_type == GPR_REG_TYPE && from_type == SPR_REG_TYPE)
/* Allow subreg of memory before/during reload. */
bool memory_p = (MEM_P (x)
- || (!reload_completed && GET_CODE (x) == SUBREG
+ || (!reload_completed && SUBREG_P (x)
&& MEM_P (SUBREG_REG (x))));
sri->icode = CODE_FOR_nothing;
if (!done_p && reg_addr[mode].scalar_in_vmx_p
&& !mode_supports_vmx_dform (mode)
&& (rclass == VSX_REGS || rclass == ALTIVEC_REGS)
- && (memory_p || (GET_CODE (x) == CONST_DOUBLE)))
+ && (memory_p || CONST_DOUBLE_P (x)))
{
ret = FLOAT_REGS;
default_p = false;
rtx cc_clobber;
rtvec rv;
- if (regno < 0 || regno >= FIRST_PSEUDO_REGISTER || !MEM_P (mem)
+ if (regno < 0 || !HARD_REGISTER_NUM_P (regno) || !MEM_P (mem)
|| !base_reg_operand (scratch, GET_MODE (scratch)))
rs6000_secondary_reload_fail (__LINE__, reg, mem, scratch, store_p);
op1 = XEXP (addr, 1);
if ((addr_mask & RELOAD_REG_AND_M16) == 0)
{
- if (REG_P (op0) || GET_CODE (op0) == SUBREG)
+ if (REG_P (op0) || SUBREG_P (op0))
op_reg = op0;
else if (GET_CODE (op1) == PLUS)
debug_rtx (scratch);
}
- gcc_assert (regno >= 0 && regno < FIRST_PSEUDO_REGISTER);
- gcc_assert (GET_CODE (mem) == MEM);
+ gcc_assert (regno >= 0 && HARD_REGISTER_NUM_P (regno));
+ gcc_assert (MEM_P (mem));
rclass = REGNO_REG_CLASS (regno);
gcc_assert (rclass == GENERAL_REGS || rclass == BASE_REGS);
addr = XEXP (mem, 0);
return NO_REGS;
}
- if (GET_MODE_CLASS (mode) == MODE_INT && rclass == NON_SPECIAL_REGS)
+ if (GET_MODE_CLASS (mode) == MODE_INT && rclass == GEN_OR_FLOAT_REGS)
return GENERAL_REGS;
return rclass;
On Darwin, pic addresses require a load from memory, which
needs a base register. */
if (rclass != BASE_REGS
- && (GET_CODE (in) == SYMBOL_REF
+ && (SYMBOL_REF_P (in)
|| GET_CODE (in) == HIGH
|| GET_CODE (in) == LABEL_REF
|| GET_CODE (in) == CONST))
return BASE_REGS;
}
- if (GET_CODE (in) == REG)
+ if (REG_P (in))
{
regno = REGNO (in);
- if (regno >= FIRST_PSEUDO_REGISTER)
+ if (!HARD_REGISTER_NUM_P (regno))
{
regno = true_regnum (in);
- if (regno >= FIRST_PSEUDO_REGISTER)
+ if (!HARD_REGISTER_NUM_P (regno))
regno = -1;
}
}
- else if (GET_CODE (in) == SUBREG)
+ else if (SUBREG_P (in))
{
regno = true_regnum (in);
- if (regno >= FIRST_PSEUDO_REGISTER)
+ if (!HARD_REGISTER_NUM_P (regno))
regno = -1;
}
else
/* Constants, memory, and FP registers can go into FP registers. */
if ((regno == -1 || FP_REGNO_P (regno))
- && (rclass == FLOAT_REGS || rclass == NON_SPECIAL_REGS))
+ && (rclass == FLOAT_REGS || rclass == GEN_OR_FLOAT_REGS))
return (mode != SDmode || lra_in_progress) ? NO_REGS : GENERAL_REGS;
/* Memory, and AltiVec registers can go into AltiVec registers. */
/* Constants. */
else if (dest_regno >= 0
- && (GET_CODE (src) == CONST_INT
- || GET_CODE (src) == CONST_WIDE_INT
- || GET_CODE (src) == CONST_DOUBLE
+ && (CONST_INT_P (src)
+ || CONST_WIDE_INT_P (src)
+ || CONST_DOUBLE_P (src)
|| GET_CODE (src) == CONST_VECTOR))
{
if (dest_gpr_p)
return ggc_cleared_alloc<machine_function> ();
}
\f
-#define INT_P(X) (GET_CODE (X) == CONST_INT && GET_MODE (X) == VOIDmode)
+#define INT_P(X) (CONST_INT_P (X) && GET_MODE (X) == VOIDmode)
/* Write out a function code label. */
case 'G':
/* X is a constant integer. If it is negative, print "m",
otherwise print "z". This is to make an aze or ame insn. */
- if (GET_CODE (x) != CONST_INT)
+ if (!CONST_INT_P (x))
output_operand_lossage ("invalid %%G value");
else if (INTVAL (x) >= 0)
putc ('z', file);
if (GET_CODE (x) == CONST)
{
if (GET_CODE (XEXP (x, 0)) != PLUS
- || (GET_CODE (XEXP (XEXP (x, 0), 0)) != SYMBOL_REF
+ || (!SYMBOL_REF_P (XEXP (XEXP (x, 0), 0))
&& GET_CODE (XEXP (XEXP (x, 0), 0)) != LABEL_REF)
- || GET_CODE (XEXP (XEXP (x, 0), 1)) != CONST_INT)
+ || !CONST_INT_P (XEXP (XEXP (x, 0), 1)))
output_operand_lossage ("invalid %%K value");
}
print_operand_address (file, x);
case 'P':
/* The operand must be an indirect memory reference. The result
is the register name. */
- if (GET_CODE (x) != MEM || GET_CODE (XEXP (x, 0)) != REG
+ if (!MEM_P (x) || !REG_P (XEXP (x, 0))
|| REGNO (XEXP (x, 0)) >= 32)
output_operand_lossage ("invalid %%P value");
else
/* Print the symbolic name of a branch target register. */
if (GET_CODE (x) == UNSPEC && XINT (x, 1) == UNSPEC_PLTSEQ)
x = XVECEXP (x, 0, 0);
- if (GET_CODE (x) != REG || (REGNO (x) != LR_REGNO
- && REGNO (x) != CTR_REGNO))
+ if (!REG_P (x) || (REGNO (x) != LR_REGNO
+ && REGNO (x) != CTR_REGNO))
output_operand_lossage ("invalid %%T value");
else if (REGNO (x) == LR_REGNO)
fputs ("lr", file);
case 'x':
/* X is a FPR or Altivec register used in a VSX context. */
- if (GET_CODE (x) != REG || !VSX_REGNO_P (REGNO (x)))
+ if (!REG_P (x) || !VSX_REGNO_P (REGNO (x)))
output_operand_lossage ("invalid %%x value");
else
{
if (VECTOR_MEM_ALTIVEC_OR_VSX_P (GET_MODE (x))
&& GET_CODE (tmp) == AND
- && GET_CODE (XEXP (tmp, 1)) == CONST_INT
+ && CONST_INT_P (XEXP (tmp, 1))
&& INTVAL (XEXP (tmp, 1)) == -16)
tmp = XEXP (tmp, 0);
else if (VECTOR_MEM_VSX_P (GET_MODE (x))
{
if (REG_P (x))
fprintf (file, "0(%s)", reg_names[ REGNO (x) ]);
- else if (GET_CODE (x) == SYMBOL_REF || GET_CODE (x) == CONST
+
+ /* Is it a pc-relative address? */
+ else if (pcrel_address (x, Pmode))
+ {
+ HOST_WIDE_INT offset;
+
+ if (GET_CODE (x) == CONST)
+ x = XEXP (x, 0);
+
+ if (GET_CODE (x) == PLUS)
+ {
+ offset = INTVAL (XEXP (x, 1));
+ x = XEXP (x, 0);
+ }
+ else
+ offset = 0;
+
+ output_addr_const (file, x);
+
+ if (offset)
+ fprintf (file, "%+" PRId64, offset);
+
+ fputs ("@pcrel", file);
+ }
+ else if (SYMBOL_REF_P (x) || GET_CODE (x) == CONST
|| GET_CODE (x) == LABEL_REF)
{
output_addr_const (file, x);
reg_names[ REGNO (XEXP (x, 1)) ]);
}
else if (GET_CODE (x) == PLUS && REG_P (XEXP (x, 0))
- && GET_CODE (XEXP (x, 1)) == CONST_INT)
+ && CONST_INT_P (XEXP (x, 1)))
fprintf (file, HOST_WIDE_INT_PRINT_DEC "(%s)",
INTVAL (XEXP (x, 1)), reg_names[ REGNO (XEXP (x, 0)) ]);
#if TARGET_MACHO
switch (XINT (x, 1))
{
case UNSPEC_TOCREL:
- gcc_checking_assert (GET_CODE (XVECEXP (x, 0, 0)) == SYMBOL_REF
+ gcc_checking_assert (SYMBOL_REF_P (XVECEXP (x, 0, 0))
&& REG_P (XVECEXP (x, 0, 1))
&& REGNO (XVECEXP (x, 0, 1)) == TOC_REGISTER);
output_addr_const (file, XVECEXP (x, 0, 0));
/* Remove initial .'s to turn a -mcall-aixdesc function
address into the address of the descriptor, not the function
itself. */
- else if (GET_CODE (x) == SYMBOL_REF
+ else if (SYMBOL_REF_P (x)
&& XSTR (x, 0)[0] == '.'
&& DEFAULT_ABI == ABI_AIX)
{
(DEFAULT_ABI == ABI_V4 && TARGET_SECURE_PLT && flag_pic == 2
? "+32768" : ""));
- static char str[32]; /* 2 spare */
- if (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
+ static char str[32]; /* 1 spare */
+ if (rs6000_pcrel_p (cfun))
+ sprintf (str, "b%s %s@notoc%s", sibcall ? "" : "l", z, arg);
+ else if (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
sprintf (str, "b%s %s%s%s", sibcall ? "" : "l", z, arg,
sibcall ? "" : "\n\tnop");
else if (DEFAULT_ABI == ABI_V4)
bool sibcall)
{
/* -Wformat-overflow workaround, without which gcc thinks that %u
- might produce 10 digits. */
+ might produce 10 digits. Note that -Wformat-overflow will not
+ currently warn here for str[], so do not rely on a warning to
+ ensure str[] is correctly sized. */
gcc_assert (funop <= MAX_RECOG_OPERANDS);
- static char str[144]; /* 1 spare */
+ /* Currently, funop is either 0 or 1. The maximum string is always
+ a !speculate 64-bit __tls_get_addr call.
+
+ ABI_ELFv2, pcrel:
+ . 27 .reloc .,R_PPC64_TLSGD,%2\n\t
+ . 35 .reloc .,R_PPC64_PLTSEQ_NOTOC,%z1\n\t
+ . 9 crset 2\n\t
+ . 27 .reloc .,R_PPC64_TLSGD,%2\n\t
+ . 36 .reloc .,R_PPC64_PLTCALL_NOTOC,%z1\n\t
+ . 8 beq%T1l-
+ .---
+ .142
+
+ ABI_AIX:
+ . 9 ld 2,%3\n\t
+ . 27 .reloc .,R_PPC64_TLSGD,%2\n\t
+ . 29 .reloc .,R_PPC64_PLTSEQ,%z1\n\t
+ . 9 crset 2\n\t
+ . 27 .reloc .,R_PPC64_TLSGD,%2\n\t
+ . 30 .reloc .,R_PPC64_PLTCALL,%z1\n\t
+ . 10 beq%T1l-\n\t
+ . 10 ld 2,%4(1)
+ .---
+ .151
+
+ ABI_ELFv2:
+ . 27 .reloc .,R_PPC64_TLSGD,%2\n\t
+ . 29 .reloc .,R_PPC64_PLTSEQ,%z1\n\t
+ . 9 crset 2\n\t
+ . 27 .reloc .,R_PPC64_TLSGD,%2\n\t
+ . 30 .reloc .,R_PPC64_PLTCALL,%z1\n\t
+ . 10 beq%T1l-\n\t
+ . 10 ld 2,%3(1)
+ .---
+ .142
+
+ ABI_V4:
+ . 27 .reloc .,R_PPC64_TLSGD,%2\n\t
+ . 35 .reloc .,R_PPC64_PLTSEQ,%z1+32768\n\t
+ . 9 crset 2\n\t
+ . 27 .reloc .,R_PPC64_TLSGD,%2\n\t
+ . 36 .reloc .,R_PPC64_PLTCALL,%z1+32768\n\t
+ . 8 beq%T1l-
+ .---
+ .141 */
+ static char str[160]; /* 8 spare */
char *s = str;
const char *ptrload = TARGET_64BIT ? "d" : "wz";
|| (REG_P (operands[funop])
&& REGNO (operands[funop]) == LR_REGNO));
- if (!TARGET_MACHO && HAVE_AS_PLTSEQ && GET_CODE (operands[funop]) == UNSPEC)
+ if (TARGET_PLTSEQ && GET_CODE (operands[funop]) == UNSPEC)
{
const char *rel64 = TARGET_64BIT ? "64" : "";
char tls[29];
tls[0] = 0;
- if (GET_CODE (operands[funop + 1]) == UNSPEC)
+ if (TARGET_TLS_MARKERS && GET_CODE (operands[funop + 1]) == UNSPEC)
{
if (XINT (operands[funop + 1], 1) == UNSPEC_TLSGD)
sprintf (tls, ".reloc .,R_PPC%s_TLSGD,%%%u\n\t",
gcc_unreachable ();
}
+ const char *notoc = rs6000_pcrel_p (cfun) ? "_NOTOC" : "";
const char *addend = (DEFAULT_ABI == ABI_V4 && TARGET_SECURE_PLT
&& flag_pic == 2 ? "+32768" : "");
if (!speculate)
{
s += sprintf (s,
- "%s.reloc .,R_PPC%s_PLTSEQ,%%z%u%s\n\t",
- tls, rel64, funop, addend);
+ "%s.reloc .,R_PPC%s_PLTSEQ%s,%%z%u%s\n\t",
+ tls, rel64, notoc, funop, addend);
s += sprintf (s, "crset 2\n\t");
}
s += sprintf (s,
- "%s.reloc .,R_PPC%s_PLTCALL,%%z%u%s\n\t",
- tls, rel64, funop, addend);
+ "%s.reloc .,R_PPC%s_PLTCALL%s,%%z%u%s\n\t",
+ tls, rel64, notoc, funop, addend);
}
else if (!speculate)
s += sprintf (s, "crset 2\n\t");
- if (DEFAULT_ABI == ABI_AIX)
+ if (rs6000_pcrel_p (cfun))
+ {
+ if (speculate)
+ sprintf (s, "b%%T%ul", funop);
+ else
+ sprintf (s, "beq%%T%ul-", funop);
+ }
+ else if (DEFAULT_ABI == ABI_AIX)
{
if (speculate)
sprintf (s,
}
#if HAVE_AS_PLTSEQ
-/* Output indirect call insns.
- WHICH is 0 for tocsave, 1 for plt16_ha, 2 for plt16_lo, 3 for mtctr. */
+/* Output indirect call insns. WHICH identifies the type of sequence. */
const char *
rs6000_pltseq_template (rtx *operands, int which)
{
const char *rel64 = TARGET_64BIT ? "64" : "";
- char tls[28];
+ char tls[30];
tls[0] = 0;
- if (GET_CODE (operands[3]) == UNSPEC)
+ if (TARGET_TLS_MARKERS && GET_CODE (operands[3]) == UNSPEC)
{
+ char off = which == RS6000_PLTSEQ_PLT_PCREL34 ? '8' : '4';
if (XINT (operands[3], 1) == UNSPEC_TLSGD)
- sprintf (tls, ".reloc .,R_PPC%s_TLSGD,%%3\n\t",
- rel64);
+ sprintf (tls, ".reloc .-%c,R_PPC%s_TLSGD,%%3\n\t",
+ off, rel64);
else if (XINT (operands[3], 1) == UNSPEC_TLSLD)
- sprintf (tls, ".reloc .,R_PPC%s_TLSLD,%%&\n\t",
- rel64);
+ sprintf (tls, ".reloc .-%c,R_PPC%s_TLSLD,%%&\n\t",
+ off, rel64);
else
gcc_unreachable ();
}
gcc_assert (DEFAULT_ABI == ABI_ELFv2 || DEFAULT_ABI == ABI_V4);
- static char str[96]; /* 15 spare */
- const char *off = WORDS_BIG_ENDIAN ? "+2" : "";
+ static char str[96]; /* 10 spare */
+ char off = WORDS_BIG_ENDIAN ? '2' : '4';
const char *addend = (DEFAULT_ABI == ABI_V4 && TARGET_SECURE_PLT
&& flag_pic == 2 ? "+32768" : "");
switch (which)
{
- case 0:
+ case RS6000_PLTSEQ_TOCSAVE:
sprintf (str,
- "%s.reloc .,R_PPC%s_PLTSEQ,%%z2\n\t"
- "st%s",
- tls, rel64, TARGET_64BIT ? "d 2,24(1)" : "w 2,12(1)");
+ "st%s\n\t"
+ "%s.reloc .-4,R_PPC%s_PLTSEQ,%%z2",
+ TARGET_64BIT ? "d 2,24(1)" : "w 2,12(1)",
+ tls, rel64);
break;
- case 1:
+ case RS6000_PLTSEQ_PLT16_HA:
if (DEFAULT_ABI == ABI_V4 && !flag_pic)
sprintf (str,
- "%s.reloc .%s,R_PPC%s_PLT16_HA,%%z2\n\t"
- "lis %%0,0",
+ "lis %%0,0\n\t"
+ "%s.reloc .-%c,R_PPC%s_PLT16_HA,%%z2",
tls, off, rel64);
else
sprintf (str,
- "%s.reloc .%s,R_PPC%s_PLT16_HA,%%z2%s\n\t"
- "addis %%0,%%1,0",
+ "addis %%0,%%1,0\n\t"
+ "%s.reloc .-%c,R_PPC%s_PLT16_HA,%%z2%s",
tls, off, rel64, addend);
break;
- case 2:
+ case RS6000_PLTSEQ_PLT16_LO:
sprintf (str,
- "%s.reloc .%s,R_PPC%s_PLT16_LO%s,%%z2%s\n\t"
- "l%s %%0,0(%%1)",
- tls, off, rel64, TARGET_64BIT ? "_DS" : "", addend,
- TARGET_64BIT ? "d" : "wz");
+ "l%s %%0,0(%%1)\n\t"
+ "%s.reloc .-%c,R_PPC%s_PLT16_LO%s,%%z2%s",
+ TARGET_64BIT ? "d" : "wz",
+ tls, off, rel64, TARGET_64BIT ? "_DS" : "", addend);
break;
- case 3:
+ case RS6000_PLTSEQ_MTCTR:
sprintf (str,
- "%s.reloc .,R_PPC%s_PLTSEQ,%%z2%s\n\t"
- "mtctr %%1",
+ "mtctr %%1\n\t"
+ "%s.reloc .-4,R_PPC%s_PLTSEQ,%%z2%s",
tls, rel64, addend);
break;
+ case RS6000_PLTSEQ_PLT_PCREL34:
+ sprintf (str,
+ "pl%s %%0,0(0),1\n\t"
+ "%s.reloc .-8,R_PPC%s_PLT_PCREL34_NOTOC,%%z2",
+ TARGET_64BIT ? "d" : "wz",
+ tls, rel64);
+ break;
default:
gcc_unreachable ();
}
}
#endif
+/* Report whether prefixed load/store instructions may be used with MODE.
+ At present only VOIDmode qualifies: it is the mode used when a
+ pc-relative address is being materialized into a base register rather
+ than consumed directly by a memory access. Further modes will be
+ accepted here as prefixed-memory support is implemented for them. */
+
+static bool
+mode_supports_prefixed_address_p (machine_mode mode)
+{
+ if (mode != VOIDmode)
+ return false;
+
+ return true;
+}
+
+/* Function to return true if ADDR is a valid prefixed memory address that uses
+ mode MODE. Returns false whenever prefixed addressing is disabled on the
+ target or MODE has no prefixed support yet; otherwise accepts pc-relative
+ addresses and reg+offset addresses whose offset needs a prefixed (34-bit
+ displacement) instruction. */
+
+bool
+rs6000_prefixed_address (rtx addr, machine_mode mode)
+{
+ if (!TARGET_PREFIXED_ADDR || !mode_supports_prefixed_address_p (mode))
+ return false;
+
+ /* Check for PC-relative addresses. */
+ if (pcrel_address (addr, Pmode))
+ return true;
+
+ /* Check for prefixed memory addresses that have a large numeric offset,
+ or an offset that can't be used for a DS/DQ-form memory operation. */
+ if (GET_CODE (addr) == PLUS)
+ {
+ rtx op0 = XEXP (addr, 0);
+ rtx op1 = XEXP (addr, 1);
+
+ /* Only base-register + constant-integer forms are candidates. */
+ if (!base_reg_operand (op0, Pmode) || !CONST_INT_P (op1))
+ return false;
+
+ /* Offsets beyond a signed 34-bit range cannot be encoded even by a
+ prefixed instruction. */
+ HOST_WIDE_INT value = INTVAL (op1);
+ if (!SIGNED_34BIT_OFFSET_P (value, 0))
+ return false;
+
+ /* Offset larger than 16-bits? */
+ if (!SIGNED_16BIT_OFFSET_P (value, 0))
+ return true;
+
+ /* From here on the offset fits in 16 bits; a prefixed instruction is
+ only required when the offset violates the alignment restriction of
+ the non-prefixed DS/DQ encodings. */
+ /* DQ instruction (bottom 4 bits must be 0) for vectors. */
+ HOST_WIDE_INT mask;
+ if (GET_MODE_SIZE (mode) >= 16)
+ mask = 15;
+
+ /* DS instruction (bottom 2 bits must be 0). For 32-bit integers, we
+ need to use DS instructions if we are sign-extending the value with
+ LWA. For 32-bit floating point, we need DS instructions to load and
+ store values to the traditional Altivec registers. */
+ else if (GET_MODE_SIZE (mode) >= 4)
+ mask = 3;
+
+ /* QImode/HImode has no restrictions. */
+ else
+ return true;
+
+ /* Return true if we must use a prefixed instruction. */
+ return (value & mask) != 0;
+ }
+
+ return false;
+}
+\f
#if defined (HAVE_GAS_HIDDEN) && !TARGET_MACHO
/* Emit an assembler directive to set symbol visibility for DECL to
VISIBILITY_TYPE. */
/* If we have an unsigned compare, make sure we don't have a signed value as
an immediate. */
- if (comp_mode == CCUNSmode && GET_CODE (op1) == CONST_INT
+ if (comp_mode == CCUNSmode && CONST_INT_P (op1)
&& INTVAL (op1) < 0)
{
op0 = copy_rtx_if_shared (op0);
would treat EQ different to UNORDERED, we can't do it. */
if (HONOR_INFINITIES (compare_mode)
&& code != GT && code != UNGE
- && (GET_CODE (op1) != CONST_DOUBLE
+ && (!CONST_DOUBLE_P (op1)
|| real_isinf (CONST_DOUBLE_REAL_VALUE (op1)))
/* Constructs of the form (a OP b ? a : b) are safe. */
&& ((! rtx_equal_p (op0, false_cond) && ! rtx_equal_p (op1, false_cond))
emit_insn (TARGET_32BIT
? (TARGET_POWERPC64
? gen_movdi_si_update (breg, breg, delta_rtx, nsrc)
- : gen_movsi_update (breg, breg, delta_rtx, nsrc))
+ : gen_movsi_si_update (breg, breg, delta_rtx, nsrc))
: gen_movdi_di_update (breg, breg, delta_rtx, nsrc));
used_update = true;
}
static bool
save_reg_p (int reg)
{
- /* We need to mark the PIC offset register live for the same conditions
- as it is set up, or otherwise it won't be saved before we clobber it. */
-
if (reg == RS6000_PIC_OFFSET_TABLE_REGNUM && !TARGET_SINGLE_PIC_BASE)
{
/* When calling eh_return, we must return true for all the cases
where conditional_register_usage marks the PIC offset reg
- call used. */
+ call used or fixed. */
+ if (crtl->calls_eh_return
+ && ((DEFAULT_ABI == ABI_V4 && flag_pic)
+ || (DEFAULT_ABI == ABI_DARWIN && flag_pic)
+ || (TARGET_TOC && TARGET_MINIMAL_TOC)))
+ return true;
+
+ /* We need to mark the PIC offset register live for the same
+ conditions as it is set up in rs6000_emit_prologue, or
+ otherwise it won't be saved before we clobber it. */
if (TARGET_TOC && TARGET_MINIMAL_TOC
- && (crtl->calls_eh_return
- || df_regs_ever_live_p (reg)
- || !constant_pool_empty_p ()))
+ && !constant_pool_empty_p ())
return true;
- if ((DEFAULT_ABI == ABI_V4 || DEFAULT_ABI == ABI_DARWIN)
+ if (DEFAULT_ABI == ABI_V4
+ && (flag_pic == 1 || (flag_pic && TARGET_SECURE_PLT))
+ && df_regs_ever_live_p (RS6000_PIC_OFFSET_TABLE_REGNUM))
+ return true;
+
+ if (DEFAULT_ABI == ABI_DARWIN
&& flag_pic && crtl->uses_pic_offset_table)
return true;
}
fprintf (stderr, "\tsave-strategy = %04x\n", info->savres_strategy);
+ if (info->abi == ABI_DARWIN)
+ fprintf (stderr, "\tWORLD_SAVE_P = %5d\n", WORLD_SAVE_P(info));
+
fprintf (stderr, "\n");
}
return get_hard_reg_initial_val (Pmode, LR_REGNO);
}
+/* Helper function for rs6000_function_ok_for_sibcall. DECL is the callee's
+ FUNCTION_DECL; it may be NULL (e.g. for an indirect call). Return true if
+ the current ABI permits a sibling call to DECL. */
+
+static bool
+rs6000_decl_ok_for_sibcall (tree decl)
+{
+ /* Sibcalls are always fine for the Darwin ABI. */
+ if (DEFAULT_ABI == ABI_DARWIN)
+ return true;
+
+ if (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
+ {
+ /* Under the AIX or ELFv2 ABIs we can't allow calls to non-local
+ functions, because the callee may have a different TOC pointer to
+ the caller and there's no way to ensure we restore the TOC when
+ we return. */
+ if (!decl || DECL_EXTERNAL (decl) || DECL_WEAK (decl)
+ || !(*targetm.binds_local_p) (decl))
+ return false;
+
+ /* Similarly, if the caller preserves the TOC pointer and the callee
+ doesn't (or vice versa), proper TOC setup or restoration will be
+ missed. For example, suppose A, B, and C are in the same binary
+ and A -> B -> C. A and B preserve the TOC pointer but C does not,
+ and B -> C is eligible as a sibcall. A will call B through its
+ local entry point, so A will not restore its TOC itself. B calls
+ C with a sibcall, so it will not restore the TOC. C does not
+ preserve the TOC, so it may clobber r2 with impunity. Returning
+ from C will result in a corrupted TOC for A. */
+ else if (rs6000_fndecl_pcrel_p (decl) != rs6000_pcrel_p (cfun))
+ return false;
+
+ else
+ return true;
+ }
+
+ /* With the secure-plt SYSV ABI we can't make non-local calls when
+ -fpic/PIC because the plt call stubs use r30. Any remaining ABI
+ other than SYSV (V4) is rejected outright. */
+ if (DEFAULT_ABI != ABI_V4
+ || (TARGET_SECURE_PLT
+ && flag_pic
+ && (!decl || !((*targetm.binds_local_p) (decl)))))
+ return false;
+
+ return true;
+}
+
/* Say whether a function is a candidate for sibcall handling or not. */
static bool
return false;
}
- /* Under the AIX or ELFv2 ABIs we can't allow calls to non-local
- functions, because the callee may have a different TOC pointer to
- the caller and there's no way to ensure we restore the TOC when
- we return. With the secure-plt SYSV ABI we can't make non-local
- calls when -fpic/PIC because the plt call stubs use r30. */
- if (DEFAULT_ABI == ABI_DARWIN
- || ((DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
- && decl
- && !DECL_EXTERNAL (decl)
- && !DECL_WEAK (decl)
- && (*targetm.binds_local_p) (decl))
- || (DEFAULT_ABI == ABI_V4
- && (!TARGET_SECURE_PLT
- || !flag_pic
- || (decl
- && (*targetm.binds_local_p) (decl)))))
+ if (rs6000_decl_ok_for_sibcall (decl))
{
tree attr_list = TYPE_ATTRIBUTES (fntype);
if (TARGET_DEBUG_ADDR)
{
- if (GET_CODE (symbol) == SYMBOL_REF)
+ if (SYMBOL_REF_P (symbol))
fprintf (stderr, "\ncreate_TOC_reference, (symbol_ref %s)\n",
XSTR (symbol, 0));
else
size_rtx = tmp_reg;
}
- if (Pmode == SImode)
+ if (TARGET_32BIT)
insn = emit_insn (gen_movsi_update_stack (stack_pointer_rtx,
stack_pointer_rtx,
size_rtx,
orig_sp));
else
- insn = emit_insn (gen_movdi_di_update_stack (stack_pointer_rtx,
- stack_pointer_rtx,
- size_rtx,
- orig_sp));
+ insn = emit_insn (gen_movdi_update_stack (stack_pointer_rtx,
+ stack_pointer_rtx,
+ size_rtx,
+ orig_sp));
rtx par = PATTERN (insn);
gcc_assert (GET_CODE (par) == PARALLEL);
rtx set = XVECEXP (par, 0, 0);
emit_insn (insn);
emit_insn (gen_cond_trap (LTU, stack_reg, tmp_reg, const0_rtx));
}
- else if (GET_CODE (stack_limit_rtx) == SYMBOL_REF
+ else if (SYMBOL_REF_P (stack_limit_rtx)
&& TARGET_32BIT
&& DEFAULT_ABI == ABI_V4
&& !flag_pic)
if (info->vrsave_mask & ALTIVEC_REG_BIT (i))
{
if (!epiloguep || call_used_regs [i])
- clobs[nclobs++] = gen_rtx_CLOBBER (VOIDmode,
- gen_rtx_REG (V4SImode, i));
+ clobs[nclobs++] = gen_hard_reg_clobber (V4SImode, i);
else
{
rtx reg = gen_rtx_REG (V4SImode, i);
if (!(sel & SAVRES_SAVE) && (sel & SAVRES_LR))
RTVEC_ELT (p, offset++) = ret_rtx;
- RTVEC_ELT (p, offset++)
- = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (Pmode, LR_REGNO));
+ RTVEC_ELT (p, offset++) = gen_hard_reg_clobber (Pmode, LR_REGNO);
sym = rs6000_savres_routine_sym (info, sel);
RTVEC_ELT (p, offset++) = gen_rtx_USE (VOIDmode, sym);
if ((sel & SAVRES_REG) == SAVRES_VR)
{
/* Vector regs are saved/restored using [reg+reg] addressing. */
- RTVEC_ELT (p, offset++)
- = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (Pmode, use_reg));
+ RTVEC_ELT (p, offset++) = gen_hard_reg_clobber (Pmode, use_reg);
RTVEC_ELT (p, offset++)
= gen_rtx_USE (VOIDmode, gen_rtx_REG (Pmode, 0));
}
then the arg pointer is used. */
if (cfun->machine->split_stack_arg_pointer != NULL_RTX
&& (!REG_P (cfun->machine->split_stack_arg_pointer)
- || (REGNO (cfun->machine->split_stack_arg_pointer)
- < FIRST_PSEUDO_REGISTER)))
+ || HARD_REGISTER_P (cfun->machine->split_stack_arg_pointer)))
return true;
/* Unfortunately we also need to do some code scanning, since
/* Return whether we need to emit an ELFv2 global entry point prologue. */
static bool
-rs6000_global_entry_point_needed_p (void)
+rs6000_global_entry_point_prologue_needed_p (void)
{
/* Only needed for the ELFv2 ABI. */
if (DEFAULT_ABI != ABI_ELFv2)
if (TARGET_SINGLE_PIC_BASE)
return false;
+ /* PC-relative functions never generate a global entry point prologue. */
+ if (rs6000_pcrel_p (cfun))
+ return false;
+
/* Ensure we have a global entry point for thunks. ??? We could
avoid that if the target routine doesn't need a global entry point,
but we do not know whether this is the case at this point. */
sz += LAST_ALTIVEC_REGNO - info->first_altivec_reg_save + 1;
p = rtvec_alloc (sz);
j = 0;
- RTVEC_ELT (p, j++) = gen_rtx_CLOBBER (VOIDmode,
- gen_rtx_REG (SImode,
- LR_REGNO));
+ RTVEC_ELT (p, j++) = gen_hard_reg_clobber (SImode, LR_REGNO);
RTVEC_ELT (p, j++) = gen_rtx_USE (VOIDmode,
gen_rtx_SYMBOL_REF (Pmode,
"*save_world"));
rs6000_output_function_prologue (FILE *file)
{
if (!cfun->is_thunk)
- rs6000_output_savres_externs (file);
+ {
+ rs6000_output_savres_externs (file);
+#ifdef USING_ELFOS_H
+ const char *curr_machine = rs6000_machine_from_flags ();
+ if (rs6000_machine != curr_machine)
+ {
+ rs6000_machine = curr_machine;
+ emit_asm_machine ();
+ }
+#endif
+ }
/* ELFv2 ABI r2 setup code and local entry point. This must follow
immediately after the global entry point label. */
- if (rs6000_global_entry_point_needed_p ())
+ if (rs6000_global_entry_point_prologue_needed_p ())
{
const char *name = XSTR (XEXP (DECL_RTL (current_function_decl), 0), 0);
-
(*targetm.asm_out.internal_label) (file, "LCF", rs6000_pic_labelno);
if (TARGET_CMODEL != CMODEL_LARGE)
fputs ("\n", file);
}
+ else if (rs6000_pcrel_p (cfun))
+ {
+ const char *name = XSTR (XEXP (DECL_RTL (current_function_decl), 0), 0);
+ /* All functions compiled to use PC-relative addressing will
+ have a .localentry value of 0 or 1. For now we set it to
+ 1 all the time, indicating that the function may clobber
+ the TOC register r2. Later we may optimize this by setting
+ it to 0 if the function is a leaf and does not clobber r2. */
+ fputs ("\t.localentry\t", file);
+ assemble_name (file, name);
+ fputs (",1\n", file);
+ }
+
/* Output -mprofile-kernel code. This needs to be done here instead of
in output_function_profile since it must go after the ELFv2 ABI
local entry point. */
/* Reload CR from REG. */
static void
-restore_saved_cr (rtx reg, int using_mfcr_multiple, bool exit_func)
+restore_saved_cr (rtx reg, bool using_mfcr_multiple, bool exit_func)
{
int count = 0;
int i;
/* Emit function epilogue as insns. */
void
-rs6000_emit_epilogue (int sibcall)
-{
- rs6000_stack_t *info;
- int restoring_GPRs_inline;
- int restoring_FPRs_inline;
- int using_load_multiple;
- int using_mtcr_multiple;
- int use_backchain_to_restore_sp;
- int restore_lr;
- int strategy;
+rs6000_emit_epilogue (enum epilogue_type epilogue_type)
+{
HOST_WIDE_INT frame_off = 0;
rtx sp_reg_rtx = gen_rtx_REG (Pmode, 1);
rtx frame_reg_rtx = sp_reg_rtx;
machine_mode fp_reg_mode = TARGET_HARD_FLOAT ? DFmode : SFmode;
int fp_reg_size = 8;
int i;
- bool exit_func;
unsigned ptr_regno;
- info = rs6000_stack_info ();
+ rs6000_stack_t *info = rs6000_stack_info ();
+
+ if (epilogue_type == EPILOGUE_TYPE_NORMAL && crtl->calls_eh_return)
+ epilogue_type = EPILOGUE_TYPE_EH_RETURN;
+
+ int strategy = info->savres_strategy;
+ bool using_load_multiple = !!(strategy & REST_MULTIPLE);
+ bool restoring_GPRs_inline = !!(strategy & REST_INLINE_GPRS);
+ bool restoring_FPRs_inline = !!(strategy & REST_INLINE_FPRS);
+ if (epilogue_type == EPILOGUE_TYPE_SIBCALL)
+ {
+ restoring_GPRs_inline = true;
+ restoring_FPRs_inline = true;
+ }
+
+ bool using_mtcr_multiple = (rs6000_tune == PROCESSOR_PPC601
+ || rs6000_tune == PROCESSOR_PPC603
+ || rs6000_tune == PROCESSOR_PPC750
+ || optimize_size);
- strategy = info->savres_strategy;
- using_load_multiple = strategy & REST_MULTIPLE;
- restoring_FPRs_inline = sibcall || (strategy & REST_INLINE_FPRS);
- restoring_GPRs_inline = sibcall || (strategy & REST_INLINE_GPRS);
- using_mtcr_multiple = (rs6000_tune == PROCESSOR_PPC601
- || rs6000_tune == PROCESSOR_PPC603
- || rs6000_tune == PROCESSOR_PPC750
- || optimize_size);
/* Restore via the backchain when we have a large frame, since this
is more efficient than an addis, addi pair. The second condition
here will not trigger at the moment; We don't actually need a
frame pointer for alloca, but the generic parts of the compiler
give us one anyway. */
- use_backchain_to_restore_sp = (info->total_size + (info->lr_save_p
- ? info->lr_save_offset
- : 0) > 32767
- || (cfun->calls_alloca
- && !frame_pointer_needed));
- restore_lr = (info->lr_save_p
+ bool use_backchain_to_restore_sp
+ = (info->total_size + (info->lr_save_p ? info->lr_save_offset : 0) > 32767
+ || (cfun->calls_alloca && !frame_pointer_needed));
+
+ bool restore_lr = (info->lr_save_p
&& (restoring_FPRs_inline
|| (strategy & REST_NOINLINE_FPRS_DOESNT_RESTORE_LR))
&& (restoring_GPRs_inline
if (WORLD_SAVE_P (info))
{
- int i, j;
- char rname[30];
- const char *alloc_rname;
- rtvec p;
+ gcc_assert (epilogue_type != EPILOGUE_TYPE_SIBCALL);
/* eh_rest_world_r10 will return to the location saved in the LR
stack slot (which is not likely to be our caller).
The exception-handling stuff that was here in 2.95 is no
longer necessary. */
+ rtvec p;
p = rtvec_alloc (9
+ 32 - info->first_gp_reg_save
+ LAST_ALTIVEC_REGNO + 1 - info->first_altivec_reg_save
+ 63 + 1 - info->first_fp_reg_save);
- strcpy (rname, ((crtl->calls_eh_return) ?
- "*eh_rest_world_r10" : "*rest_world"));
- alloc_rname = ggc_strdup (rname);
+ const char *rname;
+ switch (epilogue_type)
+ {
+ case EPILOGUE_TYPE_NORMAL:
+ rname = ggc_strdup ("*rest_world");
+ break;
- j = 0;
+ case EPILOGUE_TYPE_EH_RETURN:
+ rname = ggc_strdup ("*eh_rest_world_r10");
+ break;
+
+ default:
+ gcc_unreachable ();
+ }
+
+ int j = 0;
RTVEC_ELT (p, j++) = ret_rtx;
RTVEC_ELT (p, j++)
- = gen_rtx_USE (VOIDmode, gen_rtx_SYMBOL_REF (Pmode, alloc_rname));
+ = gen_rtx_USE (VOIDmode, gen_rtx_SYMBOL_REF (Pmode, rname));
/* The instruction pattern requires a clobber here;
it is shared with the restVEC helper. */
- RTVEC_ELT (p, j++)
- = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (Pmode, 11));
+ RTVEC_ELT (p, j++) = gen_hard_reg_clobber (Pmode, 11);
{
/* CR register traditionally saved as CR2. */
}
}
+ int i;
for (i = 0; i < 32 - info->first_gp_reg_save; i++)
{
rtx reg = gen_rtx_REG (reg_mode, info->first_gp_reg_save + i);
&& save_reg_p (info->first_fp_reg_save + i))
cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg, cfa_restores);
}
- RTVEC_ELT (p, j++)
- = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (Pmode, 0));
- RTVEC_ELT (p, j++)
- = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (SImode, 12));
- RTVEC_ELT (p, j++)
- = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (SImode, 7));
- RTVEC_ELT (p, j++)
- = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (SImode, 8));
+ RTVEC_ELT (p, j++) = gen_hard_reg_clobber (Pmode, 0);
+ RTVEC_ELT (p, j++) = gen_hard_reg_clobber (SImode, 12);
+ RTVEC_ELT (p, j++) = gen_hard_reg_clobber (SImode, 7);
+ RTVEC_ELT (p, j++) = gen_hard_reg_clobber (SImode, 8);
RTVEC_ELT (p, j++)
= gen_rtx_USE (VOIDmode, gen_rtx_REG (SImode, 10));
insn = emit_jump_insn (gen_rtx_PARALLEL (VOIDmode, p));
}
else if (info->push_p
&& DEFAULT_ABI != ABI_V4
- && !crtl->calls_eh_return)
+ && epilogue_type != EPILOGUE_TYPE_EH_RETURN)
{
/* Prevent reordering memory accesses against stack pointer restore. */
if (cfun->calls_alloca
function will deallocate the stack, so we don't need to worry
about the unwinder restoring cr from an invalid stack frame
location. */
- exit_func = (!restoring_FPRs_inline
- || (!restoring_GPRs_inline
- && info->first_fp_reg_save == 64));
+ bool exit_func = (!restoring_FPRs_inline
+ || (!restoring_GPRs_inline
+ && info->first_fp_reg_save == 64));
/* In the ELFv2 ABI we need to restore all call-saved CR fields from
*separate* slots if the routine calls __builtin_eh_return, so
restore_saved_lr (0, exit_func);
/* Load exception handler data registers, if needed. */
- if (crtl->calls_eh_return)
+ if (epilogue_type == EPILOGUE_TYPE_EH_RETURN)
{
unsigned int i, regno;
RTX_FRAME_RELATED_P (insn) = 1;
}
- if (crtl->calls_eh_return)
+ if (epilogue_type == EPILOGUE_TYPE_EH_RETURN)
{
rtx sa = EH_RETURN_STACKADJ_RTX;
emit_insn (gen_add3_insn (sp_reg_rtx, sp_reg_rtx, sa));
}
- if (!sibcall && restoring_FPRs_inline)
+ if (epilogue_type != EPILOGUE_TYPE_SIBCALL && restoring_FPRs_inline)
{
if (cfa_restores)
{
emit_jump_insn (targetm.gen_simple_return ());
}
- if (!sibcall && !restoring_FPRs_inline)
+ if (epilogue_type != EPILOGUE_TYPE_SIBCALL && !restoring_FPRs_inline)
{
bool lr = (strategy & REST_NOINLINE_FPRS_DOESNT_RESTORE_LR) == 0;
rtvec p = rtvec_alloc (3 + !!lr + 64 - info->first_fp_reg_save);
int elt = 0;
RTVEC_ELT (p, elt++) = ret_rtx;
if (lr)
- RTVEC_ELT (p, elt++)
- = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (Pmode, LR_REGNO));
+ RTVEC_ELT (p, elt++) = gen_hard_reg_clobber (Pmode, LR_REGNO);
/* We have to restore more than two FP registers, so branch to the
restore function. It will return to our caller. */
if (cfa_restores)
{
- if (sibcall)
+ if (epilogue_type == EPILOGUE_TYPE_SIBCALL)
/* Ensure the cfa_restores are hung off an insn that won't
be reordered above other restores. */
emit_insn (gen_blockage ());
length fields that follow. However, if you omit the optional
fields, the assembler outputs zeros for all optional fields
anyways, giving each variable length field its minimum length
- (as defined in sys/debug.h). Thus we can not use the .tbtab
+ (as defined in sys/debug.h). Thus we cannot use the .tbtab
pseudo-op at all. */
/* An all-zero word flags the start of the tbtab, for debuggers
rtx parameter = DECL_INCOMING_RTL (decl);
machine_mode mode = GET_MODE (parameter);
- if (GET_CODE (parameter) == REG)
+ if (REG_P (parameter))
{
if (SCALAR_FLOAT_MODE_P (mode))
{
if (global_regs[29])
{
- error ("%qs uses register r29", "-fsplit-stack");
+ error ("%qs uses register r29", "%<-fsplit-stack%>");
inform (DECL_SOURCE_LOCATION (global_regs_decl[29]),
"conflicts with %qD", global_regs_decl[29]);
}
allocate = info->total_size;
if (allocate > (unsigned HOST_WIDE_INT) 1 << 31)
{
- sorry ("Stack frame larger than 2G is not supported for -fsplit-stack");
+ sorry ("Stack frame larger than 2G is not supported for "
+ "%<-fsplit-stack%>");
return;
}
if (morestack_ref == NULL_RTX)
HOST_WIDE_INT delta, HOST_WIDE_INT vcall_offset,
tree function)
{
+ const char *fnname = IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (thunk_fndecl));
rtx this_rtx, funexp;
rtx_insn *insn;
/* Run just enough of rest_of_compilation to get the insns emitted.
There's not really enough bulk here to make other passes such as
- instruction scheduling worth while. Note that use_thunk calls
- assemble_start_function and assemble_end_function. */
+ instruction scheduling worthwhile. */
insn = get_insns ();
shorten_branches (insn);
+ assemble_start_function (thunk_fndecl, fnname);
final_start_function (insn, file, 1);
final (insn, file, 1);
final_end_function ();
+ assemble_end_function (thunk_fndecl, fnname);
reload_completed = 0;
epilogue_completed = 0;
fprintf (file, "%d\n", ((*found)->labelno));
#ifdef HAVE_AS_TLS
- if (TARGET_XCOFF && GET_CODE (x) == SYMBOL_REF
+ if (TARGET_XCOFF && SYMBOL_REF_P (x)
&& (SYMBOL_REF_TLS_MODEL (x) == TLS_MODEL_GLOBAL_DYNAMIC
|| SYMBOL_REF_TLS_MODEL (x) == TLS_MODEL_LOCAL_DYNAMIC))
{
return;
}
}
- else if (GET_MODE (x) == VOIDmode && GET_CODE (x) == CONST_INT)
+ else if (GET_MODE (x) == VOIDmode && CONST_INT_P (x))
{
unsigned HOST_WIDE_INT low;
HOST_WIDE_INT high;
if (GET_CODE (x) == CONST)
{
gcc_assert (GET_CODE (XEXP (x, 0)) == PLUS
- && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT);
+ && CONST_INT_P (XEXP (XEXP (x, 0), 1)));
base = XEXP (XEXP (x, 0), 0);
offset = INTVAL (XEXP (XEXP (x, 0), 1));
output_addr_const (file, x);
#if HAVE_AS_TLS
- if (TARGET_XCOFF && GET_CODE (base) == SYMBOL_REF)
+ if (TARGET_XCOFF && SYMBOL_REF_P (base))
{
switch (SYMBOL_REF_TLS_MODEL (base))
{
some cycles later. */
/* Separate a load from a narrower, dependent store. */
- if ((rs6000_sched_groups || rs6000_tune == PROCESSOR_POWER9)
+ if ((rs6000_sched_groups || rs6000_tune == PROCESSOR_POWER9
+ || rs6000_tune == PROCESSOR_FUTURE)
&& GET_CODE (PATTERN (insn)) == SET
&& GET_CODE (PATTERN (dep_insn)) == SET
- && GET_CODE (XEXP (PATTERN (insn), 1)) == MEM
- && GET_CODE (XEXP (PATTERN (dep_insn), 0)) == MEM
+ && MEM_P (XEXP (PATTERN (insn), 1))
+ && MEM_P (XEXP (PATTERN (dep_insn), 0))
&& (GET_MODE_SIZE (GET_MODE (XEXP (PATTERN (insn), 1)))
> GET_MODE_SIZE (GET_MODE (XEXP (PATTERN (dep_insn), 0)))))
return cost + 14;
|| rs6000_tune == PROCESSOR_POWER7
|| rs6000_tune == PROCESSOR_POWER8
|| rs6000_tune == PROCESSOR_POWER9
+ || rs6000_tune == PROCESSOR_FUTURE
|| rs6000_tune == PROCESSOR_CELL)
&& recog_memoized (dep_insn)
&& (INSN_CODE (dep_insn) >= 0))
case PROCESSOR_POWER8:
return 7;
case PROCESSOR_POWER9:
+ case PROCESSOR_FUTURE:
return 6;
default:
return 1;
if (tie_operand (pat, VOIDmode))
return false;
- if (GET_CODE (pat) == MEM)
+ if (MEM_P (pat))
{
*mem_ref = pat;
return true;
call_ref = gen_rtx_SYMBOL_REF (VOIDmode, IDENTIFIER_POINTER (node));
}
- if (HAVE_AS_PLTSEQ
- && TARGET_TLS_MARKERS
- && (DEFAULT_ABI == ABI_ELFv2 || DEFAULT_ABI == ABI_V4))
+ if (TARGET_PLTSEQ)
{
rtx base = const0_rtx;
- int regno;
- if (DEFAULT_ABI == ABI_ELFv2)
+ int regno = 12;
+ if (rs6000_pcrel_p (cfun))
{
- base = gen_rtx_REG (Pmode, TOC_REGISTER);
- regno = 12;
+ rtx reg = gen_rtx_REG (Pmode, regno);
+ rtx u = gen_rtx_UNSPEC (Pmode, gen_rtvec (3, base, call_ref, arg),
+ UNSPEC_PLT_PCREL);
+ emit_insn (gen_rtx_SET (reg, u));
+ return reg;
}
+
+ if (DEFAULT_ABI == ABI_ELFv2)
+ base = gen_rtx_REG (Pmode, TOC_REGISTER);
else
{
if (flag_pic)
{
while (GET_CODE (addr) == PLUS)
{
- if (GET_CODE (XEXP (addr, 0)) == REG
+ if (REG_P (XEXP (addr, 0))
&& REGNO (XEXP (addr, 0)) != 0)
addr = XEXP (addr, 0);
- else if (GET_CODE (XEXP (addr, 1)) == REG
+ else if (REG_P (XEXP (addr, 1))
&& REGNO (XEXP (addr, 1)) != 0)
addr = XEXP (addr, 1);
else if (CONSTANT_P (XEXP (addr, 0)))
else
gcc_unreachable ();
}
- gcc_assert (GET_CODE (addr) == REG && REGNO (addr) != 0);
+ gcc_assert (REG_P (addr) && REGNO (addr) != 0);
return addr;
}
}
else
{
- strcat (tmp_buf, ":\nlis r12,hi16(");
+ strcat (tmp_buf, ":\n\tlis r12,hi16(");
strcat (tmp_buf, name_buf);
strcat (tmp_buf, ")\n\tori r12,r12,lo16(");
strcat (tmp_buf, name_buf);
return NULL_TREE;
}
-/* INSN is either a function call or a millicode call. It may have an
- unconditional jump in its delay slot.
-
- CALL_DEST is the routine we are calling. */
-
-char *
-macho_call_template (rtx_insn *insn, rtx *operands, int dest_operand_number,
- int cookie_operand_number)
-{
- static char buf[256];
- if (darwin_emit_branch_islands
- && GET_CODE (operands[dest_operand_number]) == SYMBOL_REF
- && (INTVAL (operands[cookie_operand_number]) & CALL_LONG))
- {
- tree labelname;
- tree funname = get_identifier (XSTR (operands[dest_operand_number], 0));
-
- if (no_previous_def (funname))
- {
- rtx label_rtx = gen_label_rtx ();
- char *label_buf, temp_buf[256];
- ASM_GENERATE_INTERNAL_LABEL (temp_buf, "L",
- CODE_LABEL_NUMBER (label_rtx));
- label_buf = temp_buf[0] == '*' ? temp_buf + 1 : temp_buf;
- labelname = get_identifier (label_buf);
- add_compiler_branch_island (labelname, funname, insn_line (insn));
- }
- else
- labelname = get_prev_label (funname);
-
- /* "jbsr foo, L42" is Mach-O for "Link as 'bl foo' if a 'bl'
- instruction will reach 'foo', otherwise link as 'bl L42'".
- "L42" should be a 'branch island', that will do a far jump to
- 'foo'. Branch islands are generated in
- macho_branch_islands(). */
- sprintf (buf, "jbsr %%z%d,%.246s",
- dest_operand_number, IDENTIFIER_POINTER (labelname));
- }
- else
- sprintf (buf, "bl %%z%d", dest_operand_number);
- return buf;
-}
-
/* Generate PIC and indirect symbol stubs. */
void
unsigned int length;
char *symbol_name, *lazy_ptr_name;
char *local_label_0;
- static int label = 0;
+ static unsigned label = 0;
/* Lose our funky encoding stuff so it doesn't contaminate the stub. */
symb = (*targetm.strip_name_encoding) (symb);
fprintf (file, "\t.indirect_symbol %s\n", symbol_name);
label++;
- local_label_0 = XALLOCAVEC (char, sizeof ("\"L00000000000$spb\""));
- sprintf (local_label_0, "\"L%011d$spb\"", label);
+ local_label_0 = XALLOCAVEC (char, 16);
+ sprintf (local_label_0, "L%u$spb", label);
fprintf (file, "\tmflr r0\n");
if (TARGET_LINK_STACK)
rs6000_machopic_legitimize_pic_address (XEXP (XEXP (orig, 0), 1),
Pmode, reg);
- if (GET_CODE (offset) == CONST_INT)
+ if (CONST_INT_P (offset))
{
if (SMALL_INT (offset))
return plus_constant (Pmode, base, INTVAL (offset));
ASM_OUTPUT_TYPE_DIRECTIVE (file, name, "function");
ASM_DECLARE_RESULT (file, DECL_RESULT (decl));
- if (TARGET_CMODEL == CMODEL_LARGE && rs6000_global_entry_point_needed_p ())
+ if (TARGET_CMODEL == CMODEL_LARGE
+ && rs6000_global_entry_point_prologue_needed_p ())
{
char buf[256];
rs6000_xcoff_output_readwrite_section_asm_op,
&xcoff_private_data_section_name);
+ read_only_private_data_section
+ = get_unnamed_section (0, rs6000_xcoff_output_readonly_section_asm_op,
+ &xcoff_private_rodata_section_name);
+
tls_data_section
= get_unnamed_section (SECTION_TLS,
rs6000_xcoff_output_tls_section_asm_op,
rs6000_xcoff_output_tls_section_asm_op,
&xcoff_private_data_section_name);
- read_only_private_data_section
- = get_unnamed_section (0, rs6000_xcoff_output_readonly_section_asm_op,
- &xcoff_private_data_section_name);
-
toc_section
= get_unnamed_section (0, rs6000_xcoff_output_toc_section_asm_op, NULL);
main_input_filename, ".bss_");
rs6000_gen_section_name (&xcoff_private_data_section_name,
main_input_filename, ".rw_");
+ rs6000_gen_section_name (&xcoff_private_rodata_section_name,
+ main_input_filename, ".rop_");
rs6000_gen_section_name (&xcoff_read_only_section_name,
main_input_filename, ".ro_");
rs6000_gen_section_name (&xcoff_tls_data_section_name,
if (!MEM_P (rtl))
return;
symbol = XEXP (rtl, 0);
- if (GET_CODE (symbol) != SYMBOL_REF)
+ if (!SYMBOL_REF_P (symbol))
return;
flags = SYMBOL_REF_FLAGS (symbol);
return false;
case MULT:
- if (GET_CODE (XEXP (x, 1)) == CONST_INT
+ if (CONST_INT_P (XEXP (x, 1))
&& satisfies_constraint_I (XEXP (x, 1)))
{
if (INTVAL (XEXP (x, 1)) >= -256
case UDIV:
case UMOD:
- if (GET_CODE (XEXP (x, 1)) == CONST_INT
+ if (CONST_INT_P (XEXP (x, 1))
&& exact_log2 (INTVAL (XEXP (x, 1))) >= 0)
{
if (code == DIV || code == MOD)
case SIGN_EXTEND:
case ZERO_EXTEND:
- if (GET_CODE (XEXP (x, 0)) == MEM)
+ if (MEM_P (XEXP (x, 0)))
*total = 0;
else
*total = COSTS_N_INSNS (1);
reg_class_t from, reg_class_t to)
{
int ret;
+ reg_class_t rclass;
if (TARGET_DEBUG_COST)
dbg_cost_ctrl++;
+ /* If we have VSX, we can easily move between FPR or Altivec registers,
+ otherwise we can only easily move within classes.
+ Do this first so we give best-case answers for union classes
+ containing both gprs and vsx regs. */
+ HARD_REG_SET to_vsx, from_vsx;
+ COPY_HARD_REG_SET (to_vsx, reg_class_contents[to]);
+ AND_HARD_REG_SET (to_vsx, reg_class_contents[VSX_REGS]);
+ COPY_HARD_REG_SET (from_vsx, reg_class_contents[from]);
+ AND_HARD_REG_SET (from_vsx, reg_class_contents[VSX_REGS]);
+ if (!hard_reg_set_empty_p (to_vsx)
+ && !hard_reg_set_empty_p (from_vsx)
+ && (TARGET_VSX
+ || hard_reg_set_intersect_p (to_vsx, from_vsx)))
+ {
+ int reg = FIRST_FPR_REGNO;
+ if (TARGET_VSX
+ || (TEST_HARD_REG_BIT (to_vsx, FIRST_ALTIVEC_REGNO)
+ && TEST_HARD_REG_BIT (from_vsx, FIRST_ALTIVEC_REGNO)))
+ reg = FIRST_ALTIVEC_REGNO;
+ ret = 2 * hard_regno_nregs (reg, mode);
+ }
+
/* Moves from/to GENERAL_REGS. */
- if (reg_classes_intersect_p (to, GENERAL_REGS)
- || reg_classes_intersect_p (from, GENERAL_REGS))
+ else if ((rclass = from, reg_classes_intersect_p (to, GENERAL_REGS))
+ || (rclass = to, reg_classes_intersect_p (from, GENERAL_REGS)))
{
- reg_class_t rclass = from;
-
- if (! reg_classes_intersect_p (to, GENERAL_REGS))
- rclass = to;
-
if (rclass == FLOAT_REGS || rclass == ALTIVEC_REGS || rclass == VSX_REGS)
- ret = (rs6000_memory_move_cost (mode, rclass, false)
- + rs6000_memory_move_cost (mode, GENERAL_REGS, false));
+ {
+ if (TARGET_DIRECT_MOVE)
+ {
+ /* Keep the cost for direct moves above that for within
+ a register class even if the actual processor cost is
+ comparable. We do this because a direct move insn
+ can't be a nop, whereas with ideal register
+ allocation a move within the same class might turn
+ out to be a nop. */
+ if (rs6000_tune == PROCESSOR_POWER9
+ || rs6000_tune == PROCESSOR_FUTURE)
+ ret = 3 * hard_regno_nregs (FIRST_GPR_REGNO, mode);
+ else
+ ret = 4 * hard_regno_nregs (FIRST_GPR_REGNO, mode);
+ /* SFmode requires a conversion when moving between gprs
+ and vsx. */
+ if (mode == SFmode)
+ ret += 2;
+ }
+ else
+ ret = (rs6000_memory_move_cost (mode, rclass, false)
+ + rs6000_memory_move_cost (mode, GENERAL_REGS, false));
+ }
/* It's more expensive to move CR_REGS than CR0_REGS because of the
shift. */
|| rs6000_tune == PROCESSOR_POWER7
|| rs6000_tune == PROCESSOR_POWER8
|| rs6000_tune == PROCESSOR_POWER9)
- && reg_classes_intersect_p (rclass, LINK_OR_CTR_REGS))
- ret = 6 * hard_regno_nregs (0, mode);
+ && reg_class_subset_p (rclass, SPECIAL_REGS))
+ ret = 6 * hard_regno_nregs (FIRST_GPR_REGNO, mode);
else
/* A move will cost one instruction per GPR moved. */
- ret = 2 * hard_regno_nregs (0, mode);
+ ret = 2 * hard_regno_nregs (FIRST_GPR_REGNO, mode);
}
- /* If we have VSX, we can easily move between FPR or Altivec registers. */
- else if (VECTOR_MEM_VSX_P (mode)
- && reg_classes_intersect_p (to, VSX_REGS)
- && reg_classes_intersect_p (from, VSX_REGS))
- ret = 2 * hard_regno_nregs (FIRST_FPR_REGNO, mode);
-
- /* Moving between two similar registers is just one instruction. */
- else if (reg_classes_intersect_p (to, from))
- ret = (FLOAT128_2REG_P (mode)) ? 4 : 2;
-
/* Everything else has to go through GENERAL_REGS. */
else
ret = (rs6000_register_move_cost (mode, GENERAL_REGS, to)
{
if (dbg_cost_ctrl == 1)
fprintf (stderr,
- "rs6000_register_move_cost:, ret=%d, mode=%s, from=%s, to=%s\n",
+ "rs6000_register_move_cost: ret=%d, mode=%s, from=%s, to=%s\n",
ret, GET_MODE_NAME (mode), reg_class_names[from],
reg_class_names[to]);
dbg_cost_ctrl--;
return ret;
}
+/* Implement TARGET_IRA_CHANGE_PSEUDO_ALLOCNO_CLASS.
+
+ The register allocator chooses GEN_OR_VSX_REGS for the allocno
+ class if GENERAL_REGS and VSX_REGS cost is lower than the memory
+ cost. This happens a lot when TARGET_DIRECT_MOVE makes the register
+ move cost between GENERAL_REGS and VSX_REGS low.
+
+ It might seem reasonable to use a union class. After all, if usage
+ of vsr is low and gpr high, it might make sense to spill gpr to vsr
+ rather than memory. However, in cases where register pressure of
+ both is high, like the cactus_adm spec test, allowing
+ GEN_OR_VSX_REGS as the allocno class results in bad decisions in
+ the first scheduling pass. This is partly due to an allocno of
+ GEN_OR_VSX_REGS wrongly contributing to the GENERAL_REGS pressure
+ class, which gives too high a pressure for GENERAL_REGS and too low
+ for VSX_REGS. So, force a choice of the subclass here.
+
+ The best class is also the union if GENERAL_REGS and VSX_REGS have
+ the same cost. In that case we do use GEN_OR_VSX_REGS as the
+ allocno class, since trying to narrow down the class by regno mode
+ is prone to error. For example, SImode is allowed in VSX regs and
+ in some cases (e.g. gcc.target/powerpc/p9-xxbr-3.c do_bswap32_vect)
+ it would be wrong to choose an allocno of GENERAL_REGS based on
+ SImode. */
+
+static reg_class_t
+rs6000_ira_change_pseudo_allocno_class (int regno ATTRIBUTE_UNUSED,
+ reg_class_t allocno_class,
+ reg_class_t best_class)
+{
+ switch (allocno_class)
+ {
+ case GEN_OR_VSX_REGS:
+ /* best_class must be a subset of allocno_class. */
+ gcc_checking_assert (best_class == GEN_OR_VSX_REGS
+ || best_class == GEN_OR_FLOAT_REGS
+ || best_class == VSX_REGS
+ || best_class == ALTIVEC_REGS
+ || best_class == FLOAT_REGS
+ || best_class == GENERAL_REGS
+ || best_class == BASE_REGS);
+ /* Use best_class but choose wider classes when copying from the
+ wider class to best_class is cheap. This mimics IRA choice
+ of allocno class. */
+ if (best_class == BASE_REGS)
+ return GENERAL_REGS;
+ if (TARGET_VSX
+ && (best_class == FLOAT_REGS || best_class == ALTIVEC_REGS))
+ return VSX_REGS;
+ return best_class;
+
+ default:
+ break;
+ }
+
+ return allocno_class;
+}
+
/* Returns a code for a target-specific builtin that implements
reciprocal of the function, or NULL_TREE if not available. */
numbering) are what we need. */
if (!BYTES_BIG_ENDIAN
&& icode == CODE_FOR_altivec_vpkuwum_direct
- && ((GET_CODE (op0) == REG
+ && ((REG_P (op0)
&& GET_MODE (op0) != V4SImode)
- || (GET_CODE (op0) == SUBREG
+ || (SUBREG_P (op0)
&& GET_MODE (XEXP (op0, 0)) != V4SImode)))
continue;
if (!BYTES_BIG_ENDIAN
&& icode == CODE_FOR_altivec_vpkuhum_direct
- && ((GET_CODE (op0) == REG
+ && ((REG_P (op0)
&& GET_MODE (op0) != V8HImode)
- || (GET_CODE (op0) == SUBREG
+ || (SUBREG_P (op0)
&& GET_MODE (XEXP (op0, 0)) != V8HImode)))
continue;
}
/* Compute register pressure classes. We implement the target hook to avoid
- IRA picking something like NON_SPECIAL_REGS as a pressure class, which can
+ IRA picking something like GEN_OR_FLOAT_REGS as a pressure class, which can
lead to incorrect estimates of the number of available registers and therefore
increased register pressure/spill. */
static int
unsigned int
rs6000_dbx_register_number (unsigned int regno, unsigned int format)
{
- /* Except for the above, we use the internal number for non-DWARF
- debug information, and also for .eh_frame. */
- if ((format == 0 && write_symbols != DWARF2_DEBUG) || format == 2)
- return regno;
-
/* On some platforms, we use the standard DWARF register
numbering for .debug_info and .debug_frame. */
+ if ((format == 0 && write_symbols == DWARF2_DEBUG) || format == 1)
+ {
#ifdef RS6000_USE_DWARF_NUMBERING
- if (regno <= 63)
+ if (regno <= 31)
+ return regno;
+ if (FP_REGNO_P (regno))
+ return regno - FIRST_FPR_REGNO + 32;
+ if (ALTIVEC_REGNO_P (regno))
+ return regno - FIRST_ALTIVEC_REGNO + 1124;
+ if (regno == LR_REGNO)
+ return 108;
+ if (regno == CTR_REGNO)
+ return 109;
+ if (regno == CA_REGNO)
+ return 101; /* XER */
+ /* Special handling for CR for .debug_frame: rs6000_emit_prologue has
+ translated any combination of CR2, CR3, CR4 saves to a save of CR2.
+ The actual code emitted saves the whole of CR, so we map CR2_REGNO
+ to the DWARF reg for CR. */
+ if (format == 1 && regno == CR2_REGNO)
+ return 64;
+ if (CR_REGNO_P (regno))
+ return regno - CR0_REGNO + 86;
+ if (regno == VRSAVE_REGNO)
+ return 356;
+ if (regno == VSCR_REGNO)
+ return 67;
+
+ /* These do not make much sense. */
+ if (regno == FRAME_POINTER_REGNUM)
+ return 111;
+ if (regno == ARG_POINTER_REGNUM)
+ return 67;
+ if (regno == 64)
+ return 100;
+
+ gcc_unreachable ();
+#endif
+ }
+
+ /* We use the GCC 7 (and before) internal number for non-DWARF debug
+ information, and also for .eh_frame. */
+ /* Translate the regnos to their numbers in GCC 7 (and before). */
+ if (regno <= 31)
return regno;
+ if (FP_REGNO_P (regno))
+ return regno - FIRST_FPR_REGNO + 32;
+ if (ALTIVEC_REGNO_P (regno))
+ return regno - FIRST_ALTIVEC_REGNO + 77;
if (regno == LR_REGNO)
- return 108;
+ return 65;
if (regno == CTR_REGNO)
- return 109;
- /* Special handling for CR for .debug_frame: rs6000_emit_prologue has
- translated any combination of CR2, CR3, CR4 saves to a save of CR2.
- The actual code emitted saves the whole of CR, so we map CR2_REGNO
- to the DWARF reg for CR. */
- if (format == 1 && regno == CR2_REGNO)
- return 64;
- if (CR_REGNO_P (regno))
- return regno - CR0_REGNO + 86;
+ return 66;
if (regno == CA_REGNO)
- return 101; /* XER */
- if (ALTIVEC_REGNO_P (regno))
- return regno - FIRST_ALTIVEC_REGNO + 1124;
+ return 76; /* XER */
+ if (CR_REGNO_P (regno))
+ return regno - CR0_REGNO + 68;
if (regno == VRSAVE_REGNO)
- return 356;
+ return 109;
if (regno == VSCR_REGNO)
+ return 110;
+
+ if (regno == FRAME_POINTER_REGNUM)
+ return 111;
+ if (regno == ARG_POINTER_REGNUM)
return 67;
-#endif
- return regno;
+ if (regno == 64)
+ return 64;
+
+ gcc_unreachable ();
}
/* target hook eh_return_filter_mode */
{ "float128", OPTION_MASK_FLOAT128_KEYWORD, false, true },
{ "float128-hardware", OPTION_MASK_FLOAT128_HW, false, true },
{ "fprnd", OPTION_MASK_FPRND, false, true },
+ { "future", OPTION_MASK_FUTURE, false, true },
{ "hard-dfp", OPTION_MASK_DFP, false, true },
{ "htm", OPTION_MASK_HTM, false, true },
{ "isel", OPTION_MASK_ISEL, false, true },
{ "mfcrf", OPTION_MASK_MFCRF, false, true },
- { "mfpgpr", OPTION_MASK_MFPGPR, false, true },
+ { "mfpgpr", 0, false, true },
{ "modulo", OPTION_MASK_MODULO, false, true },
{ "mulhw", OPTION_MASK_MULHW, false, true },
{ "multiple", OPTION_MASK_MULTIPLE, false, true },
+ { "pcrel", OPTION_MASK_PCREL, false, true },
{ "popcntb", OPTION_MASK_POPCNTB, false, true },
{ "popcntd", OPTION_MASK_POPCNTD, false, true },
{ "power8-fusion", OPTION_MASK_P8_FUSION, false, true },
{ "power9-vector", OPTION_MASK_P9_VECTOR, false, true },
{ "powerpc-gfxopt", OPTION_MASK_PPC_GFXOPT, false, true },
{ "powerpc-gpopt", OPTION_MASK_PPC_GPOPT, false, true },
+ { "prefixed-addr", OPTION_MASK_PREFIXED_ADDR, false, true },
{ "quad-memory", OPTION_MASK_QUAD_MEMORY, false, true },
{ "quad-memory-atomic", OPTION_MASK_QUAD_MEMORY_ATOMIC, false, true },
{ "recip-precision", OPTION_MASK_RECIP_PRECISION, false, true },
#ifndef TARGET_LIBC_PROVIDES_HWCAP_IN_TCB
error_at (DECL_SOURCE_LOCATION (default_node->decl),
- "target_clones attribute needs GLIBC (2.23 and newer) that "
+ "%<target_clones%> attribute needs GLIBC (2.23 and newer) that "
"exports hardware capability bits");
#else
/* Build result decl and add to function_decl. */
tree t = build_decl (UNKNOWN_LOCATION, RESULT_DECL, NULL_TREE, ptr_type_node);
+ DECL_CONTEXT (t) = decl;
DECL_ARTIFICIAL (t) = 1;
DECL_IGNORED_P (t) = 1;
DECL_RESULT (decl) = t;
rtx call[4];
int n_call;
rtx insn;
+ bool is_pltseq_longcall;
if (global_tlsarg)
tlsarg = global_tlsarg;
/* Handle longcall attributes. */
+ is_pltseq_longcall = false;
if ((INTVAL (cookie) & CALL_LONG) != 0
&& GET_CODE (func_desc) == SYMBOL_REF)
- func = rs6000_longcall_ref (func_desc, tlsarg);
+ {
+ func = rs6000_longcall_ref (func_desc, tlsarg);
+ if (TARGET_PLTSEQ)
+ is_pltseq_longcall = true;
+ }
/* Handle indirect calls. */
- if (GET_CODE (func) != SYMBOL_REF
+ if (!SYMBOL_REF_P (func)
|| (DEFAULT_ABI == ABI_AIX && !SYMBOL_REF_FUNCTION_P (func)))
{
- /* Save the TOC into its reserved slot before the call,
- and prepare to restore it after the call. */
- rtx stack_toc_offset = GEN_INT (RS6000_TOC_SAVE_SLOT);
- rtx stack_toc_unspec = gen_rtx_UNSPEC (Pmode,
- gen_rtvec (1, stack_toc_offset),
- UNSPEC_TOCSLOT);
- toc_restore = gen_rtx_SET (toc_reg, stack_toc_unspec);
-
- /* Can we optimize saving the TOC in the prologue or
- do we need to do it at every call? */
- if (TARGET_SAVE_TOC_INDIRECT && !cfun->calls_alloca)
- cfun->machine->save_toc_in_prologue = true;
- else
+ if (!rs6000_pcrel_p (cfun))
{
- rtx stack_ptr = gen_rtx_REG (Pmode, STACK_POINTER_REGNUM);
- rtx stack_toc_mem = gen_frame_mem (Pmode,
- gen_rtx_PLUS (Pmode, stack_ptr,
- stack_toc_offset));
- MEM_VOLATILE_P (stack_toc_mem) = 1;
- if (HAVE_AS_PLTSEQ
- && TARGET_TLS_MARKERS
- && DEFAULT_ABI == ABI_ELFv2
- && GET_CODE (func_desc) == SYMBOL_REF)
+ /* Save the TOC into its reserved slot before the call,
+ and prepare to restore it after the call. */
+ rtx stack_toc_offset = GEN_INT (RS6000_TOC_SAVE_SLOT);
+ rtx stack_toc_unspec = gen_rtx_UNSPEC (Pmode,
+ gen_rtvec (1, stack_toc_offset),
+ UNSPEC_TOCSLOT);
+ toc_restore = gen_rtx_SET (toc_reg, stack_toc_unspec);
+
+ /* Can we optimize saving the TOC in the prologue or
+ do we need to do it at every call? */
+ if (TARGET_SAVE_TOC_INDIRECT && !cfun->calls_alloca)
+ cfun->machine->save_toc_in_prologue = true;
+ else
{
- rtvec v = gen_rtvec (3, toc_reg, func_desc, tlsarg);
- rtx mark_toc_reg = gen_rtx_UNSPEC (Pmode, v, UNSPEC_PLTSEQ);
- emit_insn (gen_rtx_SET (stack_toc_mem, mark_toc_reg));
+ rtx stack_ptr = gen_rtx_REG (Pmode, STACK_POINTER_REGNUM);
+ rtx stack_toc_mem = gen_frame_mem (Pmode,
+ gen_rtx_PLUS (Pmode, stack_ptr,
+ stack_toc_offset));
+ MEM_VOLATILE_P (stack_toc_mem) = 1;
+ if (is_pltseq_longcall)
+ {
+ rtvec v = gen_rtvec (3, toc_reg, func_desc, tlsarg);
+ rtx mark_toc_reg = gen_rtx_UNSPEC (Pmode, v, UNSPEC_PLTSEQ);
+ emit_insn (gen_rtx_SET (stack_toc_mem, mark_toc_reg));
+ }
+ else
+ emit_move_insn (stack_toc_mem, toc_reg);
}
- else
- emit_move_insn (stack_toc_mem, toc_reg);
}
if (DEFAULT_ABI == ABI_ELFv2)
calls via LR, so move the address there. Needed to mark
this insn for linker plt sequence editing too. */
func_addr = gen_rtx_REG (Pmode, CTR_REGNO);
- if (HAVE_AS_PLTSEQ
- && TARGET_TLS_MARKERS
- && GET_CODE (func_desc) == SYMBOL_REF)
+ if (is_pltseq_longcall)
{
rtvec v = gen_rtvec (3, abi_reg, func_desc, tlsarg);
rtx mark_func = gen_rtx_UNSPEC (Pmode, v, UNSPEC_PLTSEQ);
}
else
{
- /* Direct calls use the TOC: for local calls, the callee will
- assume the TOC register is set; for non-local calls, the
- PLT stub needs the TOC register. */
- abi_reg = toc_reg;
+ /* No TOC register needed for calls from PC-relative callers. */
+ if (!rs6000_pcrel_p (cfun))
+ /* Direct calls use the TOC: for local calls, the callee will
+ assume the TOC register is set; for non-local calls, the
+ PLT stub needs the TOC register. */
+ abi_reg = toc_reg;
func_addr = func;
}
if (toc_restore)
call[n_call++] = toc_restore;
- call[n_call++] = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (Pmode, LR_REGNO));
+ call[n_call++] = gen_hard_reg_clobber (Pmode, LR_REGNO);
insn = gen_rtx_PARALLEL (VOIDmode, gen_rtvec_v (n_call, call));
insn = emit_call_insn (insn);
insn = emit_call_insn (insn);
/* Note use of the TOC register. */
- use_reg (&CALL_INSN_FUNCTION_USAGE (insn), gen_rtx_REG (Pmode, TOC_REGNUM));
+ if (!rs6000_pcrel_p (cfun))
+ use_reg (&CALL_INSN_FUNCTION_USAGE (insn),
+ gen_rtx_REG (Pmode, TOC_REGNUM));
}
/* Expand code to perform a call under the SYSV4 ABI. */
{
rtx func = func_desc;
rtx func_addr;
- rtx call[3];
+ rtx call[4];
rtx insn;
rtx abi_reg = NULL_RTX;
+ int n;
if (global_tlsarg)
tlsarg = global_tlsarg;
&& GET_CODE (func_desc) == SYMBOL_REF)
{
func = rs6000_longcall_ref (func_desc, tlsarg);
- /* If the longcall was implemented using PLT16 relocs, then r11
- needs to be valid at the call for lazy linking. */
- if (HAVE_AS_PLTSEQ
- && TARGET_TLS_MARKERS)
+ /* If the longcall was implemented as an inline PLT call using
+ PLT unspecs then func will be REG:r11. If not, func will be
+ a pseudo reg. The inline PLT call sequence supports lazy
+ linking (and longcalls to functions in dlopen'd libraries).
+ The other style of longcalls doesn't. The lazy linking entry
+ to the dynamic symbol resolver requires r11 be the function
+ address (as it is for linker generated PLT stubs). Ensure
+ r11 stays valid to the bctrl by marking r11 used by the call. */
+ if (TARGET_PLTSEQ)
abi_reg = func;
}
func = force_reg (Pmode, func);
/* Indirect calls via CTR are strongly preferred over indirect
- calls via LR, so move the address there. Needed to mark
- this insn for linker plt sequence editing too. */
+ calls via LR, so move the address there. That can't be left
+ to reload because we want to mark every instruction in an
+ inline PLT call sequence with a reloc, enabling the linker to
+ edit the sequence back to a direct call when that makes sense. */
func_addr = gen_rtx_REG (Pmode, CTR_REGNO);
- if (HAVE_AS_PLTSEQ
- && TARGET_TLS_MARKERS
- && GET_CODE (func_desc) == SYMBOL_REF)
+ if (abi_reg)
{
rtvec v = gen_rtvec (3, func, func_desc, tlsarg);
rtx mark_func = gen_rtx_UNSPEC (Pmode, v, UNSPEC_PLTSEQ);
if (value != NULL_RTX)
call[0] = gen_rtx_SET (value, call[0]);
- unsigned int mask = CALL_V4_SET_FP_ARGS | CALL_V4_CLEAR_FP_ARGS;
- call[1] = gen_rtx_USE (VOIDmode, GEN_INT (INTVAL (cookie) & mask));
- call[2] = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (Pmode, LR_REGNO));
+ call[1] = gen_rtx_USE (VOIDmode, cookie);
+ n = 2;
+ if (TARGET_SECURE_PLT
+ && flag_pic
+ && GET_CODE (func_addr) == SYMBOL_REF
+ && !SYMBOL_REF_LOCAL_P (func_addr))
+ call[n++] = gen_rtx_USE (VOIDmode, pic_offset_table_rtx);
- insn = gen_rtx_PARALLEL (VOIDmode, gen_rtvec_v (3, call));
+ call[n++] = gen_hard_reg_clobber (Pmode, LR_REGNO);
+
+ insn = gen_rtx_PARALLEL (VOIDmode, gen_rtvec_v (n, call));
insn = emit_call_insn (insn);
if (abi_reg)
use_reg (&CALL_INSN_FUNCTION_USAGE (insn), abi_reg);
&& GET_CODE (func_desc) == SYMBOL_REF)
{
func = rs6000_longcall_ref (func_desc, tlsarg);
- /* If the longcall was implemented using PLT16 relocs, then r11
- needs to be valid at the call for lazy linking. */
- if (HAVE_AS_PLTSEQ
- && TARGET_TLS_MARKERS)
+ /* If the longcall was implemented as an inline PLT call using
+ PLT unspecs then func will be REG:r11. If not, func will be
+ a pseudo reg. The inline PLT call sequence supports lazy
+ linking (and longcalls to functions in dlopen'd libraries).
+ The other style of longcalls doesn't. The lazy linking entry
+ to the dynamic symbol resolver requires r11 be the function
+ address (as it is for linker generated PLT stubs). Ensure
+ r11 stays valid to the bctr by marking r11 used by the call. */
+ if (TARGET_PLTSEQ)
abi_reg = func;
}
{
func = force_reg (Pmode, func);
- /* Indirect sibcalls must go via CTR. Needed to mark
- this insn for linker plt sequence editing too. */
+ /* Indirect sibcalls must go via CTR. That can't be left to
+ reload because we want to mark every instruction in an inline
+ PLT call sequence with a reloc, enabling the linker to edit
+ the sequence back to a direct call when that makes sense. */
func_addr = gen_rtx_REG (Pmode, CTR_REGNO);
- if (HAVE_AS_PLTSEQ
- && TARGET_TLS_MARKERS
- && GET_CODE (func_desc) == SYMBOL_REF)
+ if (abi_reg)
{
rtvec v = gen_rtvec (3, func, func_desc, tlsarg);
rtx mark_func = gen_rtx_UNSPEC (Pmode, v, UNSPEC_PLTSEQ);
if (value != NULL_RTX)
call[0] = gen_rtx_SET (value, call[0]);
- unsigned int mask = CALL_V4_SET_FP_ARGS | CALL_V4_CLEAR_FP_ARGS;
- call[1] = gen_rtx_USE (VOIDmode, GEN_INT (INTVAL (cookie) & mask));
+ call[1] = gen_rtx_USE (VOIDmode, cookie);
call[2] = simple_return_rtx;
insn = gen_rtx_PARALLEL (VOIDmode, gen_rtvec_v (3, call));
if (sibcall)
call[2] = simple_return_rtx;
else
- call[2] = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (Pmode, LR_REGNO));
+ call[2] = gen_hard_reg_clobber (Pmode, LR_REGNO);
insn = gen_rtx_PARALLEL (VOIDmode, gen_rtvec_v (3, call));
insn = emit_call_insn (insn);
return (cfun && cfun->machine && cfun->machine->save_toc_in_prologue);
}
+/* Return whether we should generate PC-relative code for FNDECL. */
+bool
+rs6000_fndecl_pcrel_p (const_tree fndecl)
+{
+ if (DEFAULT_ABI != ABI_ELFv2)
+ return false;
+
+ struct cl_target_option *opts = target_opts_for_fn (fndecl);
+
+ return ((opts->x_rs6000_isa_flags & OPTION_MASK_PCREL) != 0
+ && TARGET_CMODEL == CMODEL_MEDIUM);
+}
+
+/* Return whether we should generate PC-relative code for *FN. */
+bool
+rs6000_pcrel_p (struct function *fn)
+{
+ if (DEFAULT_ABI != ABI_ELFv2)
+ return false;
+
+ /* Optimize the usual case. */
+ if (fn == cfun)
+ return ((rs6000_isa_flags & OPTION_MASK_PCREL) != 0
+ && TARGET_CMODEL == CMODEL_MEDIUM);
+
+ return rs6000_fndecl_pcrel_p (fn->decl);
+}
+
#ifdef HAVE_GAS_HIDDEN
# define USE_HIDDEN_LINKONCE 1
#else
rtx bool_rtx;
/* Optimize AND of 0/0xffffffff and IOR/XOR of 0. */
- if (op2 && GET_CODE (op2) == CONST_INT
+ if (op2 && CONST_INT_P (op2)
&& (mode == SImode || (mode == DImode && TARGET_POWERPC64))
&& !complement_final_p && !complement_op1_p && !complement_op2_p)
{
op2_hi_lo[hi] = op2_hi_lo[lo] = NULL_RTX;
else
{
- if (GET_CODE (operands[2]) != CONST_INT)
+ if (!CONST_INT_P (operands[2]))
{
op2_hi_lo[hi] = gen_highpart_mode (SImode, DImode, operands[2]);
op2_hi_lo[lo] = gen_lowpart (SImode, operands[2]);
{
/* Split large IOR/XOR operations. */
if ((code == IOR || code == XOR)
- && GET_CODE (op2_hi_lo[i]) == CONST_INT
+ && CONST_INT_P (op2_hi_lo[i])
&& !complement_final_p
&& !complement_op1_p
&& !complement_op2_p