1 /* Subroutines for insn-output.c for HPPA.
2 Copyright (C) 1992-2013 Free Software Foundation, Inc.
3 Contributed by Tim Moore (moore@cs.utah.edu), based on sparc.c
5 This file is part of GCC.
7 GCC is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3, or (at your option)
10 any later version.
12 GCC is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
17 You should have received a copy of the GNU General Public License
18 along with GCC; see the file COPYING3. If not see
19 <http://www.gnu.org/licenses/>. */
23 #include "coretypes.h"
27 #include "hard-reg-set.h"
28 #include "insn-config.h"
29 #include "conditions.h"
30 #include "insn-attr.h"
40 #include "diagnostic-core.h"
46 #include "common/common-target.h"
47 #include "target-def.h"
48 #include "langhooks.h"
52 /* Return nonzero if there is a bypass for the output of
53 OUT_INSN and the fp store IN_INSN. */
55 pa_fpstore_bypass_p (rtx out_insn, rtx in_insn)
57 enum machine_mode store_mode;
58 enum machine_mode other_mode;
61 if (recog_memoized (in_insn) < 0
62 || (get_attr_type (in_insn) != TYPE_FPSTORE
63 && get_attr_type (in_insn) != TYPE_FPSTORE_LOAD)
64 || recog_memoized (out_insn) < 0)
67 store_mode = GET_MODE (SET_SRC (PATTERN (in_insn)));
69 set = single_set (out_insn);
73 other_mode = GET_MODE (SET_SRC (set));
75 return (GET_MODE_SIZE (store_mode) == GET_MODE_SIZE (other_mode));
79 #ifndef DO_FRAME_NOTES
80 #ifdef INCOMING_RETURN_ADDR_RTX
81 #define DO_FRAME_NOTES 1
82 #else
83 #define DO_FRAME_NOTES 0
84 #endif
85 #endif
87 static void pa_option_override (void);
88 static void copy_reg_pointer (rtx, rtx);
89 static void fix_range (const char *);
90 static int hppa_register_move_cost (enum machine_mode mode, reg_class_t,
92 static int hppa_address_cost (rtx, enum machine_mode mode, addr_space_t, bool);
93 static bool hppa_rtx_costs (rtx, int, int, int, int *, bool);
94 static inline rtx force_mode (enum machine_mode, rtx);
95 static void pa_reorg (void);
96 static void pa_combine_instructions (void);
97 static int pa_can_combine_p (rtx, rtx, rtx, int, rtx, rtx, rtx);
98 static bool forward_branch_p (rtx);
99 static void compute_zdepwi_operands (unsigned HOST_WIDE_INT, unsigned *);
100 static void compute_zdepdi_operands (unsigned HOST_WIDE_INT, unsigned *);
101 static int compute_movmem_length (rtx);
102 static int compute_clrmem_length (rtx);
103 static bool pa_assemble_integer (rtx, unsigned int, int);
104 static void remove_useless_addtr_insns (int);
105 static void store_reg (int, HOST_WIDE_INT, int);
106 static void store_reg_modify (int, int, HOST_WIDE_INT);
107 static void load_reg (int, HOST_WIDE_INT, int);
108 static void set_reg_plus_d (int, int, HOST_WIDE_INT, int);
109 static rtx pa_function_value (const_tree, const_tree, bool);
110 static rtx pa_libcall_value (enum machine_mode, const_rtx);
111 static bool pa_function_value_regno_p (const unsigned int);
112 static void pa_output_function_prologue (FILE *, HOST_WIDE_INT);
113 static void update_total_code_bytes (unsigned int);
114 static void pa_output_function_epilogue (FILE *, HOST_WIDE_INT);
115 static int pa_adjust_cost (rtx, rtx, rtx, int);
116 static int pa_adjust_priority (rtx, int);
117 static int pa_issue_rate (void);
118 static void pa_som_asm_init_sections (void) ATTRIBUTE_UNUSED;
119 static section *pa_som_tm_clone_table_section (void) ATTRIBUTE_UNUSED;
120 static section *pa_select_section (tree, int, unsigned HOST_WIDE_INT)
122 static void pa_encode_section_info (tree, rtx, int);
123 static const char *pa_strip_name_encoding (const char *);
124 static bool pa_function_ok_for_sibcall (tree, tree);
125 static void pa_globalize_label (FILE *, const char *)
127 static void pa_asm_output_mi_thunk (FILE *, tree, HOST_WIDE_INT,
128 HOST_WIDE_INT, tree);
129 #if !defined(USE_COLLECT2)
130 static void pa_asm_out_constructor (rtx, int);
131 static void pa_asm_out_destructor (rtx, int);
132 #endif
133 static void pa_init_builtins (void);
134 static rtx pa_expand_builtin (tree, rtx, rtx, enum machine_mode mode, int);
135 static rtx hppa_builtin_saveregs (void);
136 static void hppa_va_start (tree, rtx);
137 static tree hppa_gimplify_va_arg_expr (tree, tree, gimple_seq *, gimple_seq *);
138 static bool pa_scalar_mode_supported_p (enum machine_mode);
139 static bool pa_commutative_p (const_rtx x, int outer_code);
140 static void copy_fp_args (rtx) ATTRIBUTE_UNUSED;
141 static int length_fp_args (rtx) ATTRIBUTE_UNUSED;
142 static rtx hppa_legitimize_address (rtx, rtx, enum machine_mode);
143 static inline void pa_file_start_level (void) ATTRIBUTE_UNUSED;
144 static inline void pa_file_start_space (int) ATTRIBUTE_UNUSED;
145 static inline void pa_file_start_file (int) ATTRIBUTE_UNUSED;
146 static inline void pa_file_start_mcount (const char*) ATTRIBUTE_UNUSED;
147 static void pa_elf_file_start (void) ATTRIBUTE_UNUSED;
148 static void pa_som_file_start (void) ATTRIBUTE_UNUSED;
149 static void pa_linux_file_start (void) ATTRIBUTE_UNUSED;
150 static void pa_hpux64_gas_file_start (void) ATTRIBUTE_UNUSED;
151 static void pa_hpux64_hpas_file_start (void) ATTRIBUTE_UNUSED;
152 static void output_deferred_plabels (void);
153 static void output_deferred_profile_counters (void) ATTRIBUTE_UNUSED;
154 #ifdef ASM_OUTPUT_EXTERNAL_REAL
155 static void pa_hpux_file_end (void);
156 #endif
157 static void pa_init_libfuncs (void);
158 static rtx pa_struct_value_rtx (tree, int);
159 static bool pa_pass_by_reference (cumulative_args_t, enum machine_mode,
161 static int pa_arg_partial_bytes (cumulative_args_t, enum machine_mode,
163 static void pa_function_arg_advance (cumulative_args_t, enum machine_mode,
165 static rtx pa_function_arg (cumulative_args_t, enum machine_mode,
167 static unsigned int pa_function_arg_boundary (enum machine_mode, const_tree);
168 static struct machine_function * pa_init_machine_status (void);
169 static reg_class_t pa_secondary_reload (bool, rtx, reg_class_t,
171 secondary_reload_info *);
172 static void pa_extra_live_on_entry (bitmap);
173 static enum machine_mode pa_promote_function_mode (const_tree,
174 enum machine_mode, int *,
177 static void pa_asm_trampoline_template (FILE *);
178 static void pa_trampoline_init (rtx, tree, rtx);
179 static rtx pa_trampoline_adjust_address (rtx);
180 static rtx pa_delegitimize_address (rtx);
181 static bool pa_print_operand_punct_valid_p (unsigned char);
182 static rtx pa_internal_arg_pointer (void);
183 static bool pa_can_eliminate (const int, const int);
184 static void pa_conditional_register_usage (void);
185 static enum machine_mode pa_c_mode_for_suffix (char);
186 static section *pa_function_section (tree, enum node_frequency, bool, bool);
187 static bool pa_cannot_force_const_mem (enum machine_mode, rtx);
188 static bool pa_legitimate_constant_p (enum machine_mode, rtx);
189 static unsigned int pa_section_type_flags (tree, const char *, int);
190 static bool pa_legitimate_address_p (enum machine_mode, rtx, bool);
192 /* The following extra sections are only used for SOM. */
193 static GTY(()) section *som_readonly_data_section;
194 static GTY(()) section *som_one_only_readonly_data_section;
195 static GTY(()) section *som_one_only_data_section;
196 static GTY(()) section *som_tm_clone_table_section;
198 /* Counts for the number of callee-saved general and floating point
199 registers which were saved by the current function's prologue. */
200 static int gr_saved, fr_saved;
202 /* Boolean indicating whether the return pointer was saved by the
203 current function's prologue. */
204 static bool rp_saved;
206 static rtx find_addr_reg (rtx);
208 /* Keep track of the number of bytes we have output in the CODE subspace
209 during this compilation so we'll know when to emit inline long-calls. */
210 unsigned long total_code_bytes;
212 /* The last address of the previous function plus the number of bytes in
213 associated thunks that have been output. This is used to determine if
214 a thunk can use an IA-relative branch to reach its target function. */
215 static unsigned int last_address;
217 /* Variables to handle plabels that we discover are necessary at assembly
218 output time. They are output after the current function. */
219 struct GTY(()) deferred_plabel
224 static GTY((length ("n_deferred_plabels"))) struct deferred_plabel *
225 deferred_plabels;
226 static size_t n_deferred_plabels = 0;
228 /* Initialize the GCC target structure. */
230 #undef TARGET_OPTION_OVERRIDE
231 #define TARGET_OPTION_OVERRIDE pa_option_override
233 #undef TARGET_ASM_ALIGNED_HI_OP
234 #define TARGET_ASM_ALIGNED_HI_OP "\t.half\t"
235 #undef TARGET_ASM_ALIGNED_SI_OP
236 #define TARGET_ASM_ALIGNED_SI_OP "\t.word\t"
237 #undef TARGET_ASM_ALIGNED_DI_OP
238 #define TARGET_ASM_ALIGNED_DI_OP "\t.dword\t"
239 #undef TARGET_ASM_UNALIGNED_HI_OP
240 #define TARGET_ASM_UNALIGNED_HI_OP TARGET_ASM_ALIGNED_HI_OP
241 #undef TARGET_ASM_UNALIGNED_SI_OP
242 #define TARGET_ASM_UNALIGNED_SI_OP TARGET_ASM_ALIGNED_SI_OP
243 #undef TARGET_ASM_UNALIGNED_DI_OP
244 #define TARGET_ASM_UNALIGNED_DI_OP TARGET_ASM_ALIGNED_DI_OP
245 #undef TARGET_ASM_INTEGER
246 #define TARGET_ASM_INTEGER pa_assemble_integer
248 #undef TARGET_ASM_FUNCTION_PROLOGUE
249 #define TARGET_ASM_FUNCTION_PROLOGUE pa_output_function_prologue
250 #undef TARGET_ASM_FUNCTION_EPILOGUE
251 #define TARGET_ASM_FUNCTION_EPILOGUE pa_output_function_epilogue
253 #undef TARGET_FUNCTION_VALUE
254 #define TARGET_FUNCTION_VALUE pa_function_value
255 #undef TARGET_LIBCALL_VALUE
256 #define TARGET_LIBCALL_VALUE pa_libcall_value
257 #undef TARGET_FUNCTION_VALUE_REGNO_P
258 #define TARGET_FUNCTION_VALUE_REGNO_P pa_function_value_regno_p
260 #undef TARGET_LEGITIMIZE_ADDRESS
261 #define TARGET_LEGITIMIZE_ADDRESS hppa_legitimize_address
263 #undef TARGET_SCHED_ADJUST_COST
264 #define TARGET_SCHED_ADJUST_COST pa_adjust_cost
265 #undef TARGET_SCHED_ADJUST_PRIORITY
266 #define TARGET_SCHED_ADJUST_PRIORITY pa_adjust_priority
267 #undef TARGET_SCHED_ISSUE_RATE
268 #define TARGET_SCHED_ISSUE_RATE pa_issue_rate
270 #undef TARGET_ENCODE_SECTION_INFO
271 #define TARGET_ENCODE_SECTION_INFO pa_encode_section_info
272 #undef TARGET_STRIP_NAME_ENCODING
273 #define TARGET_STRIP_NAME_ENCODING pa_strip_name_encoding
275 #undef TARGET_FUNCTION_OK_FOR_SIBCALL
276 #define TARGET_FUNCTION_OK_FOR_SIBCALL pa_function_ok_for_sibcall
278 #undef TARGET_COMMUTATIVE_P
279 #define TARGET_COMMUTATIVE_P pa_commutative_p
281 #undef TARGET_ASM_OUTPUT_MI_THUNK
282 #define TARGET_ASM_OUTPUT_MI_THUNK pa_asm_output_mi_thunk
283 #undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
284 #define TARGET_ASM_CAN_OUTPUT_MI_THUNK default_can_output_mi_thunk_no_vcall
286 #undef TARGET_ASM_FILE_END
287 #ifdef ASM_OUTPUT_EXTERNAL_REAL
288 #define TARGET_ASM_FILE_END pa_hpux_file_end
289 #else
290 #define TARGET_ASM_FILE_END output_deferred_plabels
291 #endif
293 #undef TARGET_PRINT_OPERAND_PUNCT_VALID_P
294 #define TARGET_PRINT_OPERAND_PUNCT_VALID_P pa_print_operand_punct_valid_p
296 #if !defined(USE_COLLECT2)
297 #undef TARGET_ASM_CONSTRUCTOR
298 #define TARGET_ASM_CONSTRUCTOR pa_asm_out_constructor
299 #undef TARGET_ASM_DESTRUCTOR
300 #define TARGET_ASM_DESTRUCTOR pa_asm_out_destructor
301 #endif
303 #undef TARGET_INIT_BUILTINS
304 #define TARGET_INIT_BUILTINS pa_init_builtins
306 #undef TARGET_EXPAND_BUILTIN
307 #define TARGET_EXPAND_BUILTIN pa_expand_builtin
309 #undef TARGET_REGISTER_MOVE_COST
310 #define TARGET_REGISTER_MOVE_COST hppa_register_move_cost
311 #undef TARGET_RTX_COSTS
312 #define TARGET_RTX_COSTS hppa_rtx_costs
313 #undef TARGET_ADDRESS_COST
314 #define TARGET_ADDRESS_COST hppa_address_cost
316 #undef TARGET_MACHINE_DEPENDENT_REORG
317 #define TARGET_MACHINE_DEPENDENT_REORG pa_reorg
319 #undef TARGET_INIT_LIBFUNCS
320 #define TARGET_INIT_LIBFUNCS pa_init_libfuncs
322 #undef TARGET_PROMOTE_FUNCTION_MODE
323 #define TARGET_PROMOTE_FUNCTION_MODE pa_promote_function_mode
324 #undef TARGET_PROMOTE_PROTOTYPES
325 #define TARGET_PROMOTE_PROTOTYPES hook_bool_const_tree_true
327 #undef TARGET_STRUCT_VALUE_RTX
328 #define TARGET_STRUCT_VALUE_RTX pa_struct_value_rtx
329 #undef TARGET_RETURN_IN_MEMORY
330 #define TARGET_RETURN_IN_MEMORY pa_return_in_memory
331 #undef TARGET_MUST_PASS_IN_STACK
332 #define TARGET_MUST_PASS_IN_STACK must_pass_in_stack_var_size
333 #undef TARGET_PASS_BY_REFERENCE
334 #define TARGET_PASS_BY_REFERENCE pa_pass_by_reference
335 #undef TARGET_CALLEE_COPIES
336 #define TARGET_CALLEE_COPIES hook_bool_CUMULATIVE_ARGS_mode_tree_bool_true
337 #undef TARGET_ARG_PARTIAL_BYTES
338 #define TARGET_ARG_PARTIAL_BYTES pa_arg_partial_bytes
339 #undef TARGET_FUNCTION_ARG
340 #define TARGET_FUNCTION_ARG pa_function_arg
341 #undef TARGET_FUNCTION_ARG_ADVANCE
342 #define TARGET_FUNCTION_ARG_ADVANCE pa_function_arg_advance
343 #undef TARGET_FUNCTION_ARG_BOUNDARY
344 #define TARGET_FUNCTION_ARG_BOUNDARY pa_function_arg_boundary
346 #undef TARGET_EXPAND_BUILTIN_SAVEREGS
347 #define TARGET_EXPAND_BUILTIN_SAVEREGS hppa_builtin_saveregs
348 #undef TARGET_EXPAND_BUILTIN_VA_START
349 #define TARGET_EXPAND_BUILTIN_VA_START hppa_va_start
350 #undef TARGET_GIMPLIFY_VA_ARG_EXPR
351 #define TARGET_GIMPLIFY_VA_ARG_EXPR hppa_gimplify_va_arg_expr
353 #undef TARGET_SCALAR_MODE_SUPPORTED_P
354 #define TARGET_SCALAR_MODE_SUPPORTED_P pa_scalar_mode_supported_p
356 #undef TARGET_CANNOT_FORCE_CONST_MEM
357 #define TARGET_CANNOT_FORCE_CONST_MEM pa_cannot_force_const_mem
359 #undef TARGET_SECONDARY_RELOAD
360 #define TARGET_SECONDARY_RELOAD pa_secondary_reload
362 #undef TARGET_EXTRA_LIVE_ON_ENTRY
363 #define TARGET_EXTRA_LIVE_ON_ENTRY pa_extra_live_on_entry
365 #undef TARGET_ASM_TRAMPOLINE_TEMPLATE
366 #define TARGET_ASM_TRAMPOLINE_TEMPLATE pa_asm_trampoline_template
367 #undef TARGET_TRAMPOLINE_INIT
368 #define TARGET_TRAMPOLINE_INIT pa_trampoline_init
369 #undef TARGET_TRAMPOLINE_ADJUST_ADDRESS
370 #define TARGET_TRAMPOLINE_ADJUST_ADDRESS pa_trampoline_adjust_address
371 #undef TARGET_DELEGITIMIZE_ADDRESS
372 #define TARGET_DELEGITIMIZE_ADDRESS pa_delegitimize_address
373 #undef TARGET_INTERNAL_ARG_POINTER
374 #define TARGET_INTERNAL_ARG_POINTER pa_internal_arg_pointer
375 #undef TARGET_CAN_ELIMINATE
376 #define TARGET_CAN_ELIMINATE pa_can_eliminate
377 #undef TARGET_CONDITIONAL_REGISTER_USAGE
378 #define TARGET_CONDITIONAL_REGISTER_USAGE pa_conditional_register_usage
379 #undef TARGET_C_MODE_FOR_SUFFIX
380 #define TARGET_C_MODE_FOR_SUFFIX pa_c_mode_for_suffix
381 #undef TARGET_ASM_FUNCTION_SECTION
382 #define TARGET_ASM_FUNCTION_SECTION pa_function_section
384 #undef TARGET_LEGITIMATE_CONSTANT_P
385 #define TARGET_LEGITIMATE_CONSTANT_P pa_legitimate_constant_p
386 #undef TARGET_SECTION_TYPE_FLAGS
387 #define TARGET_SECTION_TYPE_FLAGS pa_section_type_flags
388 #undef TARGET_LEGITIMATE_ADDRESS_P
389 #define TARGET_LEGITIMATE_ADDRESS_P pa_legitimate_address_p
391 struct gcc_target targetm = TARGET_INITIALIZER;
393 /* Parse the -mfixed-range= option string. */
396 fix_range (const char *const_str)
399 char *str, *dash, *comma;
401 /* str must be of the form REG1'-'REG2{,REG1'-'REG2} where REG1 and
402 REG2 are either register names or register numbers. The effect
403 of this option is to mark the registers in the range from REG1 to
404 REG2 as ``fixed'' so they won't be used by the compiler. This is
405 used, e.g., to ensure that kernel mode code doesn't use fr4-fr31. */
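/* An illustrative example (the exact option strings are ours, not taken
   from this file): the kernel use case above corresponds to passing
   "-mfixed-range=fr4-fr31", and several ranges may be given at once,
   separated by commas, e.g. "-mfixed-range=fr4-fr15,fr20-fr31".  */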
407 i = strlen (const_str);
408 str = (char *) alloca (i + 1);
409 memcpy (str, const_str, i + 1);
413 dash = strchr (str, '-');
416 warning (0, "value of -mfixed-range must have form REG1-REG2");
421 comma = strchr (dash + 1, ',');
425 first = decode_reg_name (str);
428 warning (0, "unknown register name: %s", str);
432 last = decode_reg_name (dash + 1);
435 warning (0, "unknown register name: %s", dash + 1);
443 warning (0, "%s-%s is an empty range", str, dash + 1);
447 for (i = first; i <= last; ++i)
448 fixed_regs[i] = call_used_regs[i] = 1;
457 /* Check if all floating point registers have been fixed. */
458 for (i = FP_REG_FIRST; i <= FP_REG_LAST; i++)
463 target_flags |= MASK_DISABLE_FPREGS;
466 /* Implement the TARGET_OPTION_OVERRIDE hook. */
469 pa_option_override (void)
472 cl_deferred_option *opt;
473 vec<cl_deferred_option> *v
474 = (vec<cl_deferred_option> *) pa_deferred_options;
477 FOR_EACH_VEC_ELT (*v, i, opt)
479 switch (opt->opt_index)
481 case OPT_mfixed_range_:
482 fix_range (opt->arg);
490 /* Unconditional branches in the delay slot are not compatible with dwarf2
491 call frame information. There is no benefit in using this optimization
492 on PA8000 and later processors. */
493 if (pa_cpu >= PROCESSOR_8000
494 || (targetm_common.except_unwind_info (&global_options) == UI_DWARF2
496 || flag_unwind_tables)
497 target_flags &= ~MASK_JUMP_IN_DELAY;
499 if (flag_pic && TARGET_PORTABLE_RUNTIME)
501 warning (0, "PIC code generation is not supported in the portable runtime model");
504 if (flag_pic && TARGET_FAST_INDIRECT_CALLS)
506 warning (0, "PIC code generation is not compatible with fast indirect calls");
509 if (! TARGET_GAS && write_symbols != NO_DEBUG)
511 warning (0, "-g is only supported when using GAS on this processor,");
512 warning (0, "-g option disabled");
513 write_symbols = NO_DEBUG;
516 /* We only support the "big PIC" model now. And we always generate PIC
517 code when in 64bit mode. */
518 if (flag_pic == 1 || TARGET_64BIT)
519 flag_pic = 2;
521 /* Disable -freorder-blocks-and-partition as we don't support hot and
522 cold partitioning. */
523 if (flag_reorder_blocks_and_partition)
525 inform (input_location,
526 "-freorder-blocks-and-partition does not work "
527 "on this architecture");
528 flag_reorder_blocks_and_partition = 0;
529 flag_reorder_blocks = 1;
532 /* We can't guarantee that .dword is available for 32-bit targets. */
533 if (UNITS_PER_WORD == 4)
534 targetm.asm_out.aligned_op.di = NULL;
536 /* The unaligned ops are only available when using GAS. */
539 targetm.asm_out.unaligned_op.hi = NULL;
540 targetm.asm_out.unaligned_op.si = NULL;
541 targetm.asm_out.unaligned_op.di = NULL;
544 init_machine_status = pa_init_machine_status;
549 PA_BUILTIN_COPYSIGNQ,
552 PA_BUILTIN_HUGE_VALQ,
556 static GTY(()) tree pa_builtins[(int) PA_BUILTIN_max];
559 pa_init_builtins (void)
561 #ifdef DONT_HAVE_FPUTC_UNLOCKED
563 tree decl = builtin_decl_explicit (BUILT_IN_PUTC_UNLOCKED);
564 set_builtin_decl (BUILT_IN_FPUTC_UNLOCKED, decl,
565 builtin_decl_implicit_p (BUILT_IN_PUTC_UNLOCKED));
572 if ((decl = builtin_decl_explicit (BUILT_IN_FINITE)) != NULL_TREE)
573 set_user_assembler_name (decl, "_Isfinite");
574 if ((decl = builtin_decl_explicit (BUILT_IN_FINITEF)) != NULL_TREE)
575 set_user_assembler_name (decl, "_Isfinitef");
579 if (HPUX_LONG_DOUBLE_LIBRARY)
583 /* Under HPUX, the __float128 type is a synonym for "long double". */
584 (*lang_hooks.types.register_builtin_type) (long_double_type_node,
587 /* TFmode support builtins. */
588 ftype = build_function_type_list (long_double_type_node,
589 long_double_type_node,
591 decl = add_builtin_function ("__builtin_fabsq", ftype,
592 PA_BUILTIN_FABSQ, BUILT_IN_MD,
593 "_U_Qfabs", NULL_TREE);
594 TREE_READONLY (decl) = 1;
595 pa_builtins[PA_BUILTIN_FABSQ] = decl;
597 ftype = build_function_type_list (long_double_type_node,
598 long_double_type_node,
599 long_double_type_node,
601 decl = add_builtin_function ("__builtin_copysignq", ftype,
602 PA_BUILTIN_COPYSIGNQ, BUILT_IN_MD,
603 "_U_Qfcopysign", NULL_TREE);
604 TREE_READONLY (decl) = 1;
605 pa_builtins[PA_BUILTIN_COPYSIGNQ] = decl;
607 ftype = build_function_type_list (long_double_type_node, NULL_TREE);
608 decl = add_builtin_function ("__builtin_infq", ftype,
609 PA_BUILTIN_INFQ, BUILT_IN_MD,
611 pa_builtins[PA_BUILTIN_INFQ] = decl;
613 decl = add_builtin_function ("__builtin_huge_valq", ftype,
614 PA_BUILTIN_HUGE_VALQ, BUILT_IN_MD,
616 pa_builtins[PA_BUILTIN_HUGE_VALQ] = decl;
621 pa_expand_builtin (tree exp, rtx target, rtx subtarget ATTRIBUTE_UNUSED,
622 enum machine_mode mode ATTRIBUTE_UNUSED,
623 int ignore ATTRIBUTE_UNUSED)
625 tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
626 unsigned int fcode = DECL_FUNCTION_CODE (fndecl);
630 case PA_BUILTIN_FABSQ:
631 case PA_BUILTIN_COPYSIGNQ:
632 return expand_call (exp, target, ignore);
634 case PA_BUILTIN_INFQ:
635 case PA_BUILTIN_HUGE_VALQ:
637 enum machine_mode target_mode = TYPE_MODE (TREE_TYPE (exp));
642 tmp = CONST_DOUBLE_FROM_REAL_VALUE (inf, target_mode);
644 tmp = validize_mem (force_const_mem (target_mode, tmp));
647 target = gen_reg_rtx (target_mode);
649 emit_move_insn (target, tmp);
660 /* Function to init struct machine_function.
661 This will be called, via a pointer variable,
662 from push_function_context. */
664 static struct machine_function *
665 pa_init_machine_status (void)
667 return ggc_alloc_cleared_machine_function ();
670 /* If FROM is a probable pointer register, mark TO as a probable
671 pointer register with the same pointer alignment as FROM. */
674 copy_reg_pointer (rtx to, rtx from)
676 if (REG_POINTER (from))
677 mark_reg_pointer (to, REGNO_POINTER_ALIGN (REGNO (from)));
680 /* Return 1 if X contains a symbolic expression. We know these
681 expressions will have one of a few well defined forms, so
682 we need only check those forms. */
684 pa_symbolic_expression_p (rtx x)
687 /* Strip off any HIGH. */
688 if (GET_CODE (x) == HIGH)
691 return symbolic_operand (x, VOIDmode);
694 /* Accept any constant that can be moved in one instruction into a
697 pa_cint_ok_for_move (HOST_WIDE_INT ival)
699 /* OK if ldo, ldil, or zdepi, can be used. */
700 return (VAL_14_BITS_P (ival)
701 || pa_ldil_cint_p (ival)
702 || pa_zdepi_cint_p (ival));
705 /* True iff ldil can be used to load this CONST_INT. The least
706 significant 11 bits of the value must be zero and the value must
707 not change sign when extended from 32 to 64 bits. */
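/* A worked example (constants chosen here for illustration, not taken
   from the sources): 0x12345000 has its eleven low-order bits clear and
   keeps its sign when widened from 32 to 64 bits, so ldil alone can load
   it; 0x12345678 has nonzero low-order bits and would need a second
   instruction (e.g. an ldo) in addition to ldil.  */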
709 pa_ldil_cint_p (HOST_WIDE_INT ival)
711 HOST_WIDE_INT x = ival & (((HOST_WIDE_INT) -1 << 31) | 0x7ff);
713 return x == 0 || x == ((HOST_WIDE_INT) -1 << 31);
716 /* True iff zdepi can be used to generate this CONST_INT.
717 zdepi first sign extends a 5-bit signed number to a given field
718 length, then places this field anywhere in a zero. */
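/* A worked example (values chosen here for illustration): 0x3e0 is
   0b1111100000, i.e. the 5-bit field 0b11111 deposited at bit 5, so the
   test below accepts it; 0x41 is 0b1000001, which cannot be formed by
   sign-extending a 5-bit value and depositing it into zeros, so it is
   rejected.  */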
720 pa_zdepi_cint_p (unsigned HOST_WIDE_INT x)
722 unsigned HOST_WIDE_INT lsb_mask, t;
724 /* This might not be obvious, but it's at least fast.
725 This function is critical; we don't have the time loops would take. */
727 t = ((x >> 4) + lsb_mask) & ~(lsb_mask - 1);
728 /* Return true iff t is a power of two. */
729 return ((t & (t - 1)) == 0);
732 /* True iff depi or extru can be used to compute (reg & mask).
733 Accept bit patterns like these:
734 0....01....1
735 1....10....0
736 1..10..01..1  */
737 int
738 pa_and_mask_p (unsigned HOST_WIDE_INT mask)
741 mask += mask & -mask;
742 return (mask & (mask - 1)) == 0;
745 /* True iff depi can be used to compute (reg | MASK). */
747 pa_ior_mask_p (unsigned HOST_WIDE_INT mask)
749 mask += mask & -mask;
750 return (mask & (mask - 1)) == 0;
753 /* Legitimize PIC addresses. If the address is already
754 position-independent, we return ORIG. Newly generated
755 position-independent addresses go to REG. If we need more
756 than one register, we lose. */
759 legitimize_pic_address (rtx orig, enum machine_mode mode, rtx reg)
763 gcc_assert (!PA_SYMBOL_REF_TLS_P (orig));
765 /* Labels need special handling. */
766 if (pic_label_operand (orig, mode))
770 /* We do not want to go through the movXX expanders here since that
771 would create recursion.
773 Nor do we really want to call a generator for a named pattern
774 since that requires multiple patterns if we want to support
775 multiple word sizes.
777 So instead we just emit the raw set, which avoids the movXX
778 expanders completely. */
779 mark_reg_pointer (reg, BITS_PER_UNIT);
780 insn = emit_insn (gen_rtx_SET (VOIDmode, reg, orig));
782 /* Put a REG_EQUAL note on this insn, so that it can be optimized. */
783 add_reg_note (insn, REG_EQUAL, orig);
785 /* During and after reload, we need to generate a REG_LABEL_OPERAND note
786 and update LABEL_NUSES because this is not done automatically. */
787 if (reload_in_progress || reload_completed)
789 /* Extract LABEL_REF. */
790 if (GET_CODE (orig) == CONST)
791 orig = XEXP (XEXP (orig, 0), 0);
792 /* Extract CODE_LABEL. */
793 orig = XEXP (orig, 0);
794 add_reg_note (insn, REG_LABEL_OPERAND, orig);
795 /* Make sure we have a label and not a note. */
797 LABEL_NUSES (orig)++;
799 crtl->uses_pic_offset_table = 1;
802 if (GET_CODE (orig) == SYMBOL_REF)
808 /* Before reload, allocate a temporary register for the intermediate
809 result. This allows the sequence to be deleted when the final
810 result is unused and the insns are trivially dead. */
811 tmp_reg = ((reload_in_progress || reload_completed)
812 ? reg : gen_reg_rtx (Pmode));
814 if (function_label_operand (orig, VOIDmode))
816 /* Force function label into memory in word mode. */
817 orig = XEXP (force_const_mem (word_mode, orig), 0);
818 /* Load plabel address from DLT. */
819 emit_move_insn (tmp_reg,
820 gen_rtx_PLUS (word_mode, pic_offset_table_rtx,
821 gen_rtx_HIGH (word_mode, orig)));
823 = gen_const_mem (Pmode,
824 gen_rtx_LO_SUM (Pmode, tmp_reg,
825 gen_rtx_UNSPEC (Pmode,
828 emit_move_insn (reg, pic_ref);
829 /* Now load address of function descriptor. */
830 pic_ref = gen_rtx_MEM (Pmode, reg);
834 /* Load symbol reference from DLT. */
835 emit_move_insn (tmp_reg,
836 gen_rtx_PLUS (word_mode, pic_offset_table_rtx,
837 gen_rtx_HIGH (word_mode, orig)));
839 = gen_const_mem (Pmode,
840 gen_rtx_LO_SUM (Pmode, tmp_reg,
841 gen_rtx_UNSPEC (Pmode,
846 crtl->uses_pic_offset_table = 1;
847 mark_reg_pointer (reg, BITS_PER_UNIT);
848 insn = emit_move_insn (reg, pic_ref);
850 /* Put a REG_EQUAL note on this insn, so that it can be optimized. */
851 set_unique_reg_note (insn, REG_EQUAL, orig);
855 else if (GET_CODE (orig) == CONST)
859 if (GET_CODE (XEXP (orig, 0)) == PLUS
860 && XEXP (XEXP (orig, 0), 0) == pic_offset_table_rtx)
864 gcc_assert (GET_CODE (XEXP (orig, 0)) == PLUS);
866 base = legitimize_pic_address (XEXP (XEXP (orig, 0), 0), Pmode, reg);
867 orig = legitimize_pic_address (XEXP (XEXP (orig, 0), 1), Pmode,
868 base == reg ? 0 : reg);
870 if (GET_CODE (orig) == CONST_INT)
872 if (INT_14_BITS (orig))
873 return plus_constant (Pmode, base, INTVAL (orig));
874 orig = force_reg (Pmode, orig);
876 pic_ref = gen_rtx_PLUS (Pmode, base, orig);
877 /* Likewise, should we set special REG_NOTEs here? */
883 static GTY(()) rtx gen_tls_tga;
886 gen_tls_get_addr (void)
889 gen_tls_tga = init_one_libfunc ("__tls_get_addr");
894 hppa_tls_call (rtx arg)
898 ret = gen_reg_rtx (Pmode);
899 emit_library_call_value (gen_tls_get_addr (), ret,
900 LCT_CONST, Pmode, 1, arg, Pmode);
906 legitimize_tls_address (rtx addr)
908 rtx ret, insn, tmp, t1, t2, tp;
909 enum tls_model model = SYMBOL_REF_TLS_MODEL (addr);
913 case TLS_MODEL_GLOBAL_DYNAMIC:
914 tmp = gen_reg_rtx (Pmode);
916 emit_insn (gen_tgd_load_pic (tmp, addr));
918 emit_insn (gen_tgd_load (tmp, addr));
919 ret = hppa_tls_call (tmp);
922 case TLS_MODEL_LOCAL_DYNAMIC:
923 ret = gen_reg_rtx (Pmode);
924 tmp = gen_reg_rtx (Pmode);
927 emit_insn (gen_tld_load_pic (tmp, addr));
929 emit_insn (gen_tld_load (tmp, addr));
930 t1 = hppa_tls_call (tmp);
933 t2 = gen_reg_rtx (Pmode);
934 emit_libcall_block (insn, t2, t1,
935 gen_rtx_UNSPEC (Pmode, gen_rtvec (1, const0_rtx),
937 emit_insn (gen_tld_offset_load (ret, addr, t2));
940 case TLS_MODEL_INITIAL_EXEC:
941 tp = gen_reg_rtx (Pmode);
942 tmp = gen_reg_rtx (Pmode);
943 ret = gen_reg_rtx (Pmode);
944 emit_insn (gen_tp_load (tp));
946 emit_insn (gen_tie_load_pic (tmp, addr));
948 emit_insn (gen_tie_load (tmp, addr));
949 emit_move_insn (ret, gen_rtx_PLUS (Pmode, tp, tmp));
952 case TLS_MODEL_LOCAL_EXEC:
953 tp = gen_reg_rtx (Pmode);
954 ret = gen_reg_rtx (Pmode);
955 emit_insn (gen_tp_load (tp));
956 emit_insn (gen_tle_load (ret, addr, tp));
966 /* Try machine-dependent ways of modifying an illegitimate address
967 to be legitimate. If we find one, return the new, valid address.
968 This macro is used in only one place: `memory_address' in explow.c.
970 OLDX is the address as it was before break_out_memory_refs was called.
971 In some cases it is useful to look at this to decide what needs to be done.
973 It is always safe for this macro to do nothing. It exists to recognize
974 opportunities to optimize the output.
976 For the PA, transform:
978 memory(X + <large int>)
980 into:
982 if (<large int> & mask) >= 16
983 Y = (<large int> & ~mask) + mask + 1 Round up.
984 else
985 Y = (<large int> & ~mask) Round down.
986 Z = X + Y
987 memory (Z + (<large int> - Y));
989 This is for CSE to find several similar references, and only use one Z.
991 X can either be a SYMBOL_REF or REG, but because combine cannot
992 perform a 4->2 combination we do nothing for SYMBOL_REF + D where
993 D will not fit in 14 bits.
995 MODE_FLOAT references allow displacements which fit in 5 bits, so use
996 0x1f as the mask.
998 MODE_INT references allow displacements which fit in 14 bits, so use
999 0x3fff as the mask.
1001 This relies on the fact that most mode MODE_FLOAT references will use FP
1002 registers and most mode MODE_INT references will use integer registers.
1003 (In the rare case of an FP register used in an integer MODE, we depend
1004 on secondary reloads to clean things up.)
1007 It is also beneficial to handle (plus (mult (X) (Y)) (Z)) in a special
1008 manner if Y is 2, 4, or 8. (allows more shadd insns and shifted indexed
1009 addressing modes to be used).
1011 Put X and Z into registers. Then put the entire expression into
1012 a register.  */
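/* A worked example (the numbers here are ours, for illustration only):
   for an integer-mode reference the mask below is 0x3fff, so for
   memory (X + 0x12345) we have 0x12345 & 0x3fff = 0x2345, which is at
   least halfway to the next boundary; we round up to Y = 0x14000, load
   Z = X + 0x14000, and rewrite the access as
   memory (Z + (0x12345 - 0x14000)) = memory (Z - 0x1cbb), whose
   displacement fits in 14 bits.  */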
1015 hppa_legitimize_address (rtx x, rtx oldx ATTRIBUTE_UNUSED,
1016 enum machine_mode mode)
1020 /* We need to canonicalize the order of operands in unscaled indexed
1021 addresses since the code that checks if an address is valid doesn't
1022 always try both orders. */
1023 if (!TARGET_NO_SPACE_REGS
1024 && GET_CODE (x) == PLUS
1025 && GET_MODE (x) == Pmode
1026 && REG_P (XEXP (x, 0))
1027 && REG_P (XEXP (x, 1))
1028 && REG_POINTER (XEXP (x, 0))
1029 && !REG_POINTER (XEXP (x, 1)))
1030 return gen_rtx_PLUS (Pmode, XEXP (x, 1), XEXP (x, 0));
1032 if (PA_SYMBOL_REF_TLS_P (x))
1033 return legitimize_tls_address (x);
1035 return legitimize_pic_address (x, mode, gen_reg_rtx (Pmode));
1037 /* Strip off CONST. */
1038 if (GET_CODE (x) == CONST)
1041 /* Special case. Get the SYMBOL_REF into a register and use indexing.
1042 That should always be safe. */
1043 if (GET_CODE (x) == PLUS
1044 && GET_CODE (XEXP (x, 0)) == REG
1045 && GET_CODE (XEXP (x, 1)) == SYMBOL_REF)
1047 rtx reg = force_reg (Pmode, XEXP (x, 1));
1048 return force_reg (Pmode, gen_rtx_PLUS (Pmode, reg, XEXP (x, 0)));
1051 /* Note we must reject symbols which represent function addresses
1052 since the assembler/linker can't handle arithmetic on plabels. */
1053 if (GET_CODE (x) == PLUS
1054 && GET_CODE (XEXP (x, 1)) == CONST_INT
1055 && ((GET_CODE (XEXP (x, 0)) == SYMBOL_REF
1056 && !FUNCTION_NAME_P (XSTR (XEXP (x, 0), 0)))
1057 || GET_CODE (XEXP (x, 0)) == REG))
1059 rtx int_part, ptr_reg;
1061 int offset = INTVAL (XEXP (x, 1));
1064 mask = (GET_MODE_CLASS (mode) == MODE_FLOAT
1065 && !INT14_OK_STRICT ? 0x1f : 0x3fff);
1067 /* Choose which way to round the offset. Round up if we
1068 are >= halfway to the next boundary. */
1069 if ((offset & mask) >= ((mask + 1) / 2))
1070 newoffset = (offset & ~ mask) + mask + 1;
1072 newoffset = (offset & ~ mask);
1074 /* If the newoffset will not fit in 14 bits (ldo), then
1075 handling this would take 4 or 5 instructions (2 to load
1076 the SYMBOL_REF + 1 or 2 to load the newoffset + 1 to
1077 add the new offset and the SYMBOL_REF.) Combine can
1078 not handle 4->2 or 5->2 combinations, so do not create
1079 anything new.  */
1080 if (! VAL_14_BITS_P (newoffset)
1081 && GET_CODE (XEXP (x, 0)) == SYMBOL_REF)
1083 rtx const_part = plus_constant (Pmode, XEXP (x, 0), newoffset);
1086 gen_rtx_HIGH (Pmode, const_part));
1089 gen_rtx_LO_SUM (Pmode,
1090 tmp_reg, const_part));
1094 if (! VAL_14_BITS_P (newoffset))
1095 int_part = force_reg (Pmode, GEN_INT (newoffset));
1097 int_part = GEN_INT (newoffset);
1099 ptr_reg = force_reg (Pmode,
1100 gen_rtx_PLUS (Pmode,
1101 force_reg (Pmode, XEXP (x, 0)),
1104 return plus_constant (Pmode, ptr_reg, offset - newoffset);
1107 /* Handle (plus (mult (a) (shadd_constant)) (b)). */
1109 if (GET_CODE (x) == PLUS && GET_CODE (XEXP (x, 0)) == MULT
1110 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT
1111 && pa_shadd_constant_p (INTVAL (XEXP (XEXP (x, 0), 1)))
1112 && (OBJECT_P (XEXP (x, 1))
1113 || GET_CODE (XEXP (x, 1)) == SUBREG)
1114 && GET_CODE (XEXP (x, 1)) != CONST)
1116 int val = INTVAL (XEXP (XEXP (x, 0), 1));
1120 if (GET_CODE (reg1) != REG)
1121 reg1 = force_reg (Pmode, force_operand (reg1, 0));
1123 reg2 = XEXP (XEXP (x, 0), 0);
1124 if (GET_CODE (reg2) != REG)
1125 reg2 = force_reg (Pmode, force_operand (reg2, 0));
1127 return force_reg (Pmode, gen_rtx_PLUS (Pmode,
1128 gen_rtx_MULT (Pmode,
1134 /* Similarly for (plus (plus (mult (a) (shadd_constant)) (b)) (c)).
1136 Only do so for floating point modes since this is more speculative
1137 and we lose if it's an integer store. */
1138 if (GET_CODE (x) == PLUS
1139 && GET_CODE (XEXP (x, 0)) == PLUS
1140 && GET_CODE (XEXP (XEXP (x, 0), 0)) == MULT
1141 && GET_CODE (XEXP (XEXP (XEXP (x, 0), 0), 1)) == CONST_INT
1142 && pa_shadd_constant_p (INTVAL (XEXP (XEXP (XEXP (x, 0), 0), 1)))
1143 && (mode == SFmode || mode == DFmode))
1146 /* First, try and figure out what to use as a base register. */
1147 rtx reg1, reg2, base, idx;
1149 reg1 = XEXP (XEXP (x, 0), 1);
1154 /* Make sure they're both regs. If one was a SYMBOL_REF [+ const],
1155 then pa_emit_move_sequence will turn on REG_POINTER so we'll know
1156 it's a base register below. */
1157 if (GET_CODE (reg1) != REG)
1158 reg1 = force_reg (Pmode, force_operand (reg1, 0));
1160 if (GET_CODE (reg2) != REG)
1161 reg2 = force_reg (Pmode, force_operand (reg2, 0));
1163 /* Figure out what the base and index are. */
1165 if (GET_CODE (reg1) == REG
1166 && REG_POINTER (reg1))
1169 idx = gen_rtx_PLUS (Pmode,
1170 gen_rtx_MULT (Pmode,
1171 XEXP (XEXP (XEXP (x, 0), 0), 0),
1172 XEXP (XEXP (XEXP (x, 0), 0), 1)),
1175 else if (GET_CODE (reg2) == REG
1176 && REG_POINTER (reg2))
1185 /* If the index adds a large constant, try to scale the
1186 constant so that it can be loaded with only one insn. */
1187 if (GET_CODE (XEXP (idx, 1)) == CONST_INT
1188 && VAL_14_BITS_P (INTVAL (XEXP (idx, 1))
1189 / INTVAL (XEXP (XEXP (idx, 0), 1)))
1190 && INTVAL (XEXP (idx, 1)) % INTVAL (XEXP (XEXP (idx, 0), 1)) == 0)
1192 /* Divide the CONST_INT by the scale factor, then add it to A. */
1193 int val = INTVAL (XEXP (idx, 1));
1195 val /= INTVAL (XEXP (XEXP (idx, 0), 1));
1196 reg1 = XEXP (XEXP (idx, 0), 0);
1197 if (GET_CODE (reg1) != REG)
1198 reg1 = force_reg (Pmode, force_operand (reg1, 0));
1200 reg1 = force_reg (Pmode, gen_rtx_PLUS (Pmode, reg1, GEN_INT (val)));
1202 /* We can now generate a simple scaled indexed address. */
1205 (Pmode, gen_rtx_PLUS (Pmode,
1206 gen_rtx_MULT (Pmode, reg1,
1207 XEXP (XEXP (idx, 0), 1)),
1211 /* If B + C is still a valid base register, then add them. */
1212 if (GET_CODE (XEXP (idx, 1)) == CONST_INT
1213 && INTVAL (XEXP (idx, 1)) <= 4096
1214 && INTVAL (XEXP (idx, 1)) >= -4096)
1216 int val = INTVAL (XEXP (XEXP (idx, 0), 1));
1219 reg1 = force_reg (Pmode, gen_rtx_PLUS (Pmode, base, XEXP (idx, 1)));
1221 reg2 = XEXP (XEXP (idx, 0), 0);
1222 if (GET_CODE (reg2) != CONST_INT)
1223 reg2 = force_reg (Pmode, force_operand (reg2, 0));
1225 return force_reg (Pmode, gen_rtx_PLUS (Pmode,
1226 gen_rtx_MULT (Pmode,
1232 /* Get the index into a register, then add the base + index and
1233 return a register holding the result. */
1235 /* First get A into a register. */
1236 reg1 = XEXP (XEXP (idx, 0), 0);
1237 if (GET_CODE (reg1) != REG)
1238 reg1 = force_reg (Pmode, force_operand (reg1, 0));
1240 /* And get B into a register. */
1241 reg2 = XEXP (idx, 1);
1242 if (GET_CODE (reg2) != REG)
1243 reg2 = force_reg (Pmode, force_operand (reg2, 0));
1245 reg1 = force_reg (Pmode,
1246 gen_rtx_PLUS (Pmode,
1247 gen_rtx_MULT (Pmode, reg1,
1248 XEXP (XEXP (idx, 0), 1)),
1251 /* Add the result to our base register and return. */
1252 return force_reg (Pmode, gen_rtx_PLUS (Pmode, base, reg1));
1256 /* Uh-oh. We might have an address for x[n-100000]. This needs
1257 special handling to avoid creating an indexed memory address
1258 with x-100000 as the base.
1260 If the constant part is small enough, then it's still safe because
1261 there is a guard page at the beginning and end of the data segment.
1263 Scaled references are common enough that we want to try and rearrange the
1264 terms so that we can use indexing for these addresses too. Only
1265 do the optimization for floating point modes.  */
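/* An illustrative sketch (array type and element size chosen by us): for
   a DFmode access like x[n-100000], the address looks like
   (plus (mult (reg n) (8)) (const (plus (symbol_ref x) (-800000)))).
   Since -800000 divides evenly by the scale factor 8, the code below
   folds -100000 into the index register and keeps a scaled indexed
   address of the form ((n - 100000) * 8) + &x.  */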
1267 if (GET_CODE (x) == PLUS
1268 && pa_symbolic_expression_p (XEXP (x, 1)))
1270 /* Ugly. We modify things here so that the address offset specified
1271 by the index expression is computed first, then added to x to form
1272 the entire address. */
1274 rtx regx1, regx2, regy1, regy2, y;
1276 /* Strip off any CONST. */
1278 if (GET_CODE (y) == CONST)
1281 if (GET_CODE (y) == PLUS || GET_CODE (y) == MINUS)
1283 /* See if this looks like
1284 (plus (mult (reg) (shadd_const))
1285 (const (plus (symbol_ref) (const_int))))
1287 Where const_int is small. In that case the const
1288 expression is a valid pointer for indexing.
1290 If const_int is big but can be divided evenly by shadd_const,
1291 divide it and add the quotient to (reg). This allows more scaled indexed addresses.  */
1292 if (GET_CODE (XEXP (y, 0)) == SYMBOL_REF
1293 && GET_CODE (XEXP (x, 0)) == MULT
1294 && GET_CODE (XEXP (y, 1)) == CONST_INT
1295 && INTVAL (XEXP (y, 1)) >= -4096
1296 && INTVAL (XEXP (y, 1)) <= 4095
1297 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT
1298 && pa_shadd_constant_p (INTVAL (XEXP (XEXP (x, 0), 1))))
1300 int val = INTVAL (XEXP (XEXP (x, 0), 1));
1304 if (GET_CODE (reg1) != REG)
1305 reg1 = force_reg (Pmode, force_operand (reg1, 0));
1307 reg2 = XEXP (XEXP (x, 0), 0);
1308 if (GET_CODE (reg2) != REG)
1309 reg2 = force_reg (Pmode, force_operand (reg2, 0));
1311 return force_reg (Pmode,
1312 gen_rtx_PLUS (Pmode,
1313 gen_rtx_MULT (Pmode,
1318 else if ((mode == DFmode || mode == SFmode)
1319 && GET_CODE (XEXP (y, 0)) == SYMBOL_REF
1320 && GET_CODE (XEXP (x, 0)) == MULT
1321 && GET_CODE (XEXP (y, 1)) == CONST_INT
1322 && INTVAL (XEXP (y, 1)) % INTVAL (XEXP (XEXP (x, 0), 1)) == 0
1323 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT
1324 && pa_shadd_constant_p (INTVAL (XEXP (XEXP (x, 0), 1))))
1327 = force_reg (Pmode, GEN_INT (INTVAL (XEXP (y, 1))
1328 / INTVAL (XEXP (XEXP (x, 0), 1))));
1329 regx2 = XEXP (XEXP (x, 0), 0);
1330 if (GET_CODE (regx2) != REG)
1331 regx2 = force_reg (Pmode, force_operand (regx2, 0));
1332 regx2 = force_reg (Pmode, gen_rtx_fmt_ee (GET_CODE (y), Pmode,
1336 gen_rtx_PLUS (Pmode,
1337 gen_rtx_MULT (Pmode, regx2,
1338 XEXP (XEXP (x, 0), 1)),
1339 force_reg (Pmode, XEXP (y, 0))));
1341 else if (GET_CODE (XEXP (y, 1)) == CONST_INT
1342 && INTVAL (XEXP (y, 1)) >= -4096
1343 && INTVAL (XEXP (y, 1)) <= 4095)
1345 /* This is safe because of the guard page at the
1346 beginning and end of the data space. Just
1347 return the original address. */
1352 /* Doesn't look like one we can optimize. */
1353 regx1 = force_reg (Pmode, force_operand (XEXP (x, 0), 0));
1354 regy1 = force_reg (Pmode, force_operand (XEXP (y, 0), 0));
1355 regy2 = force_reg (Pmode, force_operand (XEXP (y, 1), 0));
1356 regx1 = force_reg (Pmode,
1357 gen_rtx_fmt_ee (GET_CODE (y), Pmode,
1359 return force_reg (Pmode, gen_rtx_PLUS (Pmode, regx1, regy1));
1367 /* Implement the TARGET_REGISTER_MOVE_COST hook.
1369 Compute extra cost of moving data between one register class
1372 Make moves from SAR so expensive they should never happen. We used to
1373 have 0xffff here, but that generates overflow in rare cases.
1375 Copies involving a FP register and a non-FP register are relatively
1376 expensive because they must go through memory.
1378 Other copies are reasonably cheap. */
1381 hppa_register_move_cost (enum machine_mode mode ATTRIBUTE_UNUSED,
1382 reg_class_t from, reg_class_t to)
1384 if (from == SHIFT_REGS)
1386 else if (to == SHIFT_REGS && FP_REG_CLASS_P (from))
1388 else if ((FP_REG_CLASS_P (from) && ! FP_REG_CLASS_P (to))
1389 || (FP_REG_CLASS_P (to) && ! FP_REG_CLASS_P (from)))
1395 /* For the HPPA, REG and REG+CONST are cost 0
1396 and addresses involving symbolic constants are cost 2.
1398 PIC addresses are very expensive.
1400 It is no coincidence that this has the same structure
1401 as pa_legitimate_address_p. */
1404 hppa_address_cost (rtx X, enum machine_mode mode ATTRIBUTE_UNUSED,
1405 addr_space_t as ATTRIBUTE_UNUSED,
1406 bool speed ATTRIBUTE_UNUSED)
1408 switch (GET_CODE (X))
1421 /* Compute a (partial) cost for rtx X. Return true if the complete
1422 cost has been computed, and false if subexpressions should be
1423 scanned. In either case, *TOTAL contains the cost result. */
1426 hppa_rtx_costs (rtx x, int code, int outer_code, int opno ATTRIBUTE_UNUSED,
1427 int *total, bool speed ATTRIBUTE_UNUSED)
1434 if (INTVAL (x) == 0)
1436 else if (INT_14_BITS (x))
1453 if ((x == CONST0_RTX (DFmode) || x == CONST0_RTX (SFmode))
1454 && outer_code != SET)
1461 if (GET_MODE_CLASS (GET_MODE (x)) == MODE_FLOAT)
1463 *total = COSTS_N_INSNS (3);
1467 /* A mode size N times larger than SImode needs O(N*N) more insns. */
1468 factor = GET_MODE_SIZE (GET_MODE (x)) / 4;
1472 if (TARGET_PA_11 && !TARGET_DISABLE_FPREGS && !TARGET_SOFT_FLOAT)
1473 *total = factor * factor * COSTS_N_INSNS (8);
1475 *total = factor * factor * COSTS_N_INSNS (20);
1479 if (GET_MODE_CLASS (GET_MODE (x)) == MODE_FLOAT)
1481 *total = COSTS_N_INSNS (14);
1489 /* A mode size N times larger than SImode needs O(N*N) more insns. */
1490 factor = GET_MODE_SIZE (GET_MODE (x)) / 4;
1494 *total = factor * factor * COSTS_N_INSNS (60);
1497 case PLUS: /* this includes shNadd insns */
1499 if (GET_MODE_CLASS (GET_MODE (x)) == MODE_FLOAT)
1501 *total = COSTS_N_INSNS (3);
1505 /* A size N times larger than UNITS_PER_WORD needs N times as
1506 many insns, taking N times as long. */
1507 factor = GET_MODE_SIZE (GET_MODE (x)) / UNITS_PER_WORD;
1510 *total = factor * COSTS_N_INSNS (1);
1516 *total = COSTS_N_INSNS (1);
1524 /* Ensure mode of ORIG, a REG rtx, is MODE. Returns either ORIG or a
1525 new rtx with the correct mode. */
1527 force_mode (enum machine_mode mode, rtx orig)
1529 if (mode == GET_MODE (orig))
1532 gcc_assert (REGNO (orig) < FIRST_PSEUDO_REGISTER);
1534 return gen_rtx_REG (mode, REGNO (orig));
1537 /* Return 1 if *X is a thread-local symbol. */
1540 pa_tls_symbol_ref_1 (rtx *x, void *data ATTRIBUTE_UNUSED)
1542 return PA_SYMBOL_REF_TLS_P (*x);
1545 /* Return 1 if X contains a thread-local symbol. */
1548 pa_tls_referenced_p (rtx x)
1550 if (!TARGET_HAVE_TLS)
1553 return for_each_rtx (&x, &pa_tls_symbol_ref_1, 0);
1556 /* Implement TARGET_CANNOT_FORCE_CONST_MEM. */
1559 pa_cannot_force_const_mem (enum machine_mode mode ATTRIBUTE_UNUSED, rtx x)
1561 return pa_tls_referenced_p (x);
1564 /* Emit insns to move operands[1] into operands[0].
1566 Return 1 if we have written out everything that needs to be done to
1567 do the move. Otherwise, return 0 and the caller will emit the move
1568 normally.
1570 Note SCRATCH_REG may not be in the proper mode depending on how it
1571 will be used. This routine is responsible for creating a new copy
1572 of SCRATCH_REG in the proper mode. */
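#if 0
  /* Illustrative caller sketch (ours, not code from this file): a move
     expander can let pa_emit_move_sequence do the whole job when it
     returns nonzero, and otherwise emit the plain set itself.  */
  if (!pa_emit_move_sequence (operands, SImode, NULL_RTX))
    emit_insn (gen_rtx_SET (VOIDmode, operands[0], operands[1]));
#endif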
1575 pa_emit_move_sequence (rtx *operands, enum machine_mode mode, rtx scratch_reg)
1577 register rtx operand0 = operands[0];
1578 register rtx operand1 = operands[1];
1581 /* We can only handle indexed addresses in the destination operand
1582 of floating point stores. Thus, we need to break out indexed
1583 addresses from the destination operand. */
1584 if (GET_CODE (operand0) == MEM && IS_INDEX_ADDR_P (XEXP (operand0, 0)))
1586 gcc_assert (can_create_pseudo_p ());
1588 tem = copy_to_mode_reg (Pmode, XEXP (operand0, 0));
1589 operand0 = replace_equiv_address (operand0, tem);
1592 /* On targets with non-equivalent space registers, break out unscaled
1593 indexed addresses from the source operand before the final CSE.
1594 We have to do this because the REG_POINTER flag is not correctly
1595 carried through various optimization passes and CSE may substitute
1596 a pseudo without the pointer set for one with the pointer set. As
1597 a result, we lose various opportunities to create insns with
1598 unscaled indexed addresses. */
1599 if (!TARGET_NO_SPACE_REGS
1600 && !cse_not_expected
1601 && GET_CODE (operand1) == MEM
1602 && GET_CODE (XEXP (operand1, 0)) == PLUS
1603 && REG_P (XEXP (XEXP (operand1, 0), 0))
1604 && REG_P (XEXP (XEXP (operand1, 0), 1)))
1606 = replace_equiv_address (operand1,
1607 copy_to_mode_reg (Pmode, XEXP (operand1, 0)));
1610 && reload_in_progress && GET_CODE (operand0) == REG
1611 && REGNO (operand0) >= FIRST_PSEUDO_REGISTER)
1612 operand0 = reg_equiv_mem (REGNO (operand0));
1613 else if (scratch_reg
1614 && reload_in_progress && GET_CODE (operand0) == SUBREG
1615 && GET_CODE (SUBREG_REG (operand0)) == REG
1616 && REGNO (SUBREG_REG (operand0)) >= FIRST_PSEUDO_REGISTER)
1618 /* We must not alter SUBREG_BYTE (operand0) since that would confuse
1619 the code which tracks sets/uses for delete_output_reload. */
1620 rtx temp = gen_rtx_SUBREG (GET_MODE (operand0),
1621 reg_equiv_mem (REGNO (SUBREG_REG (operand0))),
1622 SUBREG_BYTE (operand0));
1623 operand0 = alter_subreg (&temp, true);
1627 && reload_in_progress && GET_CODE (operand1) == REG
1628 && REGNO (operand1) >= FIRST_PSEUDO_REGISTER)
1629 operand1 = reg_equiv_mem (REGNO (operand1));
1630 else if (scratch_reg
1631 && reload_in_progress && GET_CODE (operand1) == SUBREG
1632 && GET_CODE (SUBREG_REG (operand1)) == REG
1633 && REGNO (SUBREG_REG (operand1)) >= FIRST_PSEUDO_REGISTER)
1635 /* We must not alter SUBREG_BYTE (operand0) since that would confuse
1636 the code which tracks sets/uses for delete_output_reload. */
1637 rtx temp = gen_rtx_SUBREG (GET_MODE (operand1),
1638 reg_equiv_mem (REGNO (SUBREG_REG (operand1))),
1639 SUBREG_BYTE (operand1));
1640 operand1 = alter_subreg (&temp, true);
1643 if (scratch_reg && reload_in_progress && GET_CODE (operand0) == MEM
1644 && ((tem = find_replacement (&XEXP (operand0, 0)))
1645 != XEXP (operand0, 0)))
1646 operand0 = replace_equiv_address (operand0, tem);
1648 if (scratch_reg && reload_in_progress && GET_CODE (operand1) == MEM
1649 && ((tem = find_replacement (&XEXP (operand1, 0)))
1650 != XEXP (operand1, 0)))
1651 operand1 = replace_equiv_address (operand1, tem);
1653 /* Handle secondary reloads for loads/stores of FP registers from
1654 REG+D addresses where D does not fit in 5 or 14 bits, including
1655 (subreg (mem (addr))) cases. */
1657 && fp_reg_operand (operand0, mode)
1658 && (MEM_P (operand1)
1659 || (GET_CODE (operand1) == SUBREG
1660 && MEM_P (XEXP (operand1, 0))))
1661 && !floating_point_store_memory_operand (operand1, mode))
1663 if (GET_CODE (operand1) == SUBREG)
1664 operand1 = XEXP (operand1, 0);
1666 /* SCRATCH_REG will hold an address and maybe the actual data. We want
1667 it in WORD_MODE regardless of what mode it was originally given
1668 to us.  */
1669 scratch_reg = force_mode (word_mode, scratch_reg);
1671 /* D might not fit in 14 bits either; for such cases load D into
1672 scratch reg.  */
1673 if (reg_plus_base_memory_operand (operand1, mode)
1676 && INT_14_BITS (XEXP (XEXP (operand1, 0), 1))))
1678 emit_move_insn (scratch_reg, XEXP (XEXP (operand1, 0), 1));
1679 emit_move_insn (scratch_reg,
1680 gen_rtx_fmt_ee (GET_CODE (XEXP (operand1, 0)),
1682 XEXP (XEXP (operand1, 0), 0),
1686 emit_move_insn (scratch_reg, XEXP (operand1, 0));
1687 emit_insn (gen_rtx_SET (VOIDmode, operand0,
1688 replace_equiv_address (operand1, scratch_reg)));
1691 else if (scratch_reg
1692 && fp_reg_operand (operand1, mode)
1693 && (MEM_P (operand0)
1694 || (GET_CODE (operand0) == SUBREG
1695 && MEM_P (XEXP (operand0, 0))))
1696 && !floating_point_store_memory_operand (operand0, mode))
1698 if (GET_CODE (operand0) == SUBREG)
1699 operand0 = XEXP (operand0, 0);
1701 /* SCRATCH_REG will hold an address and maybe the actual data. We want
1702 it in WORD_MODE regardless of what mode it was originally given
1703 to us.  */
1704 scratch_reg = force_mode (word_mode, scratch_reg);
1706 /* D might not fit in 14 bits either; for such cases load D into
1707 scratch reg.  */
1708 if (reg_plus_base_memory_operand (operand0, mode)
1711 && INT_14_BITS (XEXP (XEXP (operand0, 0), 1))))
1713 emit_move_insn (scratch_reg, XEXP (XEXP (operand0, 0), 1));
1714 emit_move_insn (scratch_reg, gen_rtx_fmt_ee (GET_CODE (XEXP (operand0,
1717 XEXP (XEXP (operand0, 0),
1722 emit_move_insn (scratch_reg, XEXP (operand0, 0));
1723 emit_insn (gen_rtx_SET (VOIDmode,
1724 replace_equiv_address (operand0, scratch_reg),
1728 /* Handle secondary reloads for loads of FP registers from constant
1729 expressions by forcing the constant into memory. For the most part,
1730 this is only necessary for SImode and DImode.
1732 Use scratch_reg to hold the address of the memory location. */
1733 else if (scratch_reg
1734 && CONSTANT_P (operand1)
1735 && fp_reg_operand (operand0, mode))
1737 rtx const_mem, xoperands[2];
1739 if (operand1 == CONST0_RTX (mode))
1741 emit_insn (gen_rtx_SET (VOIDmode, operand0, operand1));
1745 /* SCRATCH_REG will hold an address and maybe the actual data. We want
1746 it in WORD_MODE regardless of what mode it was originally given
1747 to us.  */
1748 scratch_reg = force_mode (word_mode, scratch_reg);
1750 /* Force the constant into memory and put the address of the
1751 memory location into scratch_reg. */
1752 const_mem = force_const_mem (mode, operand1);
1753 xoperands[0] = scratch_reg;
1754 xoperands[1] = XEXP (const_mem, 0);
1755 pa_emit_move_sequence (xoperands, Pmode, 0);
1757 /* Now load the destination register. */
1758 emit_insn (gen_rtx_SET (mode, operand0,
1759 replace_equiv_address (const_mem, scratch_reg)));
1762 /* Handle secondary reloads for SAR. These occur when trying to load
1763 the SAR from memory or a constant. */
1764 else if (scratch_reg
1765 && GET_CODE (operand0) == REG
1766 && REGNO (operand0) < FIRST_PSEUDO_REGISTER
1767 && REGNO_REG_CLASS (REGNO (operand0)) == SHIFT_REGS
1768 && (GET_CODE (operand1) == MEM || GET_CODE (operand1) == CONST_INT))
1770 /* D might not fit in 14 bits either; for such cases load D into
1771 scratch reg.  */
1772 if (GET_CODE (operand1) == MEM
1773 && !memory_address_p (GET_MODE (operand0), XEXP (operand1, 0)))
1775 /* We are reloading the address into the scratch register, so we
1776 want to make sure the scratch register is a full register. */
1777 scratch_reg = force_mode (word_mode, scratch_reg);
1779 emit_move_insn (scratch_reg, XEXP (XEXP (operand1, 0), 1));
1780 emit_move_insn (scratch_reg, gen_rtx_fmt_ee (GET_CODE (XEXP (operand1,
1783 XEXP (XEXP (operand1, 0),
1787 /* Now we are going to load the scratch register from memory,
1788 we want to load it in the same width as the original MEM,
1789 which must be the same as the width of the ultimate destination,
1790 operand0.  */
1791 scratch_reg = force_mode (GET_MODE (operand0), scratch_reg);
1793 emit_move_insn (scratch_reg,
1794 replace_equiv_address (operand1, scratch_reg));
1798 /* We want to load the scratch register using the same mode as
1799 the ultimate destination. */
1800 scratch_reg = force_mode (GET_MODE (operand0), scratch_reg);
1802 emit_move_insn (scratch_reg, operand1);
1805 /* And emit the insn to set the ultimate destination. We know that
1806 the scratch register has the same mode as the destination at this
1807 point.  */
1808 emit_move_insn (operand0, scratch_reg);
1811 /* Handle the most common case: storing into a register. */
1812 else if (register_operand (operand0, mode))
1814 /* Legitimize TLS symbol references. This happens for references
1815 that aren't a legitimate constant. */
1816 if (PA_SYMBOL_REF_TLS_P (operand1))
1817 operand1 = legitimize_tls_address (operand1);
1819 if (register_operand (operand1, mode)
1820 || (GET_CODE (operand1) == CONST_INT
1821 && pa_cint_ok_for_move (INTVAL (operand1)))
1822 || (operand1 == CONST0_RTX (mode))
1823 || (GET_CODE (operand1) == HIGH
1824 && !symbolic_operand (XEXP (operand1, 0), VOIDmode))
1825 /* Only `general_operands' can come here, so MEM is ok. */
1826 || GET_CODE (operand1) == MEM)
1828 /* Various sets are created during RTL generation which don't
1829 have the REG_POINTER flag correctly set. After the CSE pass,
1830 instruction recognition can fail if we don't consistently
1831 set this flag when performing register copies. This should
1832 also improve the opportunities for creating insns that use
1833 unscaled indexing. */
1834 if (REG_P (operand0) && REG_P (operand1))
1836 if (REG_POINTER (operand1)
1837 && !REG_POINTER (operand0)
1838 && !HARD_REGISTER_P (operand0))
1839 copy_reg_pointer (operand0, operand1);
1842 /* When MEMs are broken out, the REG_POINTER flag doesn't
1843 get set. In some cases, we can set the REG_POINTER flag
1844 from the declaration for the MEM. */
1845 if (REG_P (operand0)
1846 && GET_CODE (operand1) == MEM
1847 && !REG_POINTER (operand0))
1849 tree decl = MEM_EXPR (operand1);
1851 /* Set the register pointer flag and register alignment
1852 if the declaration for this memory reference is a
1853 pointer type.  */
1858 /* If this is a COMPONENT_REF, use the FIELD_DECL from
1859 its second operand.  */
1860 if (TREE_CODE (decl) == COMPONENT_REF)
1861 decl = TREE_OPERAND (decl, 1);
1863 type = TREE_TYPE (decl);
1864 type = strip_array_types (type);
1866 if (POINTER_TYPE_P (type))
1870 type = TREE_TYPE (type);
1871 /* Using TYPE_ALIGN_OK is rather conservative as
1872 only the ada frontend actually sets it. */
1873 align = (TYPE_ALIGN_OK (type) ? TYPE_ALIGN (type)
1875 mark_reg_pointer (operand0, align);
1880 emit_insn (gen_rtx_SET (VOIDmode, operand0, operand1));
1884 else if (GET_CODE (operand0) == MEM)
1886 if (mode == DFmode && operand1 == CONST0_RTX (mode)
1887 && !(reload_in_progress || reload_completed))
1889 rtx temp = gen_reg_rtx (DFmode);
1891 emit_insn (gen_rtx_SET (VOIDmode, temp, operand1));
1892 emit_insn (gen_rtx_SET (VOIDmode, operand0, temp));
1895 if (register_operand (operand1, mode) || operand1 == CONST0_RTX (mode))
1897 /* Run this case quickly. */
1898 emit_insn (gen_rtx_SET (VOIDmode, operand0, operand1));
1901 if (! (reload_in_progress || reload_completed))
1903 operands[0] = validize_mem (operand0);
1904 operands[1] = operand1 = force_reg (mode, operand1);
1908 /* Simplify the source if we need to.
1909 Note we do have to handle function labels here, even though we do
1910 not consider them legitimate constants. Loop optimizations can
1911 call the emit_move_xxx with one as a source. */
1912 if ((GET_CODE (operand1) != HIGH && immediate_operand (operand1, mode))
1913 || function_label_operand (operand1, VOIDmode)
1914 || (GET_CODE (operand1) == HIGH
1915 && symbolic_operand (XEXP (operand1, 0), mode)))
1919 if (GET_CODE (operand1) == HIGH)
1922 operand1 = XEXP (operand1, 0);
1924 if (symbolic_operand (operand1, mode))
1926 /* Argh. The assembler and linker can't handle arithmetic
1927 on plabels.
1929 So we force the plabel into memory, load operand0 from
1930 the memory location, then add in the constant part. */
1931 if ((GET_CODE (operand1) == CONST
1932 && GET_CODE (XEXP (operand1, 0)) == PLUS
1933 && function_label_operand (XEXP (XEXP (operand1, 0), 0),
1935 || function_label_operand (operand1, VOIDmode))
1937 rtx temp, const_part;
1939 /* Figure out what (if any) scratch register to use. */
1940 if (reload_in_progress || reload_completed)
1942 scratch_reg = scratch_reg ? scratch_reg : operand0;
1943 /* SCRATCH_REG will hold an address and maybe the actual
1944 data. We want it in WORD_MODE regardless of what mode it
1945 was originally given to us. */
1946 scratch_reg = force_mode (word_mode, scratch_reg);
1949 scratch_reg = gen_reg_rtx (Pmode);
1951 if (GET_CODE (operand1) == CONST)
1953 /* Save away the constant part of the expression. */
1954 const_part = XEXP (XEXP (operand1, 0), 1);
1955 gcc_assert (GET_CODE (const_part) == CONST_INT);
1957 /* Force the function label into memory. */
1958 temp = force_const_mem (mode, XEXP (XEXP (operand1, 0), 0));
1962 /* No constant part. */
1963 const_part = NULL_RTX;
1965 /* Force the function label into memory. */
1966 temp = force_const_mem (mode, operand1);
1970 /* Get the address of the memory location. PIC-ify it if
1971 necessary.  */
1972 temp = XEXP (temp, 0);
1974 temp = legitimize_pic_address (temp, mode, scratch_reg);
1976 /* Put the address of the memory location into our destination
1977 register.  */
1978 operands[1] = temp;
1979 pa_emit_move_sequence (operands, mode, scratch_reg);
1981 /* Now load from the memory location into our destination
1982 register.  */
1983 operands[1] = gen_rtx_MEM (Pmode, operands[0]);
1984 pa_emit_move_sequence (operands, mode, scratch_reg);
1986 /* And add back in the constant part. */
1987 if (const_part != NULL_RTX)
1988 expand_inc (operand0, const_part);
1997 if (reload_in_progress || reload_completed)
1999 temp = scratch_reg ? scratch_reg : operand0;
2000 /* TEMP will hold an address and maybe the actual
2001 data. We want it in WORD_MODE regardless of what mode it
2002 was originally given to us. */
2003 temp = force_mode (word_mode, temp);
2006 temp = gen_reg_rtx (Pmode);
2008 /* (const (plus (symbol) (const_int))) must be forced to
2009 memory during/after reload if the const_int will not fit in 14 bits. */
2011 if (GET_CODE (operand1) == CONST
2012 && GET_CODE (XEXP (operand1, 0)) == PLUS
2013 && GET_CODE (XEXP (XEXP (operand1, 0), 1)) == CONST_INT
2014 && !INT_14_BITS (XEXP (XEXP (operand1, 0), 1))
2015 && (reload_completed || reload_in_progress)
2018 rtx const_mem = force_const_mem (mode, operand1);
2019 operands[1] = legitimize_pic_address (XEXP (const_mem, 0),
2021 operands[1] = replace_equiv_address (const_mem, operands[1]);
2022 pa_emit_move_sequence (operands, mode, temp);
2026 operands[1] = legitimize_pic_address (operand1, mode, temp);
2027 if (REG_P (operand0) && REG_P (operands[1]))
2028 copy_reg_pointer (operand0, operands[1]);
2029 emit_insn (gen_rtx_SET (VOIDmode, operand0, operands[1]));
2032 /* On the HPPA, references to data space are supposed to use dp,
2033 register 27, but showing it in the RTL inhibits various cse
2034 and loop optimizations. */
2039 if (reload_in_progress || reload_completed)
2041 temp = scratch_reg ? scratch_reg : operand0;
2042 /* TEMP will hold an address and maybe the actual
2043 data. We want it in WORD_MODE regardless of what mode it
2044 was originally given to us. */
2045 temp = force_mode (word_mode, temp);
2048 temp = gen_reg_rtx (mode);
2050 /* Loading a SYMBOL_REF into a register makes that register
2051 safe to be used as the base in an indexed address.
2053 Don't mark hard registers though. That loses. */
2054 if (GET_CODE (operand0) == REG
2055 && REGNO (operand0) >= FIRST_PSEUDO_REGISTER)
2056 mark_reg_pointer (operand0, BITS_PER_UNIT);
2057 if (REGNO (temp) >= FIRST_PSEUDO_REGISTER)
2058 mark_reg_pointer (temp, BITS_PER_UNIT);
2061 set = gen_rtx_SET (mode, operand0, temp);
2063 set = gen_rtx_SET (VOIDmode,
2065 gen_rtx_LO_SUM (mode, temp, operand1));
2067 emit_insn (gen_rtx_SET (VOIDmode,
2069 gen_rtx_HIGH (mode, operand1)));
2075 else if (pa_tls_referenced_p (operand1))
2080 if (GET_CODE (tmp) == CONST && GET_CODE (XEXP (tmp, 0)) == PLUS)
2082 addend = XEXP (XEXP (tmp, 0), 1);
2083 tmp = XEXP (XEXP (tmp, 0), 0);
2086 gcc_assert (GET_CODE (tmp) == SYMBOL_REF);
2087 tmp = legitimize_tls_address (tmp);
2090 tmp = gen_rtx_PLUS (mode, tmp, addend);
2091 tmp = force_operand (tmp, operands[0]);
2095 else if (GET_CODE (operand1) != CONST_INT
2096 || !pa_cint_ok_for_move (INTVAL (operand1)))
2100 HOST_WIDE_INT value = 0;
2101 HOST_WIDE_INT insv = 0;
2104 if (GET_CODE (operand1) == CONST_INT)
2105 value = INTVAL (operand1);
2108 && GET_CODE (operand1) == CONST_INT
2109 && HOST_BITS_PER_WIDE_INT > 32
2110 && GET_MODE_BITSIZE (GET_MODE (operand0)) > 32)
2114 /* Extract the low order 32 bits of the value and sign extend.
2115 If the new value is the same as the original value, we can
2116 use the original value as-is. If the new value is
2117 different, we use it and insert the most-significant 32-bits
2118 of the original value into the final result. */
2119 nval = ((value & (((HOST_WIDE_INT) 2 << 31) - 1))
2120 ^ ((HOST_WIDE_INT) 1 << 31)) - ((HOST_WIDE_INT) 1 << 31);
2123 #if HOST_BITS_PER_WIDE_INT > 32
2124 insv = value >= 0 ? value >> 32 : ~(~value >> 32);
2128 operand1 = GEN_INT (nval);
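     /* Illustrative example: with a DImode destination and
        value = 0x180000000 on a 64-bit host, the low 32 bits are
        0x80000000, so nval = -0x80000000, which differs from the original
        value; the move below then loads the sign-extended low half and the
        code further on reinserts the upper 32 bits (insv = 1) with an
        insv/depdi sequence.  */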
2132 if (reload_in_progress || reload_completed)
2133 temp = scratch_reg ? scratch_reg : operand0;
2135 temp = gen_reg_rtx (mode);
2137 /* We don't directly split DImode constants on 32-bit targets
2138 because PLUS uses an 11-bit immediate and the insn sequence
2139 generated is not as efficient as the one using HIGH/LO_SUM. */
2140 if (GET_CODE (operand1) == CONST_INT
2141 && GET_MODE_BITSIZE (mode) <= BITS_PER_WORD
2142 && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
2145 /* Directly break constant into high and low parts. This
2146 provides better optimization opportunities because various
2147 passes recognize constants split with PLUS but not LO_SUM.
2148 We use a 14-bit signed low part except when the addition
2149 of 0x4000 to the high part might change the sign of the high part. */
2151 HOST_WIDE_INT low = value & 0x3fff;
2152 HOST_WIDE_INT high = value & ~ 0x3fff;
2156 if (high == 0x7fffc000 || (mode == HImode && high == 0x4000))
2164 emit_insn (gen_rtx_SET (VOIDmode, temp, GEN_INT (high)));
2165 operands[1] = gen_rtx_PLUS (mode, temp, GEN_INT (low));
2169 emit_insn (gen_rtx_SET (VOIDmode, temp,
2170 gen_rtx_HIGH (mode, operand1)));
2171 operands[1] = gen_rtx_LO_SUM (mode, temp, operand1);
2174 insn = emit_move_insn (operands[0], operands[1]);
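     /* Illustrative example: an SImode constant 0x12345678 splits into
        high = 0x12344000 and low = 0x1678, so the move becomes a load of
        the high part into TEMP followed by a PLUS of the 14-bit low part
        (typically an ldil/ldo pair at the assembly level).  DImode
        constants on 32-bit targets take the HIGH/LO_SUM form instead, as
        noted above.  */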
2176 /* Now insert the most significant 32 bits of the value
2177 into the register. When we don't have a second register
2178 available, it could take up to nine instructions to load
2179 a 64-bit integer constant. Prior to reload, we force
2180 constants that would take more than three instructions
2181 to load to the constant pool. During and after reload,
2182 we have to handle all possible values. */
2185 /* Use a HIGH/LO_SUM/INSV sequence if we have a second
2186 register and the value to be inserted is outside the
2187 range that can be loaded with three depdi instructions. */
2188 if (temp != operand0 && (insv >= 16384 || insv < -16384))
2190 operand1 = GEN_INT (insv);
2192 emit_insn (gen_rtx_SET (VOIDmode, temp,
2193 gen_rtx_HIGH (mode, operand1)));
2194 emit_move_insn (temp, gen_rtx_LO_SUM (mode, temp, operand1));
2196 emit_insn (gen_insvdi (operand0, GEN_INT (32),
2199 emit_insn (gen_insvsi (operand0, GEN_INT (32),
2204 int len = 5, pos = 27;
2206 /* Insert the bits using the depdi instruction. */
2209 HOST_WIDE_INT v5 = ((insv & 31) ^ 16) - 16;
2210 HOST_WIDE_INT sign = v5 < 0;
2212 /* Left extend the insertion. */
2213 insv = (insv >= 0 ? insv >> len : ~(~insv >> len));
2214 while (pos > 0 && (insv & 1) == sign)
2216 insv = (insv >= 0 ? insv >> 1 : ~(~insv >> 1));
2222 emit_insn (gen_insvdi (operand0, GEN_INT (len),
2223 GEN_INT (pos), GEN_INT (v5)));
2225 emit_insn (gen_insvsi (operand0, GEN_INT (len),
2226 GEN_INT (pos), GEN_INT (v5)));
2228 len = pos > 0 && pos < 5 ? pos : 5;
2234 set_unique_reg_note (insn, REG_EQUAL, op1);
2239 /* Now have insn-emit do whatever it normally does. */
2243 /* Examine EXP and return nonzero if it contains an ADDR_EXPR (meaning
2244 it will need a link/runtime reloc). */
2247 pa_reloc_needed (tree exp)
2251 switch (TREE_CODE (exp))
2256 case POINTER_PLUS_EXPR:
2259 reloc = pa_reloc_needed (TREE_OPERAND (exp, 0));
2260 reloc |= pa_reloc_needed (TREE_OPERAND (exp, 1));
2264 case NON_LVALUE_EXPR:
2265 reloc = pa_reloc_needed (TREE_OPERAND (exp, 0));
2271 unsigned HOST_WIDE_INT ix;
2273 FOR_EACH_CONSTRUCTOR_VALUE (CONSTRUCTOR_ELTS (exp), ix, value)
2275 reloc |= pa_reloc_needed (value);
2289 /* Return the best assembler insn template
2290 for moving operands[1] into operands[0] as a fullword. */
2292 pa_singlemove_string (rtx *operands)
2294 HOST_WIDE_INT intval;
2296 if (GET_CODE (operands[0]) == MEM)
2297 return "stw %r1,%0";
2298 if (GET_CODE (operands[1]) == MEM)
2300 if (GET_CODE (operands[1]) == CONST_DOUBLE)
2305 gcc_assert (GET_MODE (operands[1]) == SFmode);
2307 /* Translate the CONST_DOUBLE to a CONST_INT with the same target bit pattern. */
2309 REAL_VALUE_FROM_CONST_DOUBLE (d, operands[1]);
2310 REAL_VALUE_TO_TARGET_SINGLE (d, i);
2312 operands[1] = GEN_INT (i);
2313 /* Fall through to CONST_INT case. */
2315 if (GET_CODE (operands[1]) == CONST_INT)
2317 intval = INTVAL (operands[1]);
2319 if (VAL_14_BITS_P (intval))
2321 else if ((intval & 0x7ff) == 0)
2322 return "ldil L'%1,%0";
2323 else if (pa_zdepi_cint_p (intval))
2324 return "{zdepi %Z1,%0|depwi,z %Z1,%0}";
2326 return "ldil L'%1,%0\n\tldo R'%1(%0),%0";
2328 return "copy %1,%0";
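/* Illustrative examples of the templates above: a register source yields
   "copy"; a small constant such as 100 fits in 14 bits and is loaded with
   a single ldi; 0x55000 has its low 11 bits clear and uses ldil alone;
   0x1ffe0 is a single string of ones, passes pa_zdepi_cint_p and uses the
   depwi,z form; something like 0x12345 needs the two-insn ldil/ldo pair.  */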
2332 /* Compute position (in OP[1]) and width (in OP[2])
2333 useful for copying IMM to a register using the zdepi
2334 instructions. Store the immediate value to insert in OP[0]. */
2336 compute_zdepwi_operands (unsigned HOST_WIDE_INT imm, unsigned *op)
2340 /* Find the least significant set bit in IMM. */
2341 for (lsb = 0; lsb < 32; lsb++)
2348 /* Choose variants based on *sign* of the 5-bit field. */
2349 if ((imm & 0x10) == 0)
2350 len = (lsb <= 28) ? 4 : 32 - lsb;
2353 /* Find the width of the bitstring in IMM. */
2354 for (len = 5; len < 32 - lsb; len++)
2356 if ((imm & ((unsigned HOST_WIDE_INT) 1 << len)) == 0)
2360 /* Sign extend IMM as a 5-bit value. */
2361 imm = (imm & 0xf) - 0x10;
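/* Illustrative trace (assuming the elided part of the search loop also
   shifts IMM right past its trailing zeros, as the 5-bit tests above
   imply): for imm = 0x01ff0000, lsb = 16 and the shifted field is 0x1ff;
   bit 4 of the field is set, so the width loop finds len = 9 and the
   insert value sign-extends to -1, i.e. a depwi,z of -1 over a 9-bit
   field at the right position rebuilds the constant.  */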
2369 /* Compute position (in OP[1]) and width (in OP[2])
2370 useful for copying IMM to a register using the depdi,z
2371 instructions. Store the immediate value to insert in OP[0]. */
2374 compute_zdepdi_operands (unsigned HOST_WIDE_INT imm, unsigned *op)
2376 int lsb, len, maxlen;
2378 maxlen = MIN (HOST_BITS_PER_WIDE_INT, 64);
2380 /* Find the least significant set bit in IMM. */
2381 for (lsb = 0; lsb < maxlen; lsb++)
2388 /* Choose variants based on *sign* of the 5-bit field. */
2389 if ((imm & 0x10) == 0)
2390 len = (lsb <= maxlen - 4) ? 4 : maxlen - lsb;
2393 /* Find the width of the bitstring in IMM. */
2394 for (len = 5; len < maxlen - lsb; len++)
2396 if ((imm & ((unsigned HOST_WIDE_INT) 1 << len)) == 0)
2400 /* Extend length if host is narrow and IMM is negative. */
2401 if (HOST_BITS_PER_WIDE_INT == 32 && len == maxlen - lsb)
2404 /* Sign extend IMM as a 5-bit value. */
2405 imm = (imm & 0xf) - 0x10;
2413 /* Output assembler code to perform a doubleword move insn
2414 with operands OPERANDS. */
2417 pa_output_move_double (rtx *operands)
2419 enum { REGOP, OFFSOP, MEMOP, CNSTOP, RNDOP } optype0, optype1;
2421 rtx addreg0 = 0, addreg1 = 0;
2423 /* First classify both operands. */
2425 if (REG_P (operands[0]))
2427 else if (offsettable_memref_p (operands[0]))
2429 else if (GET_CODE (operands[0]) == MEM)
2434 if (REG_P (operands[1]))
2436 else if (CONSTANT_P (operands[1]))
2438 else if (offsettable_memref_p (operands[1]))
2440 else if (GET_CODE (operands[1]) == MEM)
2445 /* Check for cases that the operand constraints are not
2446 supposed to allow. */
2447 gcc_assert (optype0 == REGOP || optype1 == REGOP);
2449 /* Handle copies between general and floating registers. */
2451 if (optype0 == REGOP && optype1 == REGOP
2452 && FP_REG_P (operands[0]) ^ FP_REG_P (operands[1]))
2454 if (FP_REG_P (operands[0]))
2456 output_asm_insn ("{stws|stw} %1,-16(%%sp)", operands);
2457 output_asm_insn ("{stws|stw} %R1,-12(%%sp)", operands);
2458 return "{fldds|fldd} -16(%%sp),%0";
2462 output_asm_insn ("{fstds|fstd} %1,-16(%%sp)", operands);
2463 output_asm_insn ("{ldws|ldw} -16(%%sp),%0", operands);
2464 return "{ldws|ldw} -12(%%sp),%R0";
2468 /* Handle auto decrementing and incrementing loads and stores
2469 specifically, since the structure of the function doesn't work
2470 for them without major modification. Do it better when we teach
2471 this port about the general inc/dec addressing of PA.
2472 (This was written by tege. Chide him if it doesn't work.) */
2474 if (optype0 == MEMOP)
2476 /* We have to output the address syntax ourselves, since print_operand
2477 doesn't deal with the addresses we want to use. Fix this later. */
2479 rtx addr = XEXP (operands[0], 0);
2480 if (GET_CODE (addr) == POST_INC || GET_CODE (addr) == POST_DEC)
2482 rtx high_reg = gen_rtx_SUBREG (SImode, operands[1], 0);
2484 operands[0] = XEXP (addr, 0);
2485 gcc_assert (GET_CODE (operands[1]) == REG
2486 && GET_CODE (operands[0]) == REG);
2488 gcc_assert (!reg_overlap_mentioned_p (high_reg, addr));
2490 /* No overlap between high target register and address
2491 register. (We do this in a non-obvious way to
2492 save a register file writeback) */
2493 if (GET_CODE (addr) == POST_INC)
2494 return "{stws|stw},ma %1,8(%0)\n\tstw %R1,-4(%0)";
2495 return "{stws|stw},ma %1,-8(%0)\n\tstw %R1,12(%0)";
2497 else if (GET_CODE (addr) == PRE_INC || GET_CODE (addr) == PRE_DEC)
2499 rtx high_reg = gen_rtx_SUBREG (SImode, operands[1], 0);
2501 operands[0] = XEXP (addr, 0);
2502 gcc_assert (GET_CODE (operands[1]) == REG
2503 && GET_CODE (operands[0]) == REG);
2505 gcc_assert (!reg_overlap_mentioned_p (high_reg, addr));
2506 /* No overlap between high target register and address
2507 register. (We do this in a non-obvious way to save a
2508 register file writeback) */
2509 if (GET_CODE (addr) == PRE_INC)
2510 return "{stws|stw},mb %1,8(%0)\n\tstw %R1,4(%0)";
2511 return "{stws|stw},mb %1,-8(%0)\n\tstw %R1,4(%0)";
2514 if (optype1 == MEMOP)
2516 /* We have to output the address syntax ourselves, since print_operand
2517 doesn't deal with the addresses we want to use. Fix this later. */
2519 rtx addr = XEXP (operands[1], 0);
2520 if (GET_CODE (addr) == POST_INC || GET_CODE (addr) == POST_DEC)
2522 rtx high_reg = gen_rtx_SUBREG (SImode, operands[0], 0);
2524 operands[1] = XEXP (addr, 0);
2525 gcc_assert (GET_CODE (operands[0]) == REG
2526 && GET_CODE (operands[1]) == REG);
2528 if (!reg_overlap_mentioned_p (high_reg, addr))
2530 /* No overlap between high target register and address
2531 register. (We do this in a non-obvious way to
2532 save a register file writeback) */
2533 if (GET_CODE (addr) == POST_INC)
2534 return "{ldws|ldw},ma 8(%1),%0\n\tldw -4(%1),%R0";
2535 return "{ldws|ldw},ma -8(%1),%0\n\tldw 12(%1),%R0";
2539 /* This is an undefined situation. We should load into the
2540 address register *and* update that register. Probably
2541 we don't need to handle this at all. */
2542 if (GET_CODE (addr) == POST_INC)
2543 return "ldw 4(%1),%R0\n\t{ldws|ldw},ma 8(%1),%0";
2544 return "ldw 4(%1),%R0\n\t{ldws|ldw},ma -8(%1),%0";
2547 else if (GET_CODE (addr) == PRE_INC || GET_CODE (addr) == PRE_DEC)
2549 rtx high_reg = gen_rtx_SUBREG (SImode, operands[0], 0);
2551 operands[1] = XEXP (addr, 0);
2552 gcc_assert (GET_CODE (operands[0]) == REG
2553 && GET_CODE (operands[1]) == REG);
2555 if (!reg_overlap_mentioned_p (high_reg, addr))
2557 /* No overlap between high target register and address
2558 register. (We do this in a non-obvious way to
2559 save a register file writeback) */
2560 if (GET_CODE (addr) == PRE_INC)
2561 return "{ldws|ldw},mb 8(%1),%0\n\tldw 4(%1),%R0";
2562 return "{ldws|ldw},mb -8(%1),%0\n\tldw 4(%1),%R0";
2566 /* This is an undefined situation. We should load into the
2567 address register *and* update that register. Probably
2568 we don't need to handle this at all. */
2569 if (GET_CODE (addr) == PRE_INC)
2570 return "ldw 12(%1),%R0\n\t{ldws|ldw},mb 8(%1),%0";
2571 return "ldw -4(%1),%R0\n\t{ldws|ldw},mb -8(%1),%0";
2574 else if (GET_CODE (addr) == PLUS
2575 && GET_CODE (XEXP (addr, 0)) == MULT)
2578 rtx high_reg = gen_rtx_SUBREG (SImode, operands[0], 0);
2580 if (!reg_overlap_mentioned_p (high_reg, addr))
2582 xoperands[0] = high_reg;
2583 xoperands[1] = XEXP (addr, 1);
2584 xoperands[2] = XEXP (XEXP (addr, 0), 0);
2585 xoperands[3] = XEXP (XEXP (addr, 0), 1);
2586 output_asm_insn ("{sh%O3addl %2,%1,%0|shladd,l %2,%O3,%1,%0}",
2588 return "ldw 4(%0),%R0\n\tldw 0(%0),%0";
2592 xoperands[0] = high_reg;
2593 xoperands[1] = XEXP (addr, 1);
2594 xoperands[2] = XEXP (XEXP (addr, 0), 0);
2595 xoperands[3] = XEXP (XEXP (addr, 0), 1);
2596 output_asm_insn ("{sh%O3addl %2,%1,%R0|shladd,l %2,%O3,%1,%R0}",
2598 return "ldw 0(%R0),%0\n\tldw 4(%R0),%R0";
2603 /* If an operand is an unoffsettable memory ref, find a register
2604 we can increment temporarily to make it refer to the second word. */
2606 if (optype0 == MEMOP)
2607 addreg0 = find_addr_reg (XEXP (operands[0], 0));
2609 if (optype1 == MEMOP)
2610 addreg1 = find_addr_reg (XEXP (operands[1], 0));
2612 /* Ok, we can do one word at a time.
2613 Normally we do the low-numbered word first.
2615 In either case, set up in LATEHALF the operands to use
2616 for the high-numbered word and in some cases alter the
2617 operands in OPERANDS to be suitable for the low-numbered word. */
2619 if (optype0 == REGOP)
2620 latehalf[0] = gen_rtx_REG (SImode, REGNO (operands[0]) + 1);
2621 else if (optype0 == OFFSOP)
2622 latehalf[0] = adjust_address (operands[0], SImode, 4);
2624 latehalf[0] = operands[0];
2626 if (optype1 == REGOP)
2627 latehalf[1] = gen_rtx_REG (SImode, REGNO (operands[1]) + 1);
2628 else if (optype1 == OFFSOP)
2629 latehalf[1] = adjust_address (operands[1], SImode, 4);
2630 else if (optype1 == CNSTOP)
2631 split_double (operands[1], &operands[1], &latehalf[1]);
2633 latehalf[1] = operands[1];
2635 /* If the first move would clobber the source of the second one,
2636 do them in the other order.
2638 This can happen in two cases:
2640 mem -> register where the first half of the destination register
2641 is the same register used in the memory's address. Reload
2642 can create such insns.
2644 mem in this case will be either register indirect or register
2645 indirect plus a valid offset.
2647 register -> register move where REGNO(dst) == REGNO(src + 1)
2648 someone (Tim/Tege?) claimed this can happen for parameter loads.
2650 Handle mem -> register case first. */
2651 if (optype0 == REGOP
2652 && (optype1 == MEMOP || optype1 == OFFSOP)
2653 && refers_to_regno_p (REGNO (operands[0]), REGNO (operands[0]) + 1,
2656 /* Do the late half first. */
2658 output_asm_insn ("ldo 4(%0),%0", &addreg1);
2659 output_asm_insn (pa_singlemove_string (latehalf), latehalf);
2663 output_asm_insn ("ldo -4(%0),%0", &addreg1);
2664 return pa_singlemove_string (operands);
2667 /* Now handle register -> register case. */
2668 if (optype0 == REGOP && optype1 == REGOP
2669 && REGNO (operands[0]) == REGNO (operands[1]) + 1)
2671 output_asm_insn (pa_singlemove_string (latehalf), latehalf);
2672 return pa_singlemove_string (operands);
2675 /* Normal case: do the two words, low-numbered first. */
2677 output_asm_insn (pa_singlemove_string (operands), operands);
2679 /* Make any unoffsettable addresses point at high-numbered word. */
2681 output_asm_insn ("ldo 4(%0),%0", &addreg0);
2683 output_asm_insn ("ldo 4(%0),%0", &addreg1);
2686 output_asm_insn (pa_singlemove_string (latehalf), latehalf);
2688 /* Undo the adds we just did. */
2690 output_asm_insn ("ldo -4(%0),%0", &addreg0);
2692 output_asm_insn ("ldo -4(%0),%0", &addreg1);
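/* Illustrative example of the ordering logic: for a DImode register copy
   with REGNO (dst) == REGNO (src) + 1, say dst in %r5/%r6 and src in
   %r4/%r5, moving the first word (into %r5) would clobber %r5 while it
   still holds the second word of the source, so the second word is
   emitted first as handled above.  */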
2698 pa_output_fp_move_double (rtx *operands)
2700 if (FP_REG_P (operands[0]))
2702 if (FP_REG_P (operands[1])
2703 || operands[1] == CONST0_RTX (GET_MODE (operands[0])))
2704 output_asm_insn ("fcpy,dbl %f1,%0", operands);
2706 output_asm_insn ("fldd%F1 %1,%0", operands);
2708 else if (FP_REG_P (operands[1]))
2710 output_asm_insn ("fstd%F0 %1,%0", operands);
2716 gcc_assert (operands[1] == CONST0_RTX (GET_MODE (operands[0])));
2718 /* This is a pain. You have to be prepared to deal with an
2719 arbitrary address here including pre/post increment/decrement,
2721 so avoid this in the MD. */
2722 gcc_assert (GET_CODE (operands[0]) == REG);
2724 xoperands[1] = gen_rtx_REG (SImode, REGNO (operands[0]) + 1);
2725 xoperands[0] = operands[0];
2726 output_asm_insn ("copy %%r0,%0\n\tcopy %%r0,%1", xoperands);
2731 /* Return a REG that occurs in ADDR with coefficient 1.
2732 ADDR can be effectively incremented by incrementing REG. */
2735 find_addr_reg (rtx addr)
2737 while (GET_CODE (addr) == PLUS)
2739 if (GET_CODE (XEXP (addr, 0)) == REG)
2740 addr = XEXP (addr, 0);
2741 else if (GET_CODE (XEXP (addr, 1)) == REG)
2742 addr = XEXP (addr, 1);
2743 else if (CONSTANT_P (XEXP (addr, 0)))
2744 addr = XEXP (addr, 1);
2745 else if (CONSTANT_P (XEXP (addr, 1)))
2746 addr = XEXP (addr, 0);
2750 gcc_assert (GET_CODE (addr) == REG);
2754 /* Emit code to perform a block move.
2756 OPERANDS[0] is the destination pointer as a REG, clobbered.
2757 OPERANDS[1] is the source pointer as a REG, clobbered.
2758 OPERANDS[2] is a register for temporary storage.
2759 OPERANDS[3] is a register for temporary storage.
2760 OPERANDS[4] is the size as a CONST_INT
2761 OPERANDS[5] is the alignment safe to use, as a CONST_INT.
2762 OPERANDS[6] is another temporary register. */
2765 pa_output_block_move (rtx *operands, int size_is_constant ATTRIBUTE_UNUSED)
2767 int align = INTVAL (operands[5]);
2768 unsigned long n_bytes = INTVAL (operands[4]);
2770 /* We can't move more than a word at a time because the PA
2771 has no integer move insns longer than a word. (Could use fp mem ops?) */
2772 if (align > (TARGET_64BIT ? 8 : 4))
2773 align = (TARGET_64BIT ? 8 : 4);
2775 /* Note that we know each loop below will execute at least twice
2776 (else we would have open-coded the copy). */
2780 /* Pre-adjust the loop counter. */
2781 operands[4] = GEN_INT (n_bytes - 16);
2782 output_asm_insn ("ldi %4,%2", operands);
2785 output_asm_insn ("ldd,ma 8(%1),%3", operands);
2786 output_asm_insn ("ldd,ma 8(%1),%6", operands);
2787 output_asm_insn ("std,ma %3,8(%0)", operands);
2788 output_asm_insn ("addib,>= -16,%2,.-12", operands);
2789 output_asm_insn ("std,ma %6,8(%0)", operands);
2791 /* Handle the residual. There could be up to 7 bytes of
2792 residual to copy! */
2793 if (n_bytes % 16 != 0)
2795 operands[4] = GEN_INT (n_bytes % 8);
2796 if (n_bytes % 16 >= 8)
2797 output_asm_insn ("ldd,ma 8(%1),%3", operands);
2798 if (n_bytes % 8 != 0)
2799 output_asm_insn ("ldd 0(%1),%6", operands);
2800 if (n_bytes % 16 >= 8)
2801 output_asm_insn ("std,ma %3,8(%0)", operands);
2802 if (n_bytes % 8 != 0)
2803 output_asm_insn ("stdby,e %6,%4(%0)", operands);
2808 /* Pre-adjust the loop counter. */
2809 operands[4] = GEN_INT (n_bytes - 8);
2810 output_asm_insn ("ldi %4,%2", operands);
2813 output_asm_insn ("{ldws|ldw},ma 4(%1),%3", operands);
2814 output_asm_insn ("{ldws|ldw},ma 4(%1),%6", operands);
2815 output_asm_insn ("{stws|stw},ma %3,4(%0)", operands);
2816 output_asm_insn ("addib,>= -8,%2,.-12", operands);
2817 output_asm_insn ("{stws|stw},ma %6,4(%0)", operands);
2819 /* Handle the residual. There could be up to 7 bytes of
2820 residual to copy! */
2821 if (n_bytes % 8 != 0)
2823 operands[4] = GEN_INT (n_bytes % 4);
2824 if (n_bytes % 8 >= 4)
2825 output_asm_insn ("{ldws|ldw},ma 4(%1),%3", operands);
2826 if (n_bytes % 4 != 0)
2827 output_asm_insn ("ldw 0(%1),%6", operands);
2828 if (n_bytes % 8 >= 4)
2829 output_asm_insn ("{stws|stw},ma %3,4(%0)", operands);
2830 if (n_bytes % 4 != 0)
2831 output_asm_insn ("{stbys|stby},e %6,%4(%0)", operands);
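     /* Illustrative example for the word-aligned case above: with
        n_bytes = 23 and align = 4, the counter is preset to 15 and the
        unrolled loop copies 16 bytes in two passes; the 7-byte residual is
        then one more word load/store plus an ldw and a stby,e that stores
        the final 3 bytes.  */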
2836 /* Pre-adjust the loop counter. */
2837 operands[4] = GEN_INT (n_bytes - 4);
2838 output_asm_insn ("ldi %4,%2", operands);
2841 output_asm_insn ("{ldhs|ldh},ma 2(%1),%3", operands);
2842 output_asm_insn ("{ldhs|ldh},ma 2(%1),%6", operands);
2843 output_asm_insn ("{sths|sth},ma %3,2(%0)", operands);
2844 output_asm_insn ("addib,>= -4,%2,.-12", operands);
2845 output_asm_insn ("{sths|sth},ma %6,2(%0)", operands);
2847 /* Handle the residual. */
2848 if (n_bytes % 4 != 0)
2850 if (n_bytes % 4 >= 2)
2851 output_asm_insn ("{ldhs|ldh},ma 2(%1),%3", operands);
2852 if (n_bytes % 2 != 0)
2853 output_asm_insn ("ldb 0(%1),%6", operands);
2854 if (n_bytes % 4 >= 2)
2855 output_asm_insn ("{sths|sth},ma %3,2(%0)", operands);
2856 if (n_bytes % 2 != 0)
2857 output_asm_insn ("stb %6,0(%0)", operands);
2862 /* Pre-adjust the loop counter. */
2863 operands[4] = GEN_INT (n_bytes - 2);
2864 output_asm_insn ("ldi %4,%2", operands);
2867 output_asm_insn ("{ldbs|ldb},ma 1(%1),%3", operands);
2868 output_asm_insn ("{ldbs|ldb},ma 1(%1),%6", operands);
2869 output_asm_insn ("{stbs|stb},ma %3,1(%0)", operands);
2870 output_asm_insn ("addib,>= -2,%2,.-12", operands);
2871 output_asm_insn ("{stbs|stb},ma %6,1(%0)", operands);
2873 /* Handle the residual. */
2874 if (n_bytes % 2 != 0)
2876 output_asm_insn ("ldb 0(%1),%3", operands);
2877 output_asm_insn ("stb %3,0(%0)", operands);
2886 /* Count the number of insns necessary to handle this block move.
2888 Basic structure is the same as pa_output_block_move, except that we
2889 count insns rather than emit them. */
2892 compute_movmem_length (rtx insn)
2894 rtx pat = PATTERN (insn);
2895 unsigned int align = INTVAL (XEXP (XVECEXP (pat, 0, 7), 0));
2896 unsigned long n_bytes = INTVAL (XEXP (XVECEXP (pat, 0, 6), 0));
2897 unsigned int n_insns = 0;
2899 /* We can't move more than a word at a time because the PA
2900 has no integer move insns longer than a word. (Could use fp mem ops?) */
2901 if (align > (TARGET_64BIT ? 8 : 4))
2902 align = (TARGET_64BIT ? 8 : 4);
2904 /* The basic copying loop. */
2908 if (n_bytes % (2 * align) != 0)
2910 if ((n_bytes % (2 * align)) >= align)
2913 if ((n_bytes % align) != 0)
2917 /* Lengths are expressed in bytes now; each insn is 4 bytes. */
2921 /* Emit code to perform a block clear.
2923 OPERANDS[0] is the destination pointer as a REG, clobbered.
2924 OPERANDS[1] is a register for temporary storage.
2925 OPERANDS[2] is the size as a CONST_INT
2926 OPERANDS[3] is the alignment safe to use, as a CONST_INT. */
2929 pa_output_block_clear (rtx *operands, int size_is_constant ATTRIBUTE_UNUSED)
2931 int align = INTVAL (operands[3]);
2932 unsigned long n_bytes = INTVAL (operands[2]);
2934 /* We can't clear more than a word at a time because the PA
2935 has no integer move insns longer than a word. */
2936 if (align > (TARGET_64BIT ? 8 : 4))
2937 align = (TARGET_64BIT ? 8 : 4);
2939 /* Note that we know each loop below will execute at least twice
2940 (else we would have open-coded the copy). */
2944 /* Pre-adjust the loop counter. */
2945 operands[2] = GEN_INT (n_bytes - 16);
2946 output_asm_insn ("ldi %2,%1", operands);
2949 output_asm_insn ("std,ma %%r0,8(%0)", operands);
2950 output_asm_insn ("addib,>= -16,%1,.-4", operands);
2951 output_asm_insn ("std,ma %%r0,8(%0)", operands);
2953 /* Handle the residual. There could be up to 7 bytes of
2954 residual to copy! */
2955 if (n_bytes % 16 != 0)
2957 operands[2] = GEN_INT (n_bytes % 8);
2958 if (n_bytes % 16 >= 8)
2959 output_asm_insn ("std,ma %%r0,8(%0)", operands);
2960 if (n_bytes % 8 != 0)
2961 output_asm_insn ("stdby,e %%r0,%2(%0)", operands);
2966 /* Pre-adjust the loop counter. */
2967 operands[2] = GEN_INT (n_bytes - 8);
2968 output_asm_insn ("ldi %2,%1", operands);
2971 output_asm_insn ("{stws|stw},ma %%r0,4(%0)", operands);
2972 output_asm_insn ("addib,>= -8,%1,.-4", operands);
2973 output_asm_insn ("{stws|stw},ma %%r0,4(%0)", operands);
2975 /* Handle the residual. There could be up to 7 bytes of
2976 residual to copy! */
2977 if (n_bytes % 8 != 0)
2979 operands[2] = GEN_INT (n_bytes % 4);
2980 if (n_bytes % 8 >= 4)
2981 output_asm_insn ("{stws|stw},ma %%r0,4(%0)", operands);
2982 if (n_bytes % 4 != 0)
2983 output_asm_insn ("{stbys|stby},e %%r0,%2(%0)", operands);
2988 /* Pre-adjust the loop counter. */
2989 operands[2] = GEN_INT (n_bytes - 4);
2990 output_asm_insn ("ldi %2,%1", operands);
2993 output_asm_insn ("{sths|sth},ma %%r0,2(%0)", operands);
2994 output_asm_insn ("addib,>= -4,%1,.-4", operands);
2995 output_asm_insn ("{sths|sth},ma %%r0,2(%0)", operands);
2997 /* Handle the residual. */
2998 if (n_bytes % 4 != 0)
3000 if (n_bytes % 4 >= 2)
3001 output_asm_insn ("{sths|sth},ma %%r0,2(%0)", operands);
3002 if (n_bytes % 2 != 0)
3003 output_asm_insn ("stb %%r0,0(%0)", operands);
3008 /* Pre-adjust the loop counter. */
3009 operands[2] = GEN_INT (n_bytes - 2);
3010 output_asm_insn ("ldi %2,%1", operands);
3013 output_asm_insn ("{stbs|stb},ma %%r0,1(%0)", operands);
3014 output_asm_insn ("addib,>= -2,%1,.-4", operands);
3015 output_asm_insn ("{stbs|stb},ma %%r0,1(%0)", operands);
3017 /* Handle the residual. */
3018 if (n_bytes % 2 != 0)
3019 output_asm_insn ("stb %%r0,0(%0)", operands);
3028 /* Count the number of insns necessary to handle this block clear.
3030 Basic structure is the same as pa_output_block_clear, except that we
3031 count insns rather than emit them. */
3034 compute_clrmem_length (rtx insn)
3036 rtx pat = PATTERN (insn);
3037 unsigned int align = INTVAL (XEXP (XVECEXP (pat, 0, 4), 0));
3038 unsigned long n_bytes = INTVAL (XEXP (XVECEXP (pat, 0, 3), 0));
3039 unsigned int n_insns = 0;
3041 /* We can't clear more than a word at a time because the PA
3042 has no integer move insns longer than a word. */
3043 if (align > (TARGET_64BIT ? 8 : 4))
3044 align = (TARGET_64BIT ? 8 : 4);
3046 /* The basic loop. */
3050 if (n_bytes % (2 * align) != 0)
3052 if ((n_bytes % (2 * align)) >= align)
3055 if ((n_bytes % align) != 0)
3059 /* Lengths are expressed in bytes now; each insn is 4 bytes. */
3065 pa_output_and (rtx *operands)
3067 if (GET_CODE (operands[2]) == CONST_INT && INTVAL (operands[2]) != 0)
3069 unsigned HOST_WIDE_INT mask = INTVAL (operands[2]);
3070 int ls0, ls1, ms0, p, len;
3072 for (ls0 = 0; ls0 < 32; ls0++)
3073 if ((mask & (1 << ls0)) == 0)
3076 for (ls1 = ls0; ls1 < 32; ls1++)
3077 if ((mask & (1 << ls1)) != 0)
3080 for (ms0 = ls1; ms0 < 32; ms0++)
3081 if ((mask & (1 << ms0)) == 0)
3084 gcc_assert (ms0 == 32);
3092 operands[2] = GEN_INT (len);
3093 return "{extru|extrw,u} %1,31,%2,%0";
3097 /* We could use this `depi' for the case above as well, but `depi'
3098 requires one more register file access than an `extru'. */
3103 operands[2] = GEN_INT (p);
3104 operands[3] = GEN_INT (len);
3105 return "{depi|depwi} 0,%2,%3,%0";
3109 return "and %1,%2,%0";
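/* Illustrative examples: a mask of 0x000000ff is a block of low-order ones
   (ls1 reaches 32), so the extrw,u form zero-extends the low 8 bits; a
   mask of 0xffffff00 has a single 8-bit hole at the bottom, so the depwi
   form deposits 0 into that field; a register mask (or the constant 0)
   falls through to the plain "and".  */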
3112 /* Return a string to perform a bitwise-and of operands[1] with operands[2]
3113 storing the result in operands[0]. */
3115 pa_output_64bit_and (rtx *operands)
3117 if (GET_CODE (operands[2]) == CONST_INT && INTVAL (operands[2]) != 0)
3119 unsigned HOST_WIDE_INT mask = INTVAL (operands[2]);
3120 int ls0, ls1, ms0, p, len;
3122 for (ls0 = 0; ls0 < HOST_BITS_PER_WIDE_INT; ls0++)
3123 if ((mask & ((unsigned HOST_WIDE_INT) 1 << ls0)) == 0)
3126 for (ls1 = ls0; ls1 < HOST_BITS_PER_WIDE_INT; ls1++)
3127 if ((mask & ((unsigned HOST_WIDE_INT) 1 << ls1)) != 0)
3130 for (ms0 = ls1; ms0 < HOST_BITS_PER_WIDE_INT; ms0++)
3131 if ((mask & ((unsigned HOST_WIDE_INT) 1 << ms0)) == 0)
3134 gcc_assert (ms0 == HOST_BITS_PER_WIDE_INT);
3136 if (ls1 == HOST_BITS_PER_WIDE_INT)
3142 operands[2] = GEN_INT (len);
3143 return "extrd,u %1,63,%2,%0";
3147 /* We could use this `depi' for the case above as well, but `depi'
3148 requires one more register file access than an `extru'. */
3153 operands[2] = GEN_INT (p);
3154 operands[3] = GEN_INT (len);
3155 return "depdi 0,%2,%3,%0";
3159 return "and %1,%2,%0";
3163 pa_output_ior (rtx *operands)
3165 unsigned HOST_WIDE_INT mask = INTVAL (operands[2]);
3166 int bs0, bs1, p, len;
3168 if (INTVAL (operands[2]) == 0)
3169 return "copy %1,%0";
3171 for (bs0 = 0; bs0 < 32; bs0++)
3172 if ((mask & (1 << bs0)) != 0)
3175 for (bs1 = bs0; bs1 < 32; bs1++)
3176 if ((mask & (1 << bs1)) == 0)
3179 gcc_assert (bs1 == 32 || ((unsigned HOST_WIDE_INT) 1 << bs1) > mask);
3184 operands[2] = GEN_INT (p);
3185 operands[3] = GEN_INT (len);
3186 return "{depi|depwi} -1,%2,%3,%0";
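/* Illustrative example: a mask of 0x00ffff00 is one contiguous run of ones
   in bits 8..23, so the depwi above deposits -1 into that 16-bit field;
   a zero mask degenerates to a copy, and masks that are not a single run
   of ones trip the gcc_assert and are not expected here.  */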
3189 /* Return a string to perform a bitwise-or of operands[1] with operands[2]
3190 storing the result in operands[0]. */
3192 pa_output_64bit_ior (rtx *operands)
3194 unsigned HOST_WIDE_INT mask = INTVAL (operands[2]);
3195 int bs0, bs1, p, len;
3197 if (INTVAL (operands[2]) == 0)
3198 return "copy %1,%0";
3200 for (bs0 = 0; bs0 < HOST_BITS_PER_WIDE_INT; bs0++)
3201 if ((mask & ((unsigned HOST_WIDE_INT) 1 << bs0)) != 0)
3204 for (bs1 = bs0; bs1 < HOST_BITS_PER_WIDE_INT; bs1++)
3205 if ((mask & ((unsigned HOST_WIDE_INT) 1 << bs1)) == 0)
3208 gcc_assert (bs1 == HOST_BITS_PER_WIDE_INT
3209 || ((unsigned HOST_WIDE_INT) 1 << bs1) > mask);
3214 operands[2] = GEN_INT (p);
3215 operands[3] = GEN_INT (len);
3216 return "depdi -1,%2,%3,%0";
3219 /* Target hook for assembling integer objects. This code handles
3220 aligned SI and DI integers specially since function references
3221 must be preceded by P%. */
3224 pa_assemble_integer (rtx x, unsigned int size, int aligned_p)
3226 if (size == UNITS_PER_WORD
3228 && function_label_operand (x, VOIDmode))
3230 fputs (size == 8? "\t.dword\tP%" : "\t.word\tP%", asm_out_file);
3231 output_addr_const (asm_out_file, x);
3232 fputc ('\n', asm_out_file);
3235 return default_assemble_integer (x, size, aligned_p);
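/* Illustrative example: a word-sized reference to a function symbol foo is
   emitted as "\t.word\tP%foo" (".dword" when size == 8) so the linker
   substitutes the proper plabel; everything else is handed to
   default_assemble_integer.  */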
3238 /* Output an ascii string. */
3240 pa_output_ascii (FILE *file, const char *p, int size)
3244 unsigned char partial_output[16]; /* Max space 4 chars can occupy. */
3246 /* The HP assembler can only take strings of 256 characters at one
3247 time. This is a limitation on input line length, *not* the
3248 length of the string. Sigh. Even worse, it seems that the
3249 restriction is in number of input characters (see \xnn &
3250 \whatever). So we have to do this very carefully. */
3252 fputs ("\t.STRING \"", file);
3255 for (i = 0; i < size; i += 4)
3259 for (io = 0, co = 0; io < MIN (4, size - i); io++)
3261 register unsigned int c = (unsigned char) p[i + io];
3263 if (c == '\"' || c == '\\')
3264 partial_output[co++] = '\\';
3265 if (c >= ' ' && c < 0177)
3266 partial_output[co++] = c;
3270 partial_output[co++] = '\\';
3271 partial_output[co++] = 'x';
3272 hexd = c / 16 - 0 + '0';
3274 hexd -= '9' - 'a' + 1;
3275 partial_output[co++] = hexd;
3276 hexd = c % 16 - 0 + '0';
3278 hexd -= '9' - 'a' + 1;
3279 partial_output[co++] = hexd;
3282 if (chars_output + co > 243)
3284 fputs ("\"\n\t.STRING \"", file);
3287 fwrite (partial_output, 1, (size_t) co, file);
3291 fputs ("\"\n", file);
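/* Illustrative example: the bytes 'a', '"', 'b', '\n' come out as
   .STRING "a\"b\x0a"; the quote gets a backslash and the unprintable byte
   becomes a hex escape.  Once roughly 243 expanded characters have been
   written, the directive is closed and a fresh .STRING is started to
   respect the assembler's input-line limit.  */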
3294 /* Try to rewrite floating point comparisons & branches to avoid
3295 useless add,tr insns.
3297 CHECK_NOTES is nonzero if we should examine REG_DEAD notes
3298 to see if FPCC is dead. CHECK_NOTES is nonzero for the
3299 first attempt to remove useless add,tr insns. It is zero
3300 for the second pass as reorg sometimes leaves bogus REG_DEAD notes.
3303 When CHECK_NOTES is zero we can only eliminate add,tr insns
3304 when there's a 1:1 correspondence between fcmp and ftest/fbranch insns. */
3307 remove_useless_addtr_insns (int check_notes)
3310 static int pass = 0;
3312 /* This is fairly cheap, so always run it when optimizing. */
3316 int fbranch_count = 0;
3318 /* Walk all the insns in this function looking for fcmp & fbranch
3319 instructions. Keep track of how many of each we find. */
3320 for (insn = get_insns (); insn; insn = next_insn (insn))
3324 /* Ignore anything that isn't an INSN or a JUMP_INSN. */
3325 if (GET_CODE (insn) != INSN && GET_CODE (insn) != JUMP_INSN)
3328 tmp = PATTERN (insn);
3330 /* It must be a set. */
3331 if (GET_CODE (tmp) != SET)
3334 /* If the destination is CCFP, then we've found an fcmp insn. */
3335 tmp = SET_DEST (tmp);
3336 if (GET_CODE (tmp) == REG && REGNO (tmp) == 0)
3342 tmp = PATTERN (insn);
3343 /* If this is an fbranch instruction, bump the fbranch counter. */
3344 if (GET_CODE (tmp) == SET
3345 && SET_DEST (tmp) == pc_rtx
3346 && GET_CODE (SET_SRC (tmp)) == IF_THEN_ELSE
3347 && GET_CODE (XEXP (SET_SRC (tmp), 0)) == NE
3348 && GET_CODE (XEXP (XEXP (SET_SRC (tmp), 0), 0)) == REG
3349 && REGNO (XEXP (XEXP (SET_SRC (tmp), 0), 0)) == 0)
3357 /* Find all floating point compare + branch insns. If possible,
3358 reverse the comparison & the branch to avoid add,tr insns. */
3359 for (insn = get_insns (); insn; insn = next_insn (insn))
3363 /* Ignore anything that isn't an INSN. */
3364 if (GET_CODE (insn) != INSN)
3367 tmp = PATTERN (insn);
3369 /* It must be a set. */
3370 if (GET_CODE (tmp) != SET)
3373 /* The destination must be CCFP, which is register zero. */
3374 tmp = SET_DEST (tmp);
3375 if (GET_CODE (tmp) != REG || REGNO (tmp) != 0)
3378 /* INSN should be a set of CCFP.
3380 See if the result of this insn is used in a reversed FP
3381 conditional branch. If so, reverse our condition and
3382 the branch. Doing so avoids useless add,tr insns. */
3383 next = next_insn (insn);
3386 /* Jumps, calls and labels stop our search. */
3387 if (GET_CODE (next) == JUMP_INSN
3388 || GET_CODE (next) == CALL_INSN
3389 || GET_CODE (next) == CODE_LABEL)
3392 /* As does another fcmp insn. */
3393 if (GET_CODE (next) == INSN
3394 && GET_CODE (PATTERN (next)) == SET
3395 && GET_CODE (SET_DEST (PATTERN (next))) == REG
3396 && REGNO (SET_DEST (PATTERN (next))) == 0)
3399 next = next_insn (next);
3402 /* Is NEXT_INSN a branch? */
3404 && GET_CODE (next) == JUMP_INSN)
3406 rtx pattern = PATTERN (next);
3408 /* If it is a reversed fp conditional branch (e.g. uses add,tr)
3409 and CCFP dies, then reverse our conditional and the branch
3410 to avoid the add,tr. */
3411 if (GET_CODE (pattern) == SET
3412 && SET_DEST (pattern) == pc_rtx
3413 && GET_CODE (SET_SRC (pattern)) == IF_THEN_ELSE
3414 && GET_CODE (XEXP (SET_SRC (pattern), 0)) == NE
3415 && GET_CODE (XEXP (XEXP (SET_SRC (pattern), 0), 0)) == REG
3416 && REGNO (XEXP (XEXP (SET_SRC (pattern), 0), 0)) == 0
3417 && GET_CODE (XEXP (SET_SRC (pattern), 1)) == PC
3418 && (fcmp_count == fbranch_count
3420 && find_regno_note (next, REG_DEAD, 0))))
3422 /* Reverse the branch. */
3423 tmp = XEXP (SET_SRC (pattern), 1);
3424 XEXP (SET_SRC (pattern), 1) = XEXP (SET_SRC (pattern), 2);
3425 XEXP (SET_SRC (pattern), 2) = tmp;
3426 INSN_CODE (next) = -1;
3428 /* Reverse our condition. */
3429 tmp = PATTERN (insn);
3430 PUT_CODE (XEXP (tmp, 1),
3431 (reverse_condition_maybe_unordered
3432 (GET_CODE (XEXP (tmp, 1)))));
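              /* Illustrative effect: an fcmp feeding a branch written in
                 the reversed (add,tr) style, i.e. "if CCFP is nonzero fall
                 through, else jump", gets its comparison code flipped via
                 reverse_condition_maybe_unordered and its branch arms
                 swapped, leaving an ordinary ftest/fbranch pair with no
                 add,tr.  */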
3442 /* You may have trouble believing this, but this is the 32 bit HP-PA stack layout.
3447 Variable arguments (optional; any number may be allocated)
3449 SP-(4*(N+9)) arg word N
3454 Fixed arguments (must be allocated; may remain unused)
3463 SP-32 External Data Pointer (DP)
3465 SP-24 External/stub RP (RP')
3469 SP-8 Calling Stub RP (RP'')
3474 SP-0 Stack Pointer (points to next available address)
3478 /* This function saves registers as follows. Registers marked with ' are
3479 this function's registers (as opposed to the previous function's).
3480 If a frame_pointer isn't needed, r4 is saved as a general register;
3481 the space for the frame pointer is still allocated, though, to keep
3487 SP (FP') Previous FP
3488 SP + 4 Alignment filler (sigh)
3489 SP + 8 Space for locals reserved here.
3493 SP + n All call saved register used.
3497 SP + o All call saved fp registers used.
3501 SP + p (SP') points to next available address.
3505 /* Global variables set by output_function_prologue(). */
3506 /* Size of frame. Need to know this to emit return insns from leaf procedures. */
3508 static HOST_WIDE_INT actual_fsize, local_fsize;
3509 static int save_fregs;
3511 /* Emit RTL to store REG at the memory location specified by BASE+DISP.
3512 Handle case where DISP > 8k by using the add_high_const patterns.
3514 Note in DISP > 8k case, we will leave the high part of the address
3515 in %r1. There is code in expand_hppa_{prologue,epilogue} that knows this. */
3518 store_reg (int reg, HOST_WIDE_INT disp, int base)
3520 rtx insn, dest, src, basereg;
3522 src = gen_rtx_REG (word_mode, reg);
3523 basereg = gen_rtx_REG (Pmode, base);
3524 if (VAL_14_BITS_P (disp))
3526 dest = gen_rtx_MEM (word_mode, plus_constant (Pmode, basereg, disp));
3527 insn = emit_move_insn (dest, src);
3529 else if (TARGET_64BIT && !VAL_32_BITS_P (disp))
3531 rtx delta = GEN_INT (disp);
3532 rtx tmpreg = gen_rtx_REG (Pmode, 1);
3534 emit_move_insn (tmpreg, delta);
3535 insn = emit_move_insn (tmpreg, gen_rtx_PLUS (Pmode, tmpreg, basereg));
3538 add_reg_note (insn, REG_FRAME_RELATED_EXPR,
3539 gen_rtx_SET (VOIDmode, tmpreg,
3540 gen_rtx_PLUS (Pmode, basereg, delta)));
3541 RTX_FRAME_RELATED_P (insn) = 1;
3543 dest = gen_rtx_MEM (word_mode, tmpreg);
3544 insn = emit_move_insn (dest, src);
3548 rtx delta = GEN_INT (disp);
3549 rtx high = gen_rtx_PLUS (Pmode, basereg, gen_rtx_HIGH (Pmode, delta));
3550 rtx tmpreg = gen_rtx_REG (Pmode, 1);
3552 emit_move_insn (tmpreg, high);
3553 dest = gen_rtx_MEM (word_mode, gen_rtx_LO_SUM (Pmode, tmpreg, delta));
3554 insn = emit_move_insn (dest, src);
3556 add_reg_note (insn, REG_FRAME_RELATED_EXPR,
3557 gen_rtx_SET (VOIDmode,
3558 gen_rtx_MEM (word_mode,
3559 gen_rtx_PLUS (word_mode,
3566 RTX_FRAME_RELATED_P (insn) = 1;
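/* Illustrative example: store_reg (2, -20, STACK_POINTER_REGNUM) emits a
   single word store of %r2 at sp-20 because the displacement fits in 14
   bits; a displacement such as 20000 instead goes through %r1 using the
   HIGH/LO_SUM (add_high_const) path above.  */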
3569 /* Emit RTL to store REG at the memory location specified by BASE and then
3570 add MOD to BASE. MOD must be <= 8k. */
3573 store_reg_modify (int base, int reg, HOST_WIDE_INT mod)
3575 rtx insn, basereg, srcreg, delta;
3577 gcc_assert (VAL_14_BITS_P (mod));
3579 basereg = gen_rtx_REG (Pmode, base);
3580 srcreg = gen_rtx_REG (word_mode, reg);
3581 delta = GEN_INT (mod);
3583 insn = emit_insn (gen_post_store (basereg, srcreg, delta));
3586 RTX_FRAME_RELATED_P (insn) = 1;
3588 /* RTX_FRAME_RELATED_P must be set on each frame related set
3589 in a parallel with more than one element. */
3590 RTX_FRAME_RELATED_P (XVECEXP (PATTERN (insn), 0, 0)) = 1;
3591 RTX_FRAME_RELATED_P (XVECEXP (PATTERN (insn), 0, 1)) = 1;
3595 /* Emit RTL to set REG to the value specified by BASE+DISP. Handle case
3596 where DISP > 8k by using the add_high_const patterns. NOTE indicates
3597 whether to add a frame note or not.
3599 In the DISP > 8k case, we leave the high part of the address in %r1.
3600 There is code in expand_hppa_{prologue,epilogue} that knows about this. */
3603 set_reg_plus_d (int reg, int base, HOST_WIDE_INT disp, int note)
3607 if (VAL_14_BITS_P (disp))
3609 insn = emit_move_insn (gen_rtx_REG (Pmode, reg),
3610 plus_constant (Pmode,
3611 gen_rtx_REG (Pmode, base), disp));
3613 else if (TARGET_64BIT && !VAL_32_BITS_P (disp))
3615 rtx basereg = gen_rtx_REG (Pmode, base);
3616 rtx delta = GEN_INT (disp);
3617 rtx tmpreg = gen_rtx_REG (Pmode, 1);
3619 emit_move_insn (tmpreg, delta);
3620 insn = emit_move_insn (gen_rtx_REG (Pmode, reg),
3621 gen_rtx_PLUS (Pmode, tmpreg, basereg));
3623 add_reg_note (insn, REG_FRAME_RELATED_EXPR,
3624 gen_rtx_SET (VOIDmode, tmpreg,
3625 gen_rtx_PLUS (Pmode, basereg, delta)));
3629 rtx basereg = gen_rtx_REG (Pmode, base);
3630 rtx delta = GEN_INT (disp);
3631 rtx tmpreg = gen_rtx_REG (Pmode, 1);
3633 emit_move_insn (tmpreg,
3634 gen_rtx_PLUS (Pmode, basereg,
3635 gen_rtx_HIGH (Pmode, delta)));
3636 insn = emit_move_insn (gen_rtx_REG (Pmode, reg),
3637 gen_rtx_LO_SUM (Pmode, tmpreg, delta));
3640 if (DO_FRAME_NOTES && note)
3641 RTX_FRAME_RELATED_P (insn) = 1;
3645 pa_compute_frame_size (HOST_WIDE_INT size, int *fregs_live)
3650 /* The code in pa_expand_prologue and pa_expand_epilogue must
3651 be consistent with the rounding and size calculation done here.
3652 Change them at the same time. */
3654 /* We do our own stack alignment. First, round the size of the
3655 stack locals up to a word boundary. */
3656 size = (size + UNITS_PER_WORD - 1) & ~(UNITS_PER_WORD - 1);
3658 /* Space for previous frame pointer + filler. If any frame is
3659 allocated, we need to add in the STARTING_FRAME_OFFSET. We
3660 waste some space here for the sake of HP compatibility. The
3661 first slot is only used when the frame pointer is needed. */
3662 if (size || frame_pointer_needed)
3663 size += STARTING_FRAME_OFFSET;
3665 /* If the current function calls __builtin_eh_return, then we need
3666 to allocate stack space for registers that will hold data for
3667 the exception handler. */
3668 if (DO_FRAME_NOTES && crtl->calls_eh_return)
3672 for (i = 0; EH_RETURN_DATA_REGNO (i) != INVALID_REGNUM; ++i)
3674 size += i * UNITS_PER_WORD;
3677 /* Account for space used by the callee general register saves. */
3678 for (i = 18, j = frame_pointer_needed ? 4 : 3; i >= j; i--)
3679 if (df_regs_ever_live_p (i))
3680 size += UNITS_PER_WORD;
3682 /* Account for space used by the callee floating point register saves. */
3683 for (i = FP_SAVED_REG_LAST; i >= FP_SAVED_REG_FIRST; i -= FP_REG_STEP)
3684 if (df_regs_ever_live_p (i)
3685 || (!TARGET_64BIT && df_regs_ever_live_p (i + 1)))
3689 /* We always save both halves of the FP register, so always
3690 increment the frame size by 8 bytes. */
3694 /* If any of the floating registers are saved, account for the
3695 alignment needed for the floating point register save block. */
3698 size = (size + 7) & ~7;
3703 /* The various ABIs include space for the outgoing parameters in the
3704 size of the current function's stack frame. We don't need to align
3705 for the outgoing arguments as their alignment is set by the final
3706 rounding for the frame as a whole. */
3707 size += crtl->outgoing_args_size;
3709 /* Allocate space for the fixed frame marker. This space must be
3710 allocated for any function that makes calls or allocates stack space. */
3712 if (!crtl->is_leaf || size)
3713 size += TARGET_64BIT ? 48 : 32;
3715 /* Finally, round to the preferred stack boundary. */
3716 return ((size + PREFERRED_STACK_BOUNDARY / BITS_PER_UNIT - 1)
3717 & ~(PREFERRED_STACK_BOUNDARY / BITS_PER_UNIT - 1));
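/* Illustrative example (assuming the usual 32-bit values, i.e. a
   STARTING_FRAME_OFFSET of 8 and a 64-byte preferred stack boundary): a
   non-leaf function with 40 bytes of locals, two callee-saved GRs, no FP
   saves and 16 bytes of outgoing arguments computes
   40 + 8 + 8 + 16 + 32 = 104, which rounds up to a 128-byte frame.  */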
3720 /* Generate the assembly code for function entry. FILE is a stdio
3721 stream to output the code to. SIZE is an int: how many units of
3722 temporary storage to allocate.
3724 Refer to the array `regs_ever_live' to determine which registers to
3725 save; `regs_ever_live[I]' is nonzero if register number I is ever
3726 used in the function. This function is responsible for knowing
3727 which registers should not be saved even if used. */
3729 /* On HP-PA, move-double insns between fpu and cpu need an 8-byte block
3730 of memory. If any fpu reg is used in the function, we allocate
3731 such a block here, at the bottom of the frame, just in case it's needed.
3733 If this function is a leaf procedure, then we may choose not
3734 to do a "save" insn. The decision about whether or not
3735 to do this is made in regclass.c. */
3738 pa_output_function_prologue (FILE *file, HOST_WIDE_INT size ATTRIBUTE_UNUSED)
3740 /* The function's label and associated .PROC must never be
3741 separated and must be output *after* any profiling declarations
3742 to avoid changing spaces/subspaces within a procedure. */
3743 ASM_OUTPUT_LABEL (file, XSTR (XEXP (DECL_RTL (current_function_decl), 0), 0));
3744 fputs ("\t.PROC\n", file);
3746 /* pa_expand_prologue does the dirty work now. We just need
3747 to output the assembler directives which denote the start of a function. */
3749 fprintf (file, "\t.CALLINFO FRAME=" HOST_WIDE_INT_PRINT_DEC, actual_fsize);
3751 fputs (",NO_CALLS", file);
3753 fputs (",CALLS", file);
3755 fputs (",SAVE_RP", file);
3757 /* The SAVE_SP flag is used to indicate that register %r3 is stored
3758 at the beginning of the frame and that it is used as the frame
3759 pointer for the frame. We do this because our current frame
3760 layout doesn't conform to that specified in the HP runtime
3761 documentation and we need a way to indicate to programs such as
3762 GDB where %r3 is saved. The SAVE_SP flag was chosen because it
3763 isn't used by HP compilers but is supported by the assembler.
3764 However, SAVE_SP is supposed to indicate that the previous stack
3765 pointer has been saved in the frame marker. */
3766 if (frame_pointer_needed)
3767 fputs (",SAVE_SP", file);
3769 /* Pass on information about the number of callee register saves
3770 performed in the prologue.
3772 The compiler is supposed to pass the highest register number
3773 saved, the assembler then has to adjust that number before
3774 entering it into the unwind descriptor (to account for any
3775 caller saved registers with lower register numbers than the
3776 first callee saved register). */
3778 fprintf (file, ",ENTRY_GR=%d", gr_saved + 2);
3781 fprintf (file, ",ENTRY_FR=%d", fr_saved + 11);
3783 fputs ("\n\t.ENTRY\n", file);
3785 remove_useless_addtr_insns (0);
3789 pa_expand_prologue (void)
3791 int merge_sp_adjust_with_store = 0;
3792 HOST_WIDE_INT size = get_frame_size ();
3793 HOST_WIDE_INT offset;
3801 /* Compute total size for frame pointer, filler, locals and rounding to
3802 the next word boundary. Similar code appears in pa_compute_frame_size
3803 and must be changed in tandem with this code. */
3804 local_fsize = (size + UNITS_PER_WORD - 1) & ~(UNITS_PER_WORD - 1);
3805 if (local_fsize || frame_pointer_needed)
3806 local_fsize += STARTING_FRAME_OFFSET;
3808 actual_fsize = pa_compute_frame_size (size, &save_fregs);
3809 if (flag_stack_usage_info)
3810 current_function_static_stack_size = actual_fsize;
3812 /* Compute a few things we will use often. */
3813 tmpreg = gen_rtx_REG (word_mode, 1);
3815 /* Save RP first. The calling conventions manual states RP will
3816 always be stored into the caller's frame at sp - 20 or sp - 16
3817 depending on which ABI is in use. */
3818 if (df_regs_ever_live_p (2) || crtl->calls_eh_return)
3820 store_reg (2, TARGET_64BIT ? -16 : -20, STACK_POINTER_REGNUM);
3826 /* Allocate the local frame and set up the frame pointer if needed. */
3827 if (actual_fsize != 0)
3829 if (frame_pointer_needed)
3831 /* Copy the old frame pointer temporarily into %r1. Set up the
3832 new stack pointer, then store away the saved old frame pointer
3833 into the stack at sp and at the same time update the stack
3834 pointer by actual_fsize bytes. Two versions, first
3835 handles small (<8k) frames. The second handles large (>=8k) frames. */
3837 insn = emit_move_insn (tmpreg, hard_frame_pointer_rtx);
3839 RTX_FRAME_RELATED_P (insn) = 1;
3841 insn = emit_move_insn (hard_frame_pointer_rtx, stack_pointer_rtx);
3843 RTX_FRAME_RELATED_P (insn) = 1;
3845 if (VAL_14_BITS_P (actual_fsize))
3846 store_reg_modify (STACK_POINTER_REGNUM, 1, actual_fsize);
3849 /* It is incorrect to store the saved frame pointer at *sp,
3850 then increment sp (writes beyond the current stack boundary).
3852 So instead use stwm to store at *sp and post-increment the
3853 stack pointer as an atomic operation. Then increment sp to
3854 finish allocating the new frame. */
3855 HOST_WIDE_INT adjust1 = 8192 - 64;
3856 HOST_WIDE_INT adjust2 = actual_fsize - adjust1;
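          /* Illustrative example: for actual_fsize = 20000 this splits
             into adjust1 = 8192 - 64 = 8128 and adjust2 = 11872; the old
             frame pointer is stored with a post-modify of 8128 and the
             stack pointer is then advanced by the remaining 11872 bytes.  */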
3858 store_reg_modify (STACK_POINTER_REGNUM, 1, adjust1);
3859 set_reg_plus_d (STACK_POINTER_REGNUM, STACK_POINTER_REGNUM,
3863 /* We set SAVE_SP in frames that need a frame pointer. Thus,
3864 we need to store the previous stack pointer (frame pointer)
3865 into the frame marker on targets that use the HP unwind
3866 library. This allows the HP unwind library to be used to
3867 unwind GCC frames. However, we are not fully compatible
3868 with the HP library because our frame layout differs from
3869 that specified in the HP runtime specification.
3871 We don't want a frame note on this instruction as the frame
3872 marker moves during dynamic stack allocation.
3874 This instruction also serves as a blockage to prevent
3875 register spills from being scheduled before the stack
3876 pointer is raised. This is necessary as we store
3877 registers using the frame pointer as a base register,
3878 and the frame pointer is set before sp is raised. */
3879 if (TARGET_HPUX_UNWIND_LIBRARY)
3881 rtx addr = gen_rtx_PLUS (word_mode, stack_pointer_rtx,
3882 GEN_INT (TARGET_64BIT ? -8 : -4));
3884 emit_move_insn (gen_rtx_MEM (word_mode, addr),
3885 hard_frame_pointer_rtx);
3888 emit_insn (gen_blockage ());
3890 /* no frame pointer needed. */
3893 /* In some cases we can perform the first callee register save
3894 and allocate the stack frame at the same time. If so, just
3895 make a note of it and defer allocating the frame until saving
3896 the callee registers. */
3897 if (VAL_14_BITS_P (actual_fsize) && local_fsize == 0)
3898 merge_sp_adjust_with_store = 1;
3899 /* Cannot optimize. Adjust the stack frame by actual_fsize bytes. */
3902 set_reg_plus_d (STACK_POINTER_REGNUM, STACK_POINTER_REGNUM,
3907 /* Normal register save.
3909 Do not save the frame pointer in the frame_pointer_needed case. It
3910 was done earlier. */
3911 if (frame_pointer_needed)
3913 offset = local_fsize;
3915 /* Saving the EH return data registers in the frame is the simplest
3916 way to get the frame unwind information emitted. We put them
3917 just before the general registers. */
3918 if (DO_FRAME_NOTES && crtl->calls_eh_return)
3920 unsigned int i, regno;
3924 regno = EH_RETURN_DATA_REGNO (i);
3925 if (regno == INVALID_REGNUM)
3928 store_reg (regno, offset, HARD_FRAME_POINTER_REGNUM);
3929 offset += UNITS_PER_WORD;
3933 for (i = 18; i >= 4; i--)
3934 if (df_regs_ever_live_p (i) && ! call_used_regs[i])
3936 store_reg (i, offset, HARD_FRAME_POINTER_REGNUM);
3937 offset += UNITS_PER_WORD;
3940 /* Account for %r3 which is saved in a special place. */
3943 /* No frame pointer needed. */
3946 offset = local_fsize - actual_fsize;
3948 /* Saving the EH return data registers in the frame is the simplest
3949 way to get the frame unwind information emitted. */
3950 if (DO_FRAME_NOTES && crtl->calls_eh_return)
3952 unsigned int i, regno;
3956 regno = EH_RETURN_DATA_REGNO (i);
3957 if (regno == INVALID_REGNUM)
3960 /* If merge_sp_adjust_with_store is nonzero, then we can
3961 optimize the first save. */
3962 if (merge_sp_adjust_with_store)
3964 store_reg_modify (STACK_POINTER_REGNUM, regno, -offset);
3965 merge_sp_adjust_with_store = 0;
3968 store_reg (regno, offset, STACK_POINTER_REGNUM);
3969 offset += UNITS_PER_WORD;
3973 for (i = 18; i >= 3; i--)
3974 if (df_regs_ever_live_p (i) && ! call_used_regs[i])
3976 /* If merge_sp_adjust_with_store is nonzero, then we can
3977 optimize the first GR save. */
3978 if (merge_sp_adjust_with_store)
3980 store_reg_modify (STACK_POINTER_REGNUM, i, -offset);
3981 merge_sp_adjust_with_store = 0;
3984 store_reg (i, offset, STACK_POINTER_REGNUM);
3985 offset += UNITS_PER_WORD;
3989 /* If we wanted to merge the SP adjustment with a GR save, but we never
3990 did any GR saves, then just emit the adjustment here. */
3991 if (merge_sp_adjust_with_store)
3992 set_reg_plus_d (STACK_POINTER_REGNUM, STACK_POINTER_REGNUM,
3996 /* The hppa calling conventions say that %r19, the pic offset
3997 register, is saved at sp - 32 (in this function's frame)
3998 when generating PIC code. FIXME: What is the correct thing
3999 to do for functions which make no calls and allocate no
4000 frame? Do we need to allocate a frame, or can we just omit
4001 the save? For now we'll just omit the save.
4003 We don't want a note on this insn as the frame marker can
4004 move if there is a dynamic stack allocation. */
4005 if (flag_pic && actual_fsize != 0 && !TARGET_64BIT)
4007 rtx addr = gen_rtx_PLUS (word_mode, stack_pointer_rtx, GEN_INT (-32));
4009 emit_move_insn (gen_rtx_MEM (word_mode, addr), pic_offset_table_rtx);
4013 /* Align pointer properly (doubleword boundary). */
4014 offset = (offset + 7) & ~7;
4016 /* Floating point register store. */
4021 /* First get the frame or stack pointer to the start of the FP register save area. */
4023 if (frame_pointer_needed)
4025 set_reg_plus_d (1, HARD_FRAME_POINTER_REGNUM, offset, 0);
4026 base = hard_frame_pointer_rtx;
4030 set_reg_plus_d (1, STACK_POINTER_REGNUM, offset, 0);
4031 base = stack_pointer_rtx;
4034 /* Now actually save the FP registers. */
4035 for (i = FP_SAVED_REG_LAST; i >= FP_SAVED_REG_FIRST; i -= FP_REG_STEP)
4037 if (df_regs_ever_live_p (i)
4038 || (! TARGET_64BIT && df_regs_ever_live_p (i + 1)))
4040 rtx addr, insn, reg;
4041 addr = gen_rtx_MEM (DFmode, gen_rtx_POST_INC (DFmode, tmpreg));
4042 reg = gen_rtx_REG (DFmode, i);
4043 insn = emit_move_insn (addr, reg);
4046 RTX_FRAME_RELATED_P (insn) = 1;
4049 rtx mem = gen_rtx_MEM (DFmode,
4050 plus_constant (Pmode, base,
4052 add_reg_note (insn, REG_FRAME_RELATED_EXPR,
4053 gen_rtx_SET (VOIDmode, mem, reg));
4057 rtx meml = gen_rtx_MEM (SFmode,
4058 plus_constant (Pmode, base,
4060 rtx memr = gen_rtx_MEM (SFmode,
4061 plus_constant (Pmode, base,
4063 rtx regl = gen_rtx_REG (SFmode, i);
4064 rtx regr = gen_rtx_REG (SFmode, i + 1);
4065 rtx setl = gen_rtx_SET (VOIDmode, meml, regl);
4066 rtx setr = gen_rtx_SET (VOIDmode, memr, regr);
4069 RTX_FRAME_RELATED_P (setl) = 1;
4070 RTX_FRAME_RELATED_P (setr) = 1;
4071 vec = gen_rtvec (2, setl, setr);
4072 add_reg_note (insn, REG_FRAME_RELATED_EXPR,
4073 gen_rtx_SEQUENCE (VOIDmode, vec));
4076 offset += GET_MODE_SIZE (DFmode);
4083 /* Emit RTL to load REG from the memory location specified by BASE+DISP.
4084 Handle case where DISP > 8k by using the add_high_const patterns. */
4087 load_reg (int reg, HOST_WIDE_INT disp, int base)
4089 rtx dest = gen_rtx_REG (word_mode, reg);
4090 rtx basereg = gen_rtx_REG (Pmode, base);
4093 if (VAL_14_BITS_P (disp))
4094 src = gen_rtx_MEM (word_mode, plus_constant (Pmode, basereg, disp));
4095 else if (TARGET_64BIT && !VAL_32_BITS_P (disp))
4097 rtx delta = GEN_INT (disp);
4098 rtx tmpreg = gen_rtx_REG (Pmode, 1);
4100 emit_move_insn (tmpreg, delta);
4101 if (TARGET_DISABLE_INDEXING)
4103 emit_move_insn (tmpreg, gen_rtx_PLUS (Pmode, tmpreg, basereg));
4104 src = gen_rtx_MEM (word_mode, tmpreg);
4107 src = gen_rtx_MEM (word_mode, gen_rtx_PLUS (Pmode, tmpreg, basereg));
4111 rtx delta = GEN_INT (disp);
4112 rtx high = gen_rtx_PLUS (Pmode, basereg, gen_rtx_HIGH (Pmode, delta));
4113 rtx tmpreg = gen_rtx_REG (Pmode, 1);
4115 emit_move_insn (tmpreg, high);
4116 src = gen_rtx_MEM (word_mode, gen_rtx_LO_SUM (Pmode, tmpreg, delta));
4119 emit_move_insn (dest, src);
4122 /* Update the total code bytes output to the text section. */
4125 update_total_code_bytes (unsigned int nbytes)
4127 if ((TARGET_PORTABLE_RUNTIME || !TARGET_GAS || !TARGET_SOM)
4128 && !IN_NAMED_SECTION_P (cfun->decl))
4130 unsigned int old_total = total_code_bytes;
4132 total_code_bytes += nbytes;
4134 /* Be prepared to handle overflows. */
4135 if (old_total > total_code_bytes)
4136 total_code_bytes = UINT_MAX;
4140 /* This function generates the assembly code for function exit.
4141 Args are as for output_function_prologue ().
4143 The function epilogue should not depend on the current stack
4144 pointer! It should use the frame pointer only. This is mandatory
4145 because of alloca; we also take advantage of it to omit stack
4146 adjustments before returning. */
4149 pa_output_function_epilogue (FILE *file, HOST_WIDE_INT size ATTRIBUTE_UNUSED)
4151 rtx insn = get_last_insn ();
4155 /* pa_expand_epilogue does the dirty work now. We just need
4156 to output the assembler directives which denote the end
4159 To make debuggers happy, emit a nop if the epilogue was completely
4160 eliminated due to a volatile call as the last insn in the
4161 current function. That way the return address (in %r2) will
4162 always point to a valid instruction in the current function. */
4164 /* Get the last real insn. */
4165 if (GET_CODE (insn) == NOTE)
4166 insn = prev_real_insn (insn);
4168 /* If it is a sequence, then look inside. */
4169 if (insn && GET_CODE (insn) == INSN && GET_CODE (PATTERN (insn)) == SEQUENCE)
4170 insn = XVECEXP (PATTERN (insn), 0, 0);
4172 /* If insn is a CALL_INSN, then it must be a call to a volatile
4173 function (otherwise there would be epilogue insns). */
4174 if (insn && GET_CODE (insn) == CALL_INSN)
4176 fputs ("\tnop\n", file);
4180 fputs ("\t.EXIT\n\t.PROCEND\n", file);
4182 if (TARGET_SOM && TARGET_GAS)
4184 /* We're done with this subspace except possibly for some additional
4185 debug information. Forget that we are in this subspace to ensure
4186 that the next function is output in its own subspace. */
4188 cfun->machine->in_nsubspa = 2;
4191 if (INSN_ADDRESSES_SET_P ())
4193 insn = get_last_nonnote_insn ();
4194 last_address += INSN_ADDRESSES (INSN_UID (insn));
4196 last_address += insn_default_length (insn);
4197 last_address = ((last_address + FUNCTION_BOUNDARY / BITS_PER_UNIT - 1)
4198 & ~(FUNCTION_BOUNDARY / BITS_PER_UNIT - 1));
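/* For example (a sketch, assuming FUNCTION_BOUNDARY is 32 bits, i.e. 4
   bytes, here): a LAST_ADDRESS of 0x1235 becomes (0x1235 + 3) & ~3 =
   0x1238, rounding the address up to the next function boundary.  */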
4201 last_address = UINT_MAX;
4203 /* Finally, update the total number of code bytes output so far. */
4204 update_total_code_bytes (last_address);
4208 pa_expand_epilogue (void)
4211 HOST_WIDE_INT offset;
4212 HOST_WIDE_INT ret_off = 0;
4214 int merge_sp_adjust_with_load = 0;
4216 /* We will use this often. */
4217 tmpreg = gen_rtx_REG (word_mode, 1);
4219 /* Try to restore RP early to avoid load/use interlocks when
4220 RP gets used in the return (bv) instruction. This appears to still
4221 be necessary even when we schedule the prologue and epilogue. */
4224 ret_off = TARGET_64BIT ? -16 : -20;
4225 if (frame_pointer_needed)
4227 load_reg (2, ret_off, HARD_FRAME_POINTER_REGNUM);
4232 /* No frame pointer, and stack is smaller than 8k. */
4233 if (VAL_14_BITS_P (ret_off - actual_fsize))
4235 load_reg (2, ret_off - actual_fsize, STACK_POINTER_REGNUM);
4241 /* General register restores. */
4242 if (frame_pointer_needed)
4244 offset = local_fsize;
4246 /* If the current function calls __builtin_eh_return, then we need
4247 to restore the saved EH data registers. */
4248 if (DO_FRAME_NOTES && crtl->calls_eh_return)
4250 unsigned int i, regno;
4254 regno = EH_RETURN_DATA_REGNO (i);
4255 if (regno == INVALID_REGNUM)
4258 load_reg (regno, offset, HARD_FRAME_POINTER_REGNUM);
4259 offset += UNITS_PER_WORD;
4263 for (i = 18; i >= 4; i--)
4264 if (df_regs_ever_live_p (i) && ! call_used_regs[i])
4266 load_reg (i, offset, HARD_FRAME_POINTER_REGNUM);
4267 offset += UNITS_PER_WORD;
4272 offset = local_fsize - actual_fsize;
4274 /* If the current function calls __builtin_eh_return, then we need
4275 to restore the saved EH data registers. */
4276 if (DO_FRAME_NOTES && crtl->calls_eh_return)
4278 unsigned int i, regno;
4282 regno = EH_RETURN_DATA_REGNO (i);
4283 if (regno == INVALID_REGNUM)
4286 /* Only for the first load.
4287 merge_sp_adjust_with_load holds the register load
4288 with which we will merge the sp adjustment. */
4289 if (merge_sp_adjust_with_load == 0
4291 && VAL_14_BITS_P (-actual_fsize))
4292 merge_sp_adjust_with_load = regno;
4294 load_reg (regno, offset, STACK_POINTER_REGNUM);
4295 offset += UNITS_PER_WORD;
4299 for (i = 18; i >= 3; i--)
4301 if (df_regs_ever_live_p (i) && ! call_used_regs[i])
4303 /* Only for the first load.
4304 merge_sp_adjust_with_load holds the register load
4305 with which we will merge the sp adjustment. */
4306 if (merge_sp_adjust_with_load == 0
4308 && VAL_14_BITS_P (-actual_fsize))
4309 merge_sp_adjust_with_load = i;
4311 load_reg (i, offset, STACK_POINTER_REGNUM);
4312 offset += UNITS_PER_WORD;
4317 /* Align pointer properly (doubleword boundary). */
4318 offset = (offset + 7) & ~7;
4320 /* FP register restores. */
4323 /* Adjust the register to index off of. */
4324 if (frame_pointer_needed)
4325 set_reg_plus_d (1, HARD_FRAME_POINTER_REGNUM, offset, 0);
4327 set_reg_plus_d (1, STACK_POINTER_REGNUM, offset, 0);
4329 /* Actually do the restores now. */
4330 for (i = FP_SAVED_REG_LAST; i >= FP_SAVED_REG_FIRST; i -= FP_REG_STEP)
4331 if (df_regs_ever_live_p (i)
4332 || (! TARGET_64BIT && df_regs_ever_live_p (i + 1)))
4334 rtx src = gen_rtx_MEM (DFmode, gen_rtx_POST_INC (DFmode, tmpreg));
4335 rtx dest = gen_rtx_REG (DFmode, i);
4336 emit_move_insn (dest, src);
4340 /* Emit a blockage insn here to keep these insns from being moved to
4341 an earlier spot in the epilogue, or into the main instruction stream.
4343 This is necessary as we must not cut the stack back before all the
4344 restores are finished. */
4345 emit_insn (gen_blockage ());
4347 /* Reset stack pointer (and possibly frame pointer). The stack
4348 pointer is initially set to fp + 64 to avoid a race condition. */
4349 if (frame_pointer_needed)
4351 rtx delta = GEN_INT (-64);
4353 set_reg_plus_d (STACK_POINTER_REGNUM, HARD_FRAME_POINTER_REGNUM, 64, 0);
4354 emit_insn (gen_pre_load (hard_frame_pointer_rtx,
4355 stack_pointer_rtx, delta));
4357 /* If we were deferring a callee register restore, do it now. */
4358 else if (merge_sp_adjust_with_load)
4360 rtx delta = GEN_INT (-actual_fsize);
4361 rtx dest = gen_rtx_REG (word_mode, merge_sp_adjust_with_load);
4363 emit_insn (gen_pre_load (dest, stack_pointer_rtx, delta));
4365 else if (actual_fsize != 0)
4366 set_reg_plus_d (STACK_POINTER_REGNUM, STACK_POINTER_REGNUM,
4369 /* If we haven't restored %r2 yet (no frame pointer, and a stack
4370 frame greater than 8k), do so now. */
4372 load_reg (2, ret_off, STACK_POINTER_REGNUM);
4374 if (DO_FRAME_NOTES && crtl->calls_eh_return)
4376 rtx sa = EH_RETURN_STACKADJ_RTX;
4378 emit_insn (gen_blockage ());
4379 emit_insn (TARGET_64BIT
4380 ? gen_subdi3 (stack_pointer_rtx, stack_pointer_rtx, sa)
4381 : gen_subsi3 (stack_pointer_rtx, stack_pointer_rtx, sa));
4386 pa_can_use_return_insn (void)
4388 if (!reload_completed)
4391 if (frame_pointer_needed)
4394 if (df_regs_ever_live_p (2))
4400 return pa_compute_frame_size (get_frame_size (), 0) == 0;
4404 hppa_pic_save_rtx (void)
4406 return get_hard_reg_initial_val (word_mode, PIC_OFFSET_TABLE_REGNUM);
4409 #ifndef NO_DEFERRED_PROFILE_COUNTERS
4410 #define NO_DEFERRED_PROFILE_COUNTERS 0
4414 /* Vector of funcdef numbers. */
4415 static vec<int> funcdef_nos;
4417 /* Output deferred profile counters. */
4419 output_deferred_profile_counters (void)
4424 if (funcdef_nos.is_empty ())
4427 switch_to_section (data_section);
4428 align = MIN (BIGGEST_ALIGNMENT, LONG_TYPE_SIZE);
4429 ASM_OUTPUT_ALIGN (asm_out_file, floor_log2 (align / BITS_PER_UNIT));
4431 for (i = 0; funcdef_nos.iterate (i, &n); i++)
4433 targetm.asm_out.internal_label (asm_out_file, "LP", n);
4434 assemble_integer (const0_rtx, LONG_TYPE_SIZE / BITS_PER_UNIT, align, 1);
4437 funcdef_nos.release ();
4441 hppa_profile_hook (int label_no)
4443 /* We use SImode for the address of the function in both 32 and
4444 64-bit code to avoid having to provide DImode versions of the
4445 lcla2 and load_offset_label_address insn patterns. */
4446 rtx reg = gen_reg_rtx (SImode);
4447 rtx label_rtx = gen_label_rtx ();
4448 rtx begin_label_rtx, call_insn;
4449 char begin_label_name[16];
4451 ASM_GENERATE_INTERNAL_LABEL (begin_label_name, FUNC_BEGIN_PROLOG_LABEL,
4453 begin_label_rtx = gen_rtx_SYMBOL_REF (SImode, ggc_strdup (begin_label_name));
4456 emit_move_insn (arg_pointer_rtx,
4457 gen_rtx_PLUS (word_mode, virtual_outgoing_args_rtx,
4460 emit_move_insn (gen_rtx_REG (word_mode, 26), gen_rtx_REG (word_mode, 2));
4462 /* The address of the function is loaded into %r25 with an instruction-
4463 relative sequence that avoids the use of relocations. The sequence
4464 is split so that the load_offset_label_address instruction can
4465 occupy the delay slot of the call to _mcount. */
4467 emit_insn (gen_lcla2 (reg, label_rtx));
4469 emit_insn (gen_lcla1 (reg, label_rtx));
4471 emit_insn (gen_load_offset_label_address (gen_rtx_REG (SImode, 25),
4472 reg, begin_label_rtx, label_rtx));
4474 #if !NO_DEFERRED_PROFILE_COUNTERS
4476 rtx count_label_rtx, addr, r24;
4477 char count_label_name[16];
4479 funcdef_nos.safe_push (label_no);
4480 ASM_GENERATE_INTERNAL_LABEL (count_label_name, "LP", label_no);
4481 count_label_rtx = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (count_label_name));
4483 addr = force_reg (Pmode, count_label_rtx);
4484 r24 = gen_rtx_REG (Pmode, 24);
4485 emit_move_insn (r24, addr);
4488 emit_call_insn (gen_call (gen_rtx_MEM (Pmode,
4489 gen_rtx_SYMBOL_REF (Pmode,
4491 GEN_INT (TARGET_64BIT ? 24 : 12)));
4493 use_reg (&CALL_INSN_FUNCTION_USAGE (call_insn), r24);
4498 emit_call_insn (gen_call (gen_rtx_MEM (Pmode,
4499 gen_rtx_SYMBOL_REF (Pmode,
4501 GEN_INT (TARGET_64BIT ? 16 : 8)));
4505 use_reg (&CALL_INSN_FUNCTION_USAGE (call_insn), gen_rtx_REG (SImode, 25));
4506 use_reg (&CALL_INSN_FUNCTION_USAGE (call_insn), gen_rtx_REG (SImode, 26));
4508 /* Indicate the _mcount call cannot throw, nor will it execute a
4510 make_reg_eh_region_note_nothrow_nononlocal (call_insn);
4513 /* Fetch the return address for the frame COUNT steps up from
4514 the current frame, after the prologue. FRAMEADDR is the
4515 frame pointer of the COUNT frame.
4517 We want to ignore any export stub remnants here. To handle this,
4518 we examine the code at the return address, and if it is an export
4519 stub, we return a memory rtx for the stub return address stored
4522 The value returned is used in two different ways:
4524 1. To find a function's caller.
4526 2. To change the return address for a function.
4528 This function handles most instances of case 1; however, it will
4529 fail if there are two levels of stubs to execute on the return
4530 path. The only way I believe that can happen is if the return value
4531 needs a parameter relocation, which never happens for C code.
4533 This function handles most instances of case 2; however, it will
4534 fail if we did not originally have stub code on the return path
4535 but will need stub code on the new return path. This can happen if
4536 the caller & callee are both in the main program, but the new
4537 return location is in a shared library. */
4540 pa_return_addr_rtx (int count, rtx frameaddr)
4547 /* The instruction stream at the return address of a PA1.X export stub is:
4549 0x4bc23fd1 | stub+8: ldw -18(sr0,sp),rp
4550 0x004010a1 | stub+12: ldsid (sr0,rp),r1
4551 0x00011820 | stub+16: mtsp r1,sr0
4552 0xe0400002 | stub+20: be,n 0(sr0,rp)
4554 0xe0400002 must be specified as -532676606 so that it won't be
4555 rejected as an invalid immediate operand on 64-bit hosts.
4557 The instruction stream at the return address of a PA2.0 export stub is:
4559 0x4bc23fd1 | stub+8: ldw -18(sr0,sp),rp
4560 0xe840d002 | stub+12: bve,n (rp)
4563 HOST_WIDE_INT insns[4];
4569 rp = get_hard_reg_initial_val (Pmode, 2);
4571 if (TARGET_64BIT || TARGET_NO_SPACE_REGS)
4574 /* If there is no export stub then just use the value saved from
4575 the return pointer register. */
4577 saved_rp = gen_reg_rtx (Pmode);
4578 emit_move_insn (saved_rp, rp);
4580 /* Get pointer to the instruction stream. We have to mask out the
4581 privilege level from the two low order bits of the return address
4582 pointer here so that ins will point to the start of the first
4583 instruction that would have been executed if we returned. */
4584 ins = copy_to_reg (gen_rtx_AND (Pmode, rp, MASK_RETURN_ADDR));
4585 label = gen_label_rtx ();
4589 insns[0] = 0x4bc23fd1;
4590 insns[1] = -398405630;
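/* -398405630 is 0xe840d002, the bve,n (rp) word shown in the comment above,
   written as a negative decimal for the same reason 0xe0400002 is written
   as -532676606.  */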
4595 insns[0] = 0x4bc23fd1;
4596 insns[1] = 0x004010a1;
4597 insns[2] = 0x00011820;
4598 insns[3] = -532676606;
4602 /* Check the instruction stream at the normal return address for the
4603 export stub. If it is an export stub, then our return address is
4604 really in -24[frameaddr]. */
4606 for (i = 0; i < len; i++)
4608 rtx op0 = gen_rtx_MEM (SImode, plus_constant (Pmode, ins, i * 4));
4609 rtx op1 = GEN_INT (insns[i]);
4610 emit_cmp_and_jump_insns (op0, op1, NE, NULL, SImode, 0, label);
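/* If any word does not match the expected stub word, the branch to LABEL
   skips the overwrite below and SAVED_RP keeps the plain return pointer
   copied above; only an exact match falls through to the -24[frameaddr]
   load.  */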
4613 /* Here we know that our return address points to an export
4614 stub. We don't want to return the address of the export stub,
4615 but rather the return address of the export stub. That return
4616 address is stored at -24[frameaddr]. */
4618 emit_move_insn (saved_rp,
4620 memory_address (Pmode,
4621 plus_constant (Pmode, frameaddr,
4630 pa_emit_bcond_fp (rtx operands[])
4632 enum rtx_code code = GET_CODE (operands[0]);
4633 rtx operand0 = operands[1];
4634 rtx operand1 = operands[2];
4635 rtx label = operands[3];
4637 emit_insn (gen_rtx_SET (VOIDmode, gen_rtx_REG (CCFPmode, 0),
4638 gen_rtx_fmt_ee (code, CCFPmode, operand0, operand1)));
4640 emit_jump_insn (gen_rtx_SET (VOIDmode, pc_rtx,
4641 gen_rtx_IF_THEN_ELSE (VOIDmode,
4644 gen_rtx_REG (CCFPmode, 0),
4646 gen_rtx_LABEL_REF (VOIDmode, label),
4651 /* Adjust the cost of a scheduling dependency. Return the new cost of
4652 a dependency LINK or INSN on DEP_INSN. COST is the current cost. */
4655 pa_adjust_cost (rtx insn, rtx link, rtx dep_insn, int cost)
4657 enum attr_type attr_type;
4659 /* Don't adjust costs for a pa8000 chip, also do not adjust any
4660 true dependencies as they are described with bypasses now. */
4661 if (pa_cpu >= PROCESSOR_8000 || REG_NOTE_KIND (link) == 0)
4664 if (! recog_memoized (insn))
4667 attr_type = get_attr_type (insn);
4669 switch (REG_NOTE_KIND (link))
4672 /* Anti dependency; DEP_INSN reads a register that INSN writes some
4675 if (attr_type == TYPE_FPLOAD)
4677 rtx pat = PATTERN (insn);
4678 rtx dep_pat = PATTERN (dep_insn);
4679 if (GET_CODE (pat) == PARALLEL)
4681 /* This happens for the fldXs,mb patterns. */
4682 pat = XVECEXP (pat, 0, 0);
4684 if (GET_CODE (pat) != SET || GET_CODE (dep_pat) != SET)
4685 /* If this happens, we have to extend this to schedule
4686 optimally. Return 0 for now. */
4689 if (reg_mentioned_p (SET_DEST (pat), SET_SRC (dep_pat)))
4691 if (! recog_memoized (dep_insn))
4693 switch (get_attr_type (dep_insn))
4700 case TYPE_FPSQRTSGL:
4701 case TYPE_FPSQRTDBL:
4702 /* A fpload can't be issued until one cycle before a
4703 preceding arithmetic operation has finished if
4704 the target of the fpload is any of the sources
4705 (or destination) of the arithmetic operation. */
4706 return insn_default_latency (dep_insn) - 1;
4713 else if (attr_type == TYPE_FPALU)
4715 rtx pat = PATTERN (insn);
4716 rtx dep_pat = PATTERN (dep_insn);
4717 if (GET_CODE (pat) == PARALLEL)
4719 /* This happens for the fldXs,mb patterns. */
4720 pat = XVECEXP (pat, 0, 0);
4722 if (GET_CODE (pat) != SET || GET_CODE (dep_pat) != SET)
4723 /* If this happens, we have to extend this to schedule
4724 optimally. Return 0 for now. */
4727 if (reg_mentioned_p (SET_DEST (pat), SET_SRC (dep_pat)))
4729 if (! recog_memoized (dep_insn))
4731 switch (get_attr_type (dep_insn))
4735 case TYPE_FPSQRTSGL:
4736 case TYPE_FPSQRTDBL:
4737 /* An ALU flop can't be issued until two cycles before a
4738 preceding divide or sqrt operation has finished if
4739 the target of the ALU flop is any of the sources
4740 (or destination) of the divide or sqrt operation. */
4741 return insn_default_latency (dep_insn) - 2;
4749 /* For other anti dependencies, the cost is 0. */
4752 case REG_DEP_OUTPUT:
4753 /* Output dependency; DEP_INSN writes a register that INSN writes some
4755 if (attr_type == TYPE_FPLOAD)
4757 rtx pat = PATTERN (insn);
4758 rtx dep_pat = PATTERN (dep_insn);
4759 if (GET_CODE (pat) == PARALLEL)
4761 /* This happens for the fldXs,mb patterns. */
4762 pat = XVECEXP (pat, 0, 0);
4764 if (GET_CODE (pat) != SET || GET_CODE (dep_pat) != SET)
4765 /* If this happens, we have to extend this to schedule
4766 optimally. Return 0 for now. */
4769 if (reg_mentioned_p (SET_DEST (pat), SET_DEST (dep_pat)))
4771 if (! recog_memoized (dep_insn))
4773 switch (get_attr_type (dep_insn))
4780 case TYPE_FPSQRTSGL:
4781 case TYPE_FPSQRTDBL:
4782 /* A fpload can't be issued until one cycle before a
4783 preceding arithmetic operation has finished if
4784 the target of the fpload is the destination of the
4785 arithmetic operation.
4787 Exception: For PA7100LC, PA7200 and PA7300, the cost
4788 is 3 cycles, unless they bundle together. We also
4789 pay the penalty if the second insn is a fpload. */
4790 return insn_default_latency (dep_insn) - 1;
4797 else if (attr_type == TYPE_FPALU)
4799 rtx pat = PATTERN (insn);
4800 rtx dep_pat = PATTERN (dep_insn);
4801 if (GET_CODE (pat) == PARALLEL)
4803 /* This happens for the fldXs,mb patterns. */
4804 pat = XVECEXP (pat, 0, 0);
4806 if (GET_CODE (pat) != SET || GET_CODE (dep_pat) != SET)
4807 /* If this happens, we have to extend this to schedule
4808 optimally. Return 0 for now. */
4811 if (reg_mentioned_p (SET_DEST (pat), SET_DEST (dep_pat)))
4813 if (! recog_memoized (dep_insn))
4815 switch (get_attr_type (dep_insn))
4819 case TYPE_FPSQRTSGL:
4820 case TYPE_FPSQRTDBL:
4821 /* An ALU flop can't be issued until two cycles before a
4822 preceding divide or sqrt operation has finished if
4823 the target of the ALU flop is also the target of
4824 the divide or sqrt operation. */
4825 return insn_default_latency (dep_insn) - 2;
4833 /* For other output dependencies, the cost is 0. */
4841 /* Adjust scheduling priorities. We use this to try and keep addil
4842 and the next use of %r1 close together. */
4844 pa_adjust_priority (rtx insn, int priority)
4846 rtx set = single_set (insn);
4850 src = SET_SRC (set);
4851 dest = SET_DEST (set);
4852 if (GET_CODE (src) == LO_SUM
4853 && symbolic_operand (XEXP (src, 1), VOIDmode)
4854 && ! read_only_operand (XEXP (src, 1), VOIDmode))
4857 else if (GET_CODE (src) == MEM
4858 && GET_CODE (XEXP (src, 0)) == LO_SUM
4859 && symbolic_operand (XEXP (XEXP (src, 0), 1), VOIDmode)
4860 && ! read_only_operand (XEXP (XEXP (src, 0), 1), VOIDmode))
4863 else if (GET_CODE (dest) == MEM
4864 && GET_CODE (XEXP (dest, 0)) == LO_SUM
4865 && symbolic_operand (XEXP (XEXP (dest, 0), 1), VOIDmode)
4866 && ! read_only_operand (XEXP (XEXP (dest, 0), 1), VOIDmode))
4872 /* The 700 can only issue a single insn at a time.
4873 The 7XXX processors can issue two insns at a time.
4874 The 8000 can issue 4 insns at a time. */
4876 pa_issue_rate (void)
4880 case PROCESSOR_700: return 1;
4881 case PROCESSOR_7100: return 2;
4882 case PROCESSOR_7100LC: return 2;
4883 case PROCESSOR_7200: return 2;
4884 case PROCESSOR_7300: return 2;
4885 case PROCESSOR_8000: return 4;
4894 /* Return any length plus adjustment needed by INSN which already has
4895 its length computed as LENGTH. Return LENGTH if no adjustment is
4898 Also compute the length of an inline block move here as it is too
4899 complicated to express as a length attribute in pa.md. */
4901 pa_adjust_insn_length (rtx insn, int length)
4903 rtx pat = PATTERN (insn);
4905 /* If length is negative or undefined, provide initial length. */
4906 if ((unsigned int) length >= INT_MAX)
4908 if (GET_CODE (pat) == SEQUENCE)
4909 insn = XVECEXP (pat, 0, 0);
4911 switch (get_attr_type (insn))
4914 length = pa_attr_length_millicode_call (insn);
4917 length = pa_attr_length_call (insn, 0);
4920 length = pa_attr_length_call (insn, 1);
4923 length = pa_attr_length_indirect_call (insn);
4925 case TYPE_SH_FUNC_ADRS:
4926 length = pa_attr_length_millicode_call (insn) + 20;
4933 /* Jumps inside switch tables which have unfilled delay slots need
4935 if (GET_CODE (insn) == JUMP_INSN
4936 && GET_CODE (pat) == PARALLEL
4937 && get_attr_type (insn) == TYPE_BTABLE_BRANCH)
4939 /* Block move pattern. */
4940 else if (GET_CODE (insn) == INSN
4941 && GET_CODE (pat) == PARALLEL
4942 && GET_CODE (XVECEXP (pat, 0, 0)) == SET
4943 && GET_CODE (XEXP (XVECEXP (pat, 0, 0), 0)) == MEM
4944 && GET_CODE (XEXP (XVECEXP (pat, 0, 0), 1)) == MEM
4945 && GET_MODE (XEXP (XVECEXP (pat, 0, 0), 0)) == BLKmode
4946 && GET_MODE (XEXP (XVECEXP (pat, 0, 0), 1)) == BLKmode)
4947 length += compute_movmem_length (insn) - 4;
4948 /* Block clear pattern. */
4949 else if (GET_CODE (insn) == INSN
4950 && GET_CODE (pat) == PARALLEL
4951 && GET_CODE (XVECEXP (pat, 0, 0)) == SET
4952 && GET_CODE (XEXP (XVECEXP (pat, 0, 0), 0)) == MEM
4953 && XEXP (XVECEXP (pat, 0, 0), 1) == const0_rtx
4954 && GET_MODE (XEXP (XVECEXP (pat, 0, 0), 0)) == BLKmode)
4955 length += compute_clrmem_length (insn) - 4;
4956 /* Conditional branch with an unfilled delay slot. */
4957 else if (GET_CODE (insn) == JUMP_INSN && ! simplejump_p (insn))
4959 /* Adjust a short backwards conditional with an unfilled delay slot. */
4960 if (GET_CODE (pat) == SET
4962 && JUMP_LABEL (insn) != NULL_RTX
4963 && ! forward_branch_p (insn))
4965 else if (GET_CODE (pat) == PARALLEL
4966 && get_attr_type (insn) == TYPE_PARALLEL_BRANCH
4969 /* Adjust dbra insn with short backwards conditional branch with
4970 unfilled delay slot -- only for the case where the counter is in a
4971 general register. */
4972 else if (GET_CODE (pat) == PARALLEL
4973 && GET_CODE (XVECEXP (pat, 0, 1)) == SET
4974 && GET_CODE (XEXP (XVECEXP (pat, 0, 1), 0)) == REG
4975 && ! FP_REG_P (XEXP (XVECEXP (pat, 0, 1), 0))
4977 && ! forward_branch_p (insn))
4983 /* Implement the TARGET_PRINT_OPERAND_PUNCT_VALID_P hook. */
4986 pa_print_operand_punct_valid_p (unsigned char code)
4997 /* Print operand X (an rtx) in assembler syntax to file FILE.
4998 CODE is a letter or dot (`z' in `%z0') or 0 if no letter was specified.
4999 For `%' followed by punctuation, CODE is the punctuation and X is null. */
5002 pa_print_operand (FILE *file, rtx x, int code)
5007 /* Output a 'nop' if there's nothing for the delay slot. */
5008 if (dbr_sequence_length () == 0)
5009 fputs ("\n\tnop", file);
5012 /* Output a nullification completer if there's nothing for the
5013 delay slot or nullification is requested. */
5014 if (dbr_sequence_length () == 0 ||
5016 INSN_ANNULLED_BRANCH_P (XVECEXP (final_sequence, 0, 0))))
5020 /* Print out the second register name of a register pair.
5021 I.e., R (6) => 7. */
5022 fputs (reg_names[REGNO (x) + 1], file);
5025 /* A register or zero. */
5027 || (x == CONST0_RTX (DFmode))
5028 || (x == CONST0_RTX (SFmode)))
5030 fputs ("%r0", file);
5036 /* A register or zero (floating point). */
5038 || (x == CONST0_RTX (DFmode))
5039 || (x == CONST0_RTX (SFmode)))
5041 fputs ("%fr0", file);
5050 xoperands[0] = XEXP (XEXP (x, 0), 0);
5051 xoperands[1] = XVECEXP (XEXP (XEXP (x, 0), 1), 0, 0);
5052 pa_output_global_address (file, xoperands[1], 0);
5053 fprintf (file, "(%s)", reg_names [REGNO (xoperands[0])]);
5057 case 'C': /* Plain (C)ondition */
5059 switch (GET_CODE (x))
5062 fputs ("=", file); break;
5064 fputs ("<>", file); break;
5066 fputs (">", file); break;
5068 fputs (">=", file); break;
5070 fputs (">>=", file); break;
5072 fputs (">>", file); break;
5074 fputs ("<", file); break;
5076 fputs ("<=", file); break;
5078 fputs ("<<=", file); break;
5080 fputs ("<<", file); break;
5085 case 'N': /* Condition, (N)egated */
5086 switch (GET_CODE (x))
5089 fputs ("<>", file); break;
5091 fputs ("=", file); break;
5093 fputs ("<=", file); break;
5095 fputs ("<", file); break;
5097 fputs ("<<", file); break;
5099 fputs ("<<=", file); break;
5101 fputs (">=", file); break;
5103 fputs (">", file); break;
5105 fputs (">>", file); break;
5107 fputs (">>=", file); break;
5112 /* For floating point comparisons. Note that the output
5113 predicates are the complement of the desired mode. The
5114 conditions for GT, GE, LT, LE and LTGT cause an invalid
5115 operation exception if the result is unordered and this
5116 exception is enabled in the floating-point status register. */
5118 switch (GET_CODE (x))
5121 fputs ("!=", file); break;
5123 fputs ("=", file); break;
5125 fputs ("!>", file); break;
5127 fputs ("!>=", file); break;
5129 fputs ("!<", file); break;
5131 fputs ("!<=", file); break;
5133 fputs ("!<>", file); break;
5135 fputs ("!?<=", file); break;
5137 fputs ("!?<", file); break;
5139 fputs ("!?>=", file); break;
5141 fputs ("!?>", file); break;
5143 fputs ("!?=", file); break;
5145 fputs ("!?", file); break;
5147 fputs ("?", file); break;
5152 case 'S': /* Condition, operands are (S)wapped. */
5153 switch (GET_CODE (x))
5156 fputs ("=", file); break;
5158 fputs ("<>", file); break;
5160 fputs ("<", file); break;
5162 fputs ("<=", file); break;
5164 fputs ("<<=", file); break;
5166 fputs ("<<", file); break;
5168 fputs (">", file); break;
5170 fputs (">=", file); break;
5172 fputs (">>=", file); break;
5174 fputs (">>", file); break;
5179 case 'B': /* Condition, (B)oth swapped and negate. */
5180 switch (GET_CODE (x))
5183 fputs ("<>", file); break;
5185 fputs ("=", file); break;
5187 fputs (">=", file); break;
5189 fputs (">", file); break;
5191 fputs (">>", file); break;
5193 fputs (">>=", file); break;
5195 fputs ("<=", file); break;
5197 fputs ("<", file); break;
5199 fputs ("<<", file); break;
5201 fputs ("<<=", file); break;
5207 gcc_assert (GET_CODE (x) == CONST_INT);
5208 fprintf (file, HOST_WIDE_INT_PRINT_DEC, ~INTVAL (x));
5211 gcc_assert (GET_CODE (x) == CONST_INT);
5212 fprintf (file, HOST_WIDE_INT_PRINT_DEC, 64 - (INTVAL (x) & 63));
5215 gcc_assert (GET_CODE (x) == CONST_INT);
5216 fprintf (file, HOST_WIDE_INT_PRINT_DEC, 32 - (INTVAL (x) & 31));
5219 gcc_assert (GET_CODE (x) == CONST_INT && exact_log2 (INTVAL (x)) >= 0);
5220 fprintf (file, "%d", exact_log2 (INTVAL (x)));
5223 gcc_assert (GET_CODE (x) == CONST_INT);
5224 fprintf (file, HOST_WIDE_INT_PRINT_DEC, 63 - (INTVAL (x) & 63));
5227 gcc_assert (GET_CODE (x) == CONST_INT);
5228 fprintf (file, HOST_WIDE_INT_PRINT_DEC, 31 - (INTVAL (x) & 31));
5231 if (GET_CODE (x) == CONST_INT)
5236 switch (GET_CODE (XEXP (x, 0)))
5240 if (ASSEMBLER_DIALECT == 0)
5241 fputs ("s,mb", file);
5243 fputs (",mb", file);
5247 if (ASSEMBLER_DIALECT == 0)
5248 fputs ("s,ma", file);
5250 fputs (",ma", file);
5253 if (GET_CODE (XEXP (XEXP (x, 0), 0)) == REG
5254 && GET_CODE (XEXP (XEXP (x, 0), 1)) == REG)
5256 if (ASSEMBLER_DIALECT == 0)
5259 else if (GET_CODE (XEXP (XEXP (x, 0), 0)) == MULT
5260 || GET_CODE (XEXP (XEXP (x, 0), 1)) == MULT)
5262 if (ASSEMBLER_DIALECT == 0)
5263 fputs ("x,s", file);
5267 else if (code == 'F' && ASSEMBLER_DIALECT == 0)
5271 if (code == 'F' && ASSEMBLER_DIALECT == 0)
5277 pa_output_global_address (file, x, 0);
5280 pa_output_global_address (file, x, 1);
5282 case 0: /* Don't do anything special */
5287 compute_zdepwi_operands (INTVAL (x), op);
5288 fprintf (file, "%d,%d,%d", op[0], op[1], op[2]);
5294 compute_zdepdi_operands (INTVAL (x), op);
5295 fprintf (file, "%d,%d,%d", op[0], op[1], op[2]);
5299 /* We can get here from a .vtable_inherit due to our
5300 CONSTANT_ADDRESS_P rejecting perfectly good constant
5306 if (GET_CODE (x) == REG)
5308 fputs (reg_names [REGNO (x)], file);
5309 if (TARGET_64BIT && FP_REG_P (x) && GET_MODE_SIZE (GET_MODE (x)) <= 4)
5315 && GET_MODE_SIZE (GET_MODE (x)) <= 4
5316 && (REGNO (x) & 1) == 0)
5319 else if (GET_CODE (x) == MEM)
5321 int size = GET_MODE_SIZE (GET_MODE (x));
5322 rtx base = NULL_RTX;
5323 switch (GET_CODE (XEXP (x, 0)))
5327 base = XEXP (XEXP (x, 0), 0);
5328 fprintf (file, "-%d(%s)", size, reg_names [REGNO (base)]);
5332 base = XEXP (XEXP (x, 0), 0);
5333 fprintf (file, "%d(%s)", size, reg_names [REGNO (base)]);
5336 if (GET_CODE (XEXP (XEXP (x, 0), 0)) == MULT)
5337 fprintf (file, "%s(%s)",
5338 reg_names [REGNO (XEXP (XEXP (XEXP (x, 0), 0), 0))],
5339 reg_names [REGNO (XEXP (XEXP (x, 0), 1))]);
5340 else if (GET_CODE (XEXP (XEXP (x, 0), 1)) == MULT)
5341 fprintf (file, "%s(%s)",
5342 reg_names [REGNO (XEXP (XEXP (XEXP (x, 0), 1), 0))],
5343 reg_names [REGNO (XEXP (XEXP (x, 0), 0))]);
5344 else if (GET_CODE (XEXP (XEXP (x, 0), 0)) == REG
5345 && GET_CODE (XEXP (XEXP (x, 0), 1)) == REG)
5347 /* Because the REG_POINTER flag can get lost during reload,
5348 pa_legitimate_address_p canonicalizes the order of the
5349 index and base registers in the combined move patterns. */
5350 rtx base = XEXP (XEXP (x, 0), 1);
5351 rtx index = XEXP (XEXP (x, 0), 0);
5353 fprintf (file, "%s(%s)",
5354 reg_names [REGNO (index)], reg_names [REGNO (base)]);
5357 output_address (XEXP (x, 0));
5360 output_address (XEXP (x, 0));
5365 output_addr_const (file, x);
5368 /* output a SYMBOL_REF or a CONST expression involving a SYMBOL_REF. */
5371 pa_output_global_address (FILE *file, rtx x, int round_constant)
5374 /* Imagine (high (const (plus ...))). */
5375 if (GET_CODE (x) == HIGH)
5378 if (GET_CODE (x) == SYMBOL_REF && read_only_operand (x, VOIDmode))
5379 output_addr_const (file, x);
5380 else if (GET_CODE (x) == SYMBOL_REF && !flag_pic)
5382 output_addr_const (file, x);
5383 fputs ("-$global$", file);
5385 else if (GET_CODE (x) == CONST)
5387 const char *sep = "";
5388 int offset = 0; /* assembler wants -$global$ at end */
5389 rtx base = NULL_RTX;
5391 switch (GET_CODE (XEXP (XEXP (x, 0), 0)))
5394 base = XEXP (XEXP (x, 0), 0);
5395 output_addr_const (file, base);
5398 offset = INTVAL (XEXP (XEXP (x, 0), 0));
5404 switch (GET_CODE (XEXP (XEXP (x, 0), 1)))
5407 base = XEXP (XEXP (x, 0), 1);
5408 output_addr_const (file, base);
5411 offset = INTVAL (XEXP (XEXP (x, 0), 1));
5417 /* How bogus. The compiler is apparently responsible for
5418 rounding the constant if it uses an LR field selector.
5420 The linker and/or assembler seem a better place since
5421 they have to do this kind of thing already.
5423 If we fail to do this, HP's optimizing linker may eliminate
5424 an addil, but not update the ldw/stw/ldo instruction that
5425 uses the result of the addil. */
5427 offset = ((offset + 0x1000) & ~0x1fff);
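/* A worked instance of the rounding (a sketch): an OFFSET of 0x1234 becomes
   (0x1234 + 0x1000) & ~0x1fff = 0x2000, while 0x0fff rounds down to 0; the
   offset is rounded to the nearest multiple of 0x2000 (8KB), presumably so
   the leftover low part always fits a signed 14-bit displacement field.  */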
5429 switch (GET_CODE (XEXP (x, 0)))
5442 gcc_assert (GET_CODE (XEXP (XEXP (x, 0), 0)) == SYMBOL_REF);
5450 if (!read_only_operand (base, VOIDmode) && !flag_pic)
5451 fputs ("-$global$", file);
5453 fprintf (file, "%s%d", sep, offset);
5456 output_addr_const (file, x);
5459 /* Output boilerplate text to appear at the beginning of the file.
5460 There are several possible versions. */
5461 #define aputs(x) fputs(x, asm_out_file)
5463 pa_file_start_level (void)
5466 aputs ("\t.LEVEL 2.0w\n");
5467 else if (TARGET_PA_20)
5468 aputs ("\t.LEVEL 2.0\n");
5469 else if (TARGET_PA_11)
5470 aputs ("\t.LEVEL 1.1\n");
5472 aputs ("\t.LEVEL 1.0\n");
5476 pa_file_start_space (int sortspace)
5478 aputs ("\t.SPACE $PRIVATE$");
5481 aputs ("\n\t.SUBSPA $DATA$,QUAD=1,ALIGN=8,ACCESS=31");
5483 aputs ("\n\t.SUBSPA $TM_CLONE_TABLE$,QUAD=1,ALIGN=8,ACCESS=31");
5484 aputs ("\n\t.SUBSPA $BSS$,QUAD=1,ALIGN=8,ACCESS=31,ZERO,SORT=82"
5485 "\n\t.SPACE $TEXT$");
5488 aputs ("\n\t.SUBSPA $LIT$,QUAD=0,ALIGN=8,ACCESS=44"
5489 "\n\t.SUBSPA $CODE$,QUAD=0,ALIGN=8,ACCESS=44,CODE_ONLY\n");
5493 pa_file_start_file (int want_version)
5495 if (write_symbols != NO_DEBUG)
5497 output_file_directive (asm_out_file, main_input_filename);
5499 aputs ("\t.version\t\"01.01\"\n");
5504 pa_file_start_mcount (const char *aswhat)
5507 fprintf (asm_out_file, "\t.IMPORT _mcount,%s\n", aswhat);
5511 pa_elf_file_start (void)
5513 pa_file_start_level ();
5514 pa_file_start_mcount ("ENTRY");
5515 pa_file_start_file (0);
5519 pa_som_file_start (void)
5521 pa_file_start_level ();
5522 pa_file_start_space (0);
5523 aputs ("\t.IMPORT $global$,DATA\n"
5524 "\t.IMPORT $$dyncall,MILLICODE\n");
5525 pa_file_start_mcount ("CODE");
5526 pa_file_start_file (0);
5530 pa_linux_file_start (void)
5532 pa_file_start_file (1);
5533 pa_file_start_level ();
5534 pa_file_start_mcount ("CODE");
5538 pa_hpux64_gas_file_start (void)
5540 pa_file_start_level ();
5541 #ifdef ASM_OUTPUT_TYPE_DIRECTIVE
5543 ASM_OUTPUT_TYPE_DIRECTIVE (asm_out_file, "_mcount", "function");
5545 pa_file_start_file (1);
5549 pa_hpux64_hpas_file_start (void)
5551 pa_file_start_level ();
5552 pa_file_start_space (1);
5553 pa_file_start_mcount ("CODE");
5554 pa_file_start_file (0);
5558 /* Search the deferred plabel list for SYMBOL and return its internal
5559 label. If an entry for SYMBOL is not found, a new entry is created. */
5562 pa_get_deferred_plabel (rtx symbol)
5564 const char *fname = XSTR (symbol, 0);
5567 /* See if we have already put this function on the list of deferred
5568 plabels. This list is generally small, so a linear search is not
5569 too ugly. If it proves too slow, replace it with something faster. */
5570 for (i = 0; i < n_deferred_plabels; i++)
5571 if (strcmp (fname, XSTR (deferred_plabels[i].symbol, 0)) == 0)
5574 /* If the deferred plabel list is empty, or this entry was not found
5575 on the list, create a new entry on the list. */
5576 if (deferred_plabels == NULL || i == n_deferred_plabels)
5580 if (deferred_plabels == 0)
5581 deferred_plabels = ggc_alloc_deferred_plabel ();
5583 deferred_plabels = GGC_RESIZEVEC (struct deferred_plabel,
5585 n_deferred_plabels + 1);
5587 i = n_deferred_plabels++;
5588 deferred_plabels[i].internal_label = gen_label_rtx ();
5589 deferred_plabels[i].symbol = symbol;
5591 /* Gross. We have just implicitly taken the address of this
5592 function. Mark it in the same manner as assemble_name. */
5593 id = maybe_get_identifier (targetm.strip_name_encoding (fname));
5595 mark_referenced (id);
5598 return deferred_plabels[i].internal_label;
5602 output_deferred_plabels (void)
5606 /* If we have some deferred plabels, then we need to switch into the
5607 data or readonly data section, and align it to a 4 byte boundary
5608 before outputting the deferred plabels. */
5609 if (n_deferred_plabels)
5611 switch_to_section (flag_pic ? data_section : readonly_data_section);
5612 ASM_OUTPUT_ALIGN (asm_out_file, TARGET_64BIT ? 3 : 2);
5615 /* Now output the deferred plabels. */
5616 for (i = 0; i < n_deferred_plabels; i++)
5618 targetm.asm_out.internal_label (asm_out_file, "L",
5619 CODE_LABEL_NUMBER (deferred_plabels[i].internal_label));
5620 assemble_integer (deferred_plabels[i].symbol,
5621 TARGET_64BIT ? 8 : 4, TARGET_64BIT ? 64 : 32, 1);
5625 /* Initialize optabs to point to emulation routines. */
5628 pa_init_libfuncs (void)
5630 if (HPUX_LONG_DOUBLE_LIBRARY)
5632 set_optab_libfunc (add_optab, TFmode, "_U_Qfadd");
5633 set_optab_libfunc (sub_optab, TFmode, "_U_Qfsub");
5634 set_optab_libfunc (smul_optab, TFmode, "_U_Qfmpy");
5635 set_optab_libfunc (sdiv_optab, TFmode, "_U_Qfdiv");
5636 set_optab_libfunc (smin_optab, TFmode, "_U_Qmin");
5637 set_optab_libfunc (smax_optab, TFmode, "_U_Qfmax");
5638 set_optab_libfunc (sqrt_optab, TFmode, "_U_Qfsqrt");
5639 set_optab_libfunc (abs_optab, TFmode, "_U_Qfabs");
5640 set_optab_libfunc (neg_optab, TFmode, "_U_Qfneg");
5642 set_optab_libfunc (eq_optab, TFmode, "_U_Qfeq");
5643 set_optab_libfunc (ne_optab, TFmode, "_U_Qfne");
5644 set_optab_libfunc (gt_optab, TFmode, "_U_Qfgt");
5645 set_optab_libfunc (ge_optab, TFmode, "_U_Qfge");
5646 set_optab_libfunc (lt_optab, TFmode, "_U_Qflt");
5647 set_optab_libfunc (le_optab, TFmode, "_U_Qfle");
5648 set_optab_libfunc (unord_optab, TFmode, "_U_Qfunord");
5650 set_conv_libfunc (sext_optab, TFmode, SFmode, "_U_Qfcnvff_sgl_to_quad");
5651 set_conv_libfunc (sext_optab, TFmode, DFmode, "_U_Qfcnvff_dbl_to_quad");
5652 set_conv_libfunc (trunc_optab, SFmode, TFmode, "_U_Qfcnvff_quad_to_sgl");
5653 set_conv_libfunc (trunc_optab, DFmode, TFmode, "_U_Qfcnvff_quad_to_dbl");
5655 set_conv_libfunc (sfix_optab, SImode, TFmode,
5656 TARGET_64BIT ? "__U_Qfcnvfxt_quad_to_sgl"
5657 : "_U_Qfcnvfxt_quad_to_sgl");
5658 set_conv_libfunc (sfix_optab, DImode, TFmode,
5659 "_U_Qfcnvfxt_quad_to_dbl");
5660 set_conv_libfunc (ufix_optab, SImode, TFmode,
5661 "_U_Qfcnvfxt_quad_to_usgl");
5662 set_conv_libfunc (ufix_optab, DImode, TFmode,
5663 "_U_Qfcnvfxt_quad_to_udbl");
5665 set_conv_libfunc (sfloat_optab, TFmode, SImode,
5666 "_U_Qfcnvxf_sgl_to_quad");
5667 set_conv_libfunc (sfloat_optab, TFmode, DImode,
5668 "_U_Qfcnvxf_dbl_to_quad");
5669 set_conv_libfunc (ufloat_optab, TFmode, SImode,
5670 "_U_Qfcnvxf_usgl_to_quad");
5671 set_conv_libfunc (ufloat_optab, TFmode, DImode,
5672 "_U_Qfcnvxf_udbl_to_quad");
5675 if (TARGET_SYNC_LIBCALL)
5676 init_sync_libfuncs (UNITS_PER_WORD);
5679 /* HP's millicode routines mean something special to the assembler.
5680 Keep track of which ones we have used. */
5682 enum millicodes { remI, remU, divI, divU, mulI, end1000 };
5683 static void import_milli (enum millicodes);
5684 static char imported[(int) end1000];
5685 static const char * const milli_names[] = {"remI", "remU", "divI", "divU", "mulI"};
5686 static const char import_string[] = ".IMPORT $$....,MILLICODE";
5687 #define MILLI_START 10
5690 import_milli (enum millicodes code)
5692 char str[sizeof (import_string)];
5694 if (!imported[(int) code])
5696 imported[(int) code] = 1;
5697 strcpy (str, import_string);
5698 strncpy (str + MILLI_START, milli_names[(int) code], 4);
5699 output_asm_insn (str, 0);
5703 /* The register constraints have put the operands and return value in
5704 the proper registers. */
5707 pa_output_mul_insn (int unsignedp ATTRIBUTE_UNUSED, rtx insn)
5709 import_milli (mulI);
5710 return pa_output_millicode_call (insn, gen_rtx_SYMBOL_REF (Pmode, "$$mulI"));
5713 /* Emit the rtl for doing a division by a constant. */
5715 /* Do magic division millicodes exist for this value? */
5716 const int pa_magic_milli[]= {0, 0, 0, 1, 0, 1, 1, 1, 0, 1, 1, 0, 1, 0, 1, 1};
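/* Reading the table above: the nonzero entries are for divisors 3, 5, 6, 7,
   9, 10, 12, 14 and 15, i.e. the values for which pa_output_div_insn below
   can emit a call to a matching $$divI_<n> / $$divU_<n> millicode routine
   instead of the generic $$divI / $$divU.  */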
5718 /* We'll use an array to keep track of the magic millicodes and
5719 whether or not we've used them already. [n][0] is signed, [n][1] is
5722 static int div_milli[16][2];
5725 pa_emit_hpdiv_const (rtx *operands, int unsignedp)
5727 if (GET_CODE (operands[2]) == CONST_INT
5728 && INTVAL (operands[2]) > 0
5729 && INTVAL (operands[2]) < 16
5730 && pa_magic_milli[INTVAL (operands[2])])
5732 rtx ret = gen_rtx_REG (SImode, TARGET_64BIT ? 2 : 31);
5734 emit_move_insn (gen_rtx_REG (SImode, 26), operands[1]);
5738 gen_rtvec (6, gen_rtx_SET (VOIDmode, gen_rtx_REG (SImode, 29),
5739 gen_rtx_fmt_ee (unsignedp ? UDIV : DIV,
5741 gen_rtx_REG (SImode, 26),
5743 gen_rtx_CLOBBER (VOIDmode, operands[4]),
5744 gen_rtx_CLOBBER (VOIDmode, operands[3]),
5745 gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (SImode, 26)),
5746 gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (SImode, 25)),
5747 gen_rtx_CLOBBER (VOIDmode, ret))));
5748 emit_move_insn (operands[0], gen_rtx_REG (SImode, 29));
5755 pa_output_div_insn (rtx *operands, int unsignedp, rtx insn)
5759 /* If the divisor is a constant, try to use one of the special
5761 if (GET_CODE (operands[0]) == CONST_INT)
5763 static char buf[100];
5764 divisor = INTVAL (operands[0]);
5765 if (!div_milli[divisor][unsignedp])
5767 div_milli[divisor][unsignedp] = 1;
5769 output_asm_insn (".IMPORT $$divU_%0,MILLICODE", operands);
5771 output_asm_insn (".IMPORT $$divI_%0,MILLICODE", operands);
5775 sprintf (buf, "$$divU_" HOST_WIDE_INT_PRINT_DEC,
5776 INTVAL (operands[0]));
5777 return pa_output_millicode_call (insn,
5778 gen_rtx_SYMBOL_REF (SImode, buf));
5782 sprintf (buf, "$$divI_" HOST_WIDE_INT_PRINT_DEC,
5783 INTVAL (operands[0]));
5784 return pa_output_millicode_call (insn,
5785 gen_rtx_SYMBOL_REF (SImode, buf));
5788 /* Divisor isn't a special constant. */
5793 import_milli (divU);
5794 return pa_output_millicode_call (insn,
5795 gen_rtx_SYMBOL_REF (SImode, "$$divU"));
5799 import_milli (divI);
5800 return pa_output_millicode_call (insn,
5801 gen_rtx_SYMBOL_REF (SImode, "$$divI"));
5806 /* Output a $$rem millicode to do mod. */
5809 pa_output_mod_insn (int unsignedp, rtx insn)
5813 import_milli (remU);
5814 return pa_output_millicode_call (insn,
5815 gen_rtx_SYMBOL_REF (SImode, "$$remU"));
5819 import_milli (remI);
5820 return pa_output_millicode_call (insn,
5821 gen_rtx_SYMBOL_REF (SImode, "$$remI"));
5826 pa_output_arg_descriptor (rtx call_insn)
5828 const char *arg_regs[4];
5829 enum machine_mode arg_mode;
5831 int i, output_flag = 0;
5834 /* We neither need nor want argument location descriptors for the
5835 64-bit runtime environment or the ELF32 environment. */
5836 if (TARGET_64BIT || TARGET_ELF32)
5839 for (i = 0; i < 4; i++)
5842 /* Specify explicitly that no argument relocations should take place
5843 if using the portable runtime calling conventions. */
5844 if (TARGET_PORTABLE_RUNTIME)
5846 fputs ("\t.CALL ARGW0=NO,ARGW1=NO,ARGW2=NO,ARGW3=NO,RETVAL=NO\n",
5851 gcc_assert (GET_CODE (call_insn) == CALL_INSN);
5852 for (link = CALL_INSN_FUNCTION_USAGE (call_insn);
5853 link; link = XEXP (link, 1))
5855 rtx use = XEXP (link, 0);
5857 if (! (GET_CODE (use) == USE
5858 && GET_CODE (XEXP (use, 0)) == REG
5859 && FUNCTION_ARG_REGNO_P (REGNO (XEXP (use, 0)))))
5862 arg_mode = GET_MODE (XEXP (use, 0));
5863 regno = REGNO (XEXP (use, 0));
5864 if (regno >= 23 && regno <= 26)
5866 arg_regs[26 - regno] = "GR";
5867 if (arg_mode == DImode)
5868 arg_regs[25 - regno] = "GR";
5870 else if (regno >= 32 && regno <= 39)
5872 if (arg_mode == SFmode)
5873 arg_regs[(regno - 32) / 2] = "FR";
5876 #ifndef HP_FP_ARG_DESCRIPTOR_REVERSED
5877 arg_regs[(regno - 34) / 2] = "FR";
5878 arg_regs[(regno - 34) / 2 + 1] = "FU";
5880 arg_regs[(regno - 34) / 2] = "FU";
5881 arg_regs[(regno - 34) / 2 + 1] = "FR";
5886 fputs ("\t.CALL ", asm_out_file);
5887 for (i = 0; i < 4; i++)
5892 fputc (',', asm_out_file);
5893 fprintf (asm_out_file, "ARGW%d=%s", i, arg_regs[i]);
5896 fputc ('\n', asm_out_file);
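/* For example (a sketch): a DImode argument passed in the %r25/%r26 pair
   marks both of its slots GR in the loop above, so the directive comes out
   as something like "\t.CALL ARGW0=GR,ARGW1=GR".  */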
5899 /* Inform reload about cases where moving X with a mode MODE to or from
5900 a register in RCLASS requires an extra scratch or immediate register.
5901 Return the class needed for the immediate register. */
5904 pa_secondary_reload (bool in_p, rtx x, reg_class_t rclass_i,
5905 enum machine_mode mode, secondary_reload_info *sri)
5908 enum reg_class rclass = (enum reg_class) rclass_i;
5910 /* Handle the easy stuff first. */
5911 if (rclass == R1_REGS)
5917 if (rclass == BASE_REG_CLASS && regno < FIRST_PSEUDO_REGISTER)
5923 /* If we have something like (mem (mem (...)), we can safely assume the
5924 inner MEM will end up in a general register after reloading, so there's
5925 no need for a secondary reload. */
5926 if (GET_CODE (x) == MEM && GET_CODE (XEXP (x, 0)) == MEM)
5929 /* Trying to load a constant into a FP register during PIC code
5930 generation requires %r1 as a scratch register. For float modes,
5931 the only legitimate constant is CONST0_RTX. However, there are
5932 a few patterns that accept constant double operands. */
5934 && FP_REG_CLASS_P (rclass)
5935 && (GET_CODE (x) == CONST_INT || GET_CODE (x) == CONST_DOUBLE))
5940 sri->icode = CODE_FOR_reload_insi_r1;
5944 sri->icode = CODE_FOR_reload_indi_r1;
5948 sri->icode = CODE_FOR_reload_insf_r1;
5952 sri->icode = CODE_FOR_reload_indf_r1;
5961 /* Secondary reloads of symbolic expressions require %r1 as a scratch
5962 register when we're generating PIC code or when the operand isn't
5964 if (pa_symbolic_expression_p (x))
5966 if (GET_CODE (x) == HIGH)
5969 if (flag_pic || !read_only_operand (x, VOIDmode))
5974 sri->icode = CODE_FOR_reload_insi_r1;
5978 sri->icode = CODE_FOR_reload_indi_r1;
5988 /* Profiling showed the PA port spends about 1.3% of its compilation
5989 time in true_regnum from calls inside pa_secondary_reload_class. */
5990 if (regno >= FIRST_PSEUDO_REGISTER || GET_CODE (x) == SUBREG)
5991 regno = true_regnum (x);
5993 /* Handle reloads for floating point loads and stores. */
5994 if ((regno >= FIRST_PSEUDO_REGISTER || regno == -1)
5995 && FP_REG_CLASS_P (rclass))
6001 /* We don't need an intermediate for indexed and LO_SUM DLT
6002 memory addresses. When INT14_OK_STRICT is true, it might
6003 appear that we could directly allow register indirect
6004 memory addresses. However, this doesn't work because we
6005 don't support SUBREGs in floating-point register copies
6006 and reload doesn't tell us when it's going to use a SUBREG. */
6007 if (IS_INDEX_ADDR_P (x)
6008 || IS_LO_SUM_DLT_ADDR_P (x))
6011 /* Request intermediate general register. */
6012 return GENERAL_REGS;
6015 /* Request a secondary reload with a general scratch register
6016 for everything else. ??? Could symbolic operands be handled
6017 directly when generating non-pic PA 2.0 code? */
6019 ? direct_optab_handler (reload_in_optab, mode)
6020 : direct_optab_handler (reload_out_optab, mode));
6024 /* A SAR<->FP register copy requires an intermediate general register
6025 and secondary memory. We need a secondary reload with a general
6026 scratch register for spills. */
6027 if (rclass == SHIFT_REGS)
6030 if (regno >= FIRST_PSEUDO_REGISTER || regno < 0)
6033 ? direct_optab_handler (reload_in_optab, mode)
6034 : direct_optab_handler (reload_out_optab, mode));
6038 /* Handle FP copy. */
6039 if (FP_REG_CLASS_P (REGNO_REG_CLASS (regno)))
6040 return GENERAL_REGS;
6043 if (regno >= 0 && regno < FIRST_PSEUDO_REGISTER
6044 && REGNO_REG_CLASS (regno) == SHIFT_REGS
6045 && FP_REG_CLASS_P (rclass))
6046 return GENERAL_REGS;
6051 /* Implement TARGET_EXTRA_LIVE_ON_ENTRY. The argument pointer
6052 is only marked as live on entry by df-scan when it is a fixed
6053 register. It isn't a fixed register in the 64-bit runtime,
6054 so we need to mark it here. */
6057 pa_extra_live_on_entry (bitmap regs)
6060 bitmap_set_bit (regs, ARG_POINTER_REGNUM);
6063 /* Implement EH_RETURN_HANDLER_RTX. The MEM needs to be volatile
6064 to prevent it from being deleted. */
6067 pa_eh_return_handler_rtx (void)
6071 tmp = gen_rtx_PLUS (word_mode, hard_frame_pointer_rtx,
6072 TARGET_64BIT ? GEN_INT (-16) : GEN_INT (-20));
6073 tmp = gen_rtx_MEM (word_mode, tmp);
6078 /* In the 32-bit runtime, arguments larger than eight bytes are passed
6079 by invisible reference. As a GCC extension, we also pass anything
6080 with a zero or variable size by reference.
6082 The 64-bit runtime does not describe passing any types by invisible
6083 reference. The internals of GCC can't currently handle passing
6084 empty structures, and zero or variable length arrays when they are
6085 not passed entirely on the stack or by reference. Thus, as a GCC
6086 extension, we pass these types by reference. The HP compiler doesn't
6087 support these types, so hopefully there shouldn't be any compatibility
6088 issues. This may have to be revisited when HP releases a C99 compiler
6089 or updates the ABI. */
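/* So, for instance, a 16-byte struct or a zero-sized struct is passed by
   invisible reference, while an 8-byte double or a 4-byte int is passed by
   value, matching the predicate returned below.  */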
6092 pa_pass_by_reference (cumulative_args_t ca ATTRIBUTE_UNUSED,
6093 enum machine_mode mode, const_tree type,
6094 bool named ATTRIBUTE_UNUSED)
6099 size = int_size_in_bytes (type);
6101 size = GET_MODE_SIZE (mode);
6106 return size <= 0 || size > 8;
6110 pa_function_arg_padding (enum machine_mode mode, const_tree type)
6115 && (AGGREGATE_TYPE_P (type)
6116 || TREE_CODE (type) == COMPLEX_TYPE
6117 || TREE_CODE (type) == VECTOR_TYPE)))
6119 /* Return none if justification is not required. */
6121 && TREE_CODE (TYPE_SIZE (type)) == INTEGER_CST
6122 && (int_size_in_bytes (type) * BITS_PER_UNIT) % PARM_BOUNDARY == 0)
6125 /* The directions set here are ignored when a BLKmode argument larger
6126 than a word is placed in a register. Different code is used for
6127 the stack and registers. This makes it difficult to have a
6128 consistent data representation for both the stack and registers.
6129 For both runtimes, the justification and padding for arguments on
6130 the stack and in registers should be identical. */
6132 /* The 64-bit runtime specifies left justification for aggregates. */
6135 /* The 32-bit runtime architecture specifies right justification.
6136 When the argument is passed on the stack, the argument is padded
6137 with garbage on the left. The HP compiler pads with zeros. */
6141 if (GET_MODE_BITSIZE (mode) < PARM_BOUNDARY)
6148 /* Do what is necessary for `va_start'. We look at the current function
6149 to determine if stdargs or varargs is used and fill in an initial
6150 va_list. A pointer to this constructor is returned. */
6153 hppa_builtin_saveregs (void)
6156 tree fntype = TREE_TYPE (current_function_decl);
6157 int argadj = ((!stdarg_p (fntype))
6158 ? UNITS_PER_WORD : 0);
6161 offset = plus_constant (Pmode, crtl->args.arg_offset_rtx, argadj);
6163 offset = crtl->args.arg_offset_rtx;
6169 /* Adjust for varargs/stdarg differences. */
6171 offset = plus_constant (Pmode, crtl->args.arg_offset_rtx, -argadj);
6173 offset = crtl->args.arg_offset_rtx;
6175 /* We need to save %r26 .. %r19 inclusive starting at offset -64
6176 from the incoming arg pointer and growing to larger addresses. */
6177 for (i = 26, off = -64; i >= 19; i--, off += 8)
6178 emit_move_insn (gen_rtx_MEM (word_mode,
6179 plus_constant (Pmode,
6180 arg_pointer_rtx, off)),
6181 gen_rtx_REG (word_mode, i));
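/* So %r26 is stored at -64, %r25 at -56, ..., %r19 at -8 from the incoming
   arg pointer: eight doubleword slots growing toward the arg pointer, as
   described in the comment above.  */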
6183 /* The incoming args pointer points just beyond the flushback area;
6184 normally this is not a serious concern. However, when we are doing
6185 varargs/stdargs we want to make the arg pointer point to the start
6186 of the incoming argument area. */
6187 emit_move_insn (virtual_incoming_args_rtx,
6188 plus_constant (Pmode, arg_pointer_rtx, -64));
6190 /* Now return a pointer to the first anonymous argument. */
6191 return copy_to_reg (expand_binop (Pmode, add_optab,
6192 virtual_incoming_args_rtx,
6193 offset, 0, 0, OPTAB_LIB_WIDEN));
6196 /* Store general registers on the stack. */
6197 dest = gen_rtx_MEM (BLKmode,
6198 plus_constant (Pmode, crtl->args.internal_arg_pointer,
6200 set_mem_alias_set (dest, get_varargs_alias_set ());
6201 set_mem_align (dest, BITS_PER_WORD);
6202 move_block_from_reg (23, dest, 4);
6204 /* move_block_from_reg will emit code to store the argument registers
6205 individually as scalar stores.
6207 However, other insns may later load from the same addresses for
6208 a structure load (passing a struct to a varargs routine).
6210 The alias code assumes that such aliasing can never happen, so we
6211 have to keep memory referencing insns from moving up beyond the
6212 last argument register store. So we emit a blockage insn here. */
6213 emit_insn (gen_blockage ());
6215 return copy_to_reg (expand_binop (Pmode, add_optab,
6216 crtl->args.internal_arg_pointer,
6217 offset, 0, 0, OPTAB_LIB_WIDEN));
6221 hppa_va_start (tree valist, rtx nextarg)
6223 nextarg = expand_builtin_saveregs ();
6224 std_expand_builtin_va_start (valist, nextarg);
6228 hppa_gimplify_va_arg_expr (tree valist, tree type, gimple_seq *pre_p,
6233 /* Args grow upward. We can use the generic routines. */
6234 return std_gimplify_va_arg_expr (valist, type, pre_p, post_p);
6236 else /* !TARGET_64BIT */
6238 tree ptr = build_pointer_type (type);
6241 unsigned int size, ofs;
6244 indirect = pass_by_reference (NULL, TYPE_MODE (type), type, 0);
6248 ptr = build_pointer_type (type);
6250 size = int_size_in_bytes (type);
6251 valist_type = TREE_TYPE (valist);
6253 /* Args grow down. Not handled by generic routines. */
6255 u = fold_convert (sizetype, size_in_bytes (type));
6256 u = fold_build1 (NEGATE_EXPR, sizetype, u);
6257 t = fold_build_pointer_plus (valist, u);
6259 /* Align to 4 or 8 byte boundary depending on argument size. */
6261 u = build_int_cst (TREE_TYPE (t), (HOST_WIDE_INT)(size > 4 ? -8 : -4));
6262 t = build2 (BIT_AND_EXPR, TREE_TYPE (t), t, u);
6263 t = fold_convert (valist_type, t);
6265 t = build2 (MODIFY_EXPR, valist_type, valist, t);
6267 ofs = (8 - size) % 4;
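/* A couple of worked values (a sketch): a 1-byte argument gets ofs =
   (8 - 1) % 4 = 3 and a 6-byte aggregate gets ofs = 2, stepping the pointer
   past the pad bytes so it points at the right-justified object, per the
   32-bit runtime's right-justification rule noted earlier.  */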
6269 t = fold_build_pointer_plus_hwi (t, ofs);
6271 t = fold_convert (ptr, t);
6272 t = build_va_arg_indirect_ref (t);
6275 t = build_va_arg_indirect_ref (t);
6281 /* True if MODE is valid for the target. By "valid", we mean able to
6282 be manipulated in non-trivial ways. In particular, this means all
6283 the arithmetic is supported.
6285 Currently, TImode is not valid as the HP 64-bit runtime documentation
6286 doesn't document the alignment and calling conventions for this type.
6287 Thus, we return false when PRECISION is 2 * BITS_PER_WORD and
6288 2 * BITS_PER_WORD isn't equal to LONG_LONG_TYPE_SIZE. */
6291 pa_scalar_mode_supported_p (enum machine_mode mode)
6293 int precision = GET_MODE_PRECISION (mode);
6295 switch (GET_MODE_CLASS (mode))
6297 case MODE_PARTIAL_INT:
6299 if (precision == CHAR_TYPE_SIZE)
6301 if (precision == SHORT_TYPE_SIZE)
6303 if (precision == INT_TYPE_SIZE)
6305 if (precision == LONG_TYPE_SIZE)
6307 if (precision == LONG_LONG_TYPE_SIZE)
6312 if (precision == FLOAT_TYPE_SIZE)
6314 if (precision == DOUBLE_TYPE_SIZE)
6316 if (precision == LONG_DOUBLE_TYPE_SIZE)
6320 case MODE_DECIMAL_FLOAT:
6328 /* Return TRUE if INSN, a jump insn, has an unfilled delay slot and
6329 it branches into the delay slot. Otherwise, return FALSE. */
6332 branch_to_delay_slot_p (rtx insn)
6336 if (dbr_sequence_length ())
6339 jump_insn = next_active_insn (JUMP_LABEL (insn));
6342 insn = next_active_insn (insn);
6343 if (jump_insn == insn)
6346 /* We can't rely on the length of asms. So, we return FALSE when
6347 the branch is followed by an asm. */
6349 || GET_CODE (PATTERN (insn)) == ASM_INPUT
6350 || extract_asm_operands (PATTERN (insn)) != NULL_RTX
6351 || get_attr_length (insn) > 0)
6358 /* Return TRUE if INSN, a forward jump insn, needs a nop in its delay slot.
6360 This occurs when INSN has an unfilled delay slot and is followed
6361 by an asm. Disaster can occur if the asm is empty and the jump
6362 branches into the delay slot. So, we add a nop in the delay slot
6363 when this occurs. */
6366 branch_needs_nop_p (rtx insn)
6370 if (dbr_sequence_length ())
6373 jump_insn = next_active_insn (JUMP_LABEL (insn));
6376 insn = next_active_insn (insn);
6377 if (!insn || jump_insn == insn)
6380 if (!(GET_CODE (PATTERN (insn)) == ASM_INPUT
6381 || extract_asm_operands (PATTERN (insn)) != NULL_RTX)
6382 && get_attr_length (insn) > 0)
6389 /* Return TRUE if INSN, a forward jump insn, can use nullification
6390 to skip the following instruction. This avoids an extra cycle due
6391 to a mis-predicted branch when we fall through. */
6394 use_skip_p (rtx insn)
6396 rtx jump_insn = next_active_insn (JUMP_LABEL (insn));
6400 insn = next_active_insn (insn);
6402 /* We can't rely on the length of asms, so we can't skip asms. */
6404 || GET_CODE (PATTERN (insn)) == ASM_INPUT
6405 || extract_asm_operands (PATTERN (insn)) != NULL_RTX)
6407 if (get_attr_length (insn) == 4
6408 && jump_insn == next_active_insn (insn))
6410 if (get_attr_length (insn) > 0)
6417 /* This routine handles all the normal conditional branch sequences we
6418 might need to generate. It handles compare immediate vs compare
6419 register, nullification of delay slots, varying length branches,
6420 negated branches, and all combinations of the above. It returns the
6421 output appropriate to emit the branch corresponding to all given
6425 pa_output_cbranch (rtx *operands, int negated, rtx insn)
6427 static char buf[100];
6429 int nullify = INSN_ANNULLED_BRANCH_P (insn);
6430 int length = get_attr_length (insn);
6433 /* A conditional branch to the following instruction (e.g. the delay slot)
6434 is asking for a disaster. This can happen when not optimizing and
6435 when jump optimization fails.
6437 While it is usually safe to emit nothing, this can fail if the
6438 preceding instruction is a nullified branch with an empty delay
6439 slot and the same branch target as this branch. We could check
6440 for this but jump optimization should eliminate nop jumps. It
6441 is always safe to emit a nop. */
6442 if (branch_to_delay_slot_p (insn))
6445 /* The doubleword form of the cmpib instruction doesn't have the LEU
6446 and GTU conditions while the cmpb instruction does. Since we accept
6447 zero for cmpb, we must ensure that we use cmpb for the comparison. */
6448 if (GET_MODE (operands[1]) == DImode && operands[2] == const0_rtx)
6449 operands[2] = gen_rtx_REG (DImode, 0);
6450 if (GET_MODE (operands[2]) == DImode && operands[1] == const0_rtx)
6451 operands[1] = gen_rtx_REG (DImode, 0);
6453 /* If this is a long branch with its delay slot unfilled, set `nullify'
6454 as it can nullify the delay slot and save a nop. */
6455 if (length == 8 && dbr_sequence_length () == 0)
6458 /* If this is a short forward conditional branch which did not get
6459 its delay slot filled, the delay slot can still be nullified. */
6460 if (! nullify && length == 4 && dbr_sequence_length () == 0)
6461 nullify = forward_branch_p (insn);
6463 /* A forward branch over a single nullified insn can be done with a
6464 comclr instruction. This avoids a single cycle penalty due to a
6465 mis-predicted branch if we fall through (branch not taken). */
6466 useskip = (length == 4 && nullify) ? use_skip_p (insn) : FALSE;
6470 /* All short conditional branches except backwards with an unfilled
6474 strcpy (buf, "{com%I2clr,|cmp%I2clr,}");
6476 strcpy (buf, "{com%I2b,|cmp%I2b,}");
6477 if (GET_MODE (operands[1]) == DImode)
6480 strcat (buf, "%B3");
6482 strcat (buf, "%S3");
6484 strcat (buf, " %2,%r1,%%r0");
6487 if (branch_needs_nop_p (insn))
6488 strcat (buf, ",n %2,%r1,%0%#");
6490 strcat (buf, ",n %2,%r1,%0");
6493 strcat (buf, " %2,%r1,%0");
6496 /* All long conditionals. Note a short backward branch with an
6497 unfilled delay slot is treated just like a long backward branch
6498 with an unfilled delay slot. */
6500 /* Handle weird backwards branch with a filled delay slot
6501 which is nullified. */
6502 if (dbr_sequence_length () != 0
6503 && ! forward_branch_p (insn)
6506 strcpy (buf, "{com%I2b,|cmp%I2b,}");
6507 if (GET_MODE (operands[1]) == DImode)
6510 strcat (buf, "%S3");
6512 strcat (buf, "%B3");
6513 strcat (buf, ",n %2,%r1,.+12\n\tb %0");
6515 /* Handle short backwards branch with an unfilled delay slot.
6516 Using a comb;nop rather than comiclr;bl saves 1 cycle for both
6517 taken and untaken branches. */
6518 else if (dbr_sequence_length () == 0
6519 && ! forward_branch_p (insn)
6520 && INSN_ADDRESSES_SET_P ()
6521 && VAL_14_BITS_P (INSN_ADDRESSES (INSN_UID (JUMP_LABEL (insn)))
6522 - INSN_ADDRESSES (INSN_UID (insn)) - 8))
6524 strcpy (buf, "{com%I2b,|cmp%I2b,}");
6525 if (GET_MODE (operands[1]) == DImode)
6528 strcat (buf, "%B3 %2,%r1,%0%#");
6530 strcat (buf, "%S3 %2,%r1,%0%#");
6534 strcpy (buf, "{com%I2clr,|cmp%I2clr,}");
6535 if (GET_MODE (operands[1]) == DImode)
6538 strcat (buf, "%S3");
6540 strcat (buf, "%B3");
6542 strcat (buf, " %2,%r1,%%r0\n\tb,n %0");
6544 strcat (buf, " %2,%r1,%%r0\n\tb %0");
6549 /* The reversed conditional branch must branch over one additional
6550 instruction if the delay slot is filled and needs to be extracted
6551 by pa_output_lbranch. If the delay slot is empty or this is a
6552 nullified forward branch, the instruction after the reversed
6553 condition branch must be nullified. */
6554 if (dbr_sequence_length () == 0
6555 || (nullify && forward_branch_p (insn)))
6559 operands[4] = GEN_INT (length);
6564 operands[4] = GEN_INT (length + 4);
6567 /* Create a reversed conditional branch which branches around
6568 the following insns. */
6569 if (GET_MODE (operands[1]) != DImode)
6575 "{com%I2b,%S3,n %2,%r1,.+%4|cmp%I2b,%S3,n %2,%r1,.+%4}");
6578 "{com%I2b,%B3,n %2,%r1,.+%4|cmp%I2b,%B3,n %2,%r1,.+%4}");
6584 "{com%I2b,%S3 %2,%r1,.+%4|cmp%I2b,%S3 %2,%r1,.+%4}");
6587 "{com%I2b,%B3 %2,%r1,.+%4|cmp%I2b,%B3 %2,%r1,.+%4}");
6596 "{com%I2b,*%S3,n %2,%r1,.+%4|cmp%I2b,*%S3,n %2,%r1,.+%4}");
6599 "{com%I2b,*%B3,n %2,%r1,.+%4|cmp%I2b,*%B3,n %2,%r1,.+%4}");
6605 "{com%I2b,*%S3 %2,%r1,.+%4|cmp%I2b,*%S3 %2,%r1,.+%4}");
6608 "{com%I2b,*%B3 %2,%r1,.+%4|cmp%I2b,*%B3 %2,%r1,.+%4}");
6612 output_asm_insn (buf, operands);
6613 return pa_output_lbranch (operands[0], insn, xdelay);
6618 /* This routine handles output of long unconditional branches that
6619 exceed the maximum range of a simple branch instruction. Since
6620 we don't have a register available for the branch, we save register
6621 %r1 in the frame marker, load the branch destination DEST into %r1,
6622 execute the branch, and restore %r1 in the delay slot of the branch.
6624 Since long branches may have an insn in the delay slot and the
6625 delay slot is used to restore %r1, we in general need to extract
6626 this insn and execute it before the branch. However, to facilitate
6627 use of this function by conditional branches, we also provide an
6628 option to not extract the delay insn so that it will be emitted
6629 after the long branch. So, if there is an insn in the delay slot,
6630 it is extracted if XDELAY is nonzero.
6632 The lengths of the various long-branch sequences are 20, 16 and 24
6633 bytes for the portable runtime, non-PIC and PIC cases, respectively. */
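/* For illustration only (the insns are emitted below and the save slot
   depends on the frame), the 32-bit non-PIC sequence is roughly:

       stw %r1,-20(%r30)	; save %r1 in the frame marker
       ldil L'dest,%r1
       be R'dest(%sr4,%r1)	; very long branch
       ldw -20(%r30),%r1	; restore %r1 in the delay slot

   The portable runtime case loads DEST with ldil/ldo and branches with
   bv (20 bytes); the PIC case builds a pc-relative address with
   bl .+8/addil/ldo before the bv (24 bytes).  */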
6636 pa_output_lbranch (rtx dest, rtx insn, int xdelay)
6640 xoperands[0] = dest;
6642 /* First, free up the delay slot. */
6643 if (xdelay && dbr_sequence_length () != 0)
6645 /* We can't handle a jump in the delay slot. */
6646 gcc_assert (GET_CODE (NEXT_INSN (insn)) != JUMP_INSN);
6648 final_scan_insn (NEXT_INSN (insn), asm_out_file,
6651 /* Now delete the delay insn. */
6652 SET_INSN_DELETED (NEXT_INSN (insn));
6655 /* Output an insn to save %r1. The runtime documentation doesn't
6656 specify whether the "Clean Up" slot in the caller's frame can
6657 be clobbered by the callee. It isn't copied by HP's builtin
6658 alloca, so this suggests that it can be clobbered if necessary.
6659 The "Static Link" location is copied by HP builtin alloca, so
6660 we avoid using it. Using the cleanup slot might be a problem
6661 if we have to interoperate with languages that pass cleanup
6662 information. However, it should be possible to handle these
6663 situations with GCC's asm feature.
6665 The "Current RP" slot is reserved for the called procedure, so
6666 we try to use it when we don't have a frame of our own. It's
6667 rather unlikely that we won't have a frame when we need to emit this long branch.
6670 Really the way to go long term is a register scavenger; go to
6671 the target of the jump and find a register which we can use
6672 as a scratch to hold the value in %r1. Then, we wouldn't have
6673 to free up the delay slot or clobber a slot that may be needed
6674 for other purposes. */
6677 if (actual_fsize == 0 && !df_regs_ever_live_p (2))
6678 /* Use the return pointer slot in the frame marker. */
6679 output_asm_insn ("std %%r1,-16(%%r30)", xoperands);
6681 /* Use the slot at -40 in the frame marker since HP builtin
6682 alloca doesn't copy it. */
6683 output_asm_insn ("std %%r1,-40(%%r30)", xoperands);
6687 if (actual_fsize == 0 && !df_regs_ever_live_p (2))
6688 /* Use the return pointer slot in the frame marker. */
6689 output_asm_insn ("stw %%r1,-20(%%r30)", xoperands);
6691 /* Use the "Clean Up" slot in the frame marker. In GCC,
6692 the only other use of this location is for copying a
6693 floating point double argument from a floating-point
6694 register to two general registers. The copy is done
6695 as an "atomic" operation when outputting a call, so it
6696 won't interfere with our using the location here. */
6697 output_asm_insn ("stw %%r1,-12(%%r30)", xoperands);
6700 if (TARGET_PORTABLE_RUNTIME)
6702 output_asm_insn ("ldil L'%0,%%r1", xoperands);
6703 output_asm_insn ("ldo R'%0(%%r1),%%r1", xoperands);
6704 output_asm_insn ("bv %%r0(%%r1)", xoperands);
6708 output_asm_insn ("{bl|b,l} .+8,%%r1", xoperands);
6709 if (TARGET_SOM || !TARGET_GAS)
6711 xoperands[1] = gen_label_rtx ();
6712 output_asm_insn ("addil L'%l0-%l1,%%r1", xoperands);
6713 targetm.asm_out.internal_label (asm_out_file, "L",
6714 CODE_LABEL_NUMBER (xoperands[1]));
6715 output_asm_insn ("ldo R'%l0-%l1(%%r1),%%r1", xoperands);
6719 output_asm_insn ("addil L'%l0-$PIC_pcrel$0+4,%%r1", xoperands);
6720 output_asm_insn ("ldo R'%l0-$PIC_pcrel$0+8(%%r1),%%r1", xoperands);
6722 output_asm_insn ("bv %%r0(%%r1)", xoperands);
6725 /* Now output a very long branch to the original target. */
6726 output_asm_insn ("ldil L'%l0,%%r1\n\tbe R'%l0(%%sr4,%%r1)", xoperands);
6728 /* Now restore the value of %r1 in the delay slot. */
6731 if (actual_fsize == 0 && !df_regs_ever_live_p (2))
6732 return "ldd -16(%%r30),%%r1";
6734 return "ldd -40(%%r30),%%r1";
6738 if (actual_fsize == 0 && !df_regs_ever_live_p (2))
6739 return "ldw -20(%%r30),%%r1";
6741 return "ldw -12(%%r30),%%r1";
6745 /* This routine handles all the branch-on-bit conditional branch sequences we
6746 might need to generate. It handles nullification of delay slots,
6747 varying length branches, negated branches and all combinations of the
6748 above. It returns the appropriate output template to emit the branch. */
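/* For reference, the basic form emitted here is "bb,cond %reg,bit,target",
   i.e. branch on the state of a single bit of %reg.  The useskip form
   instead emits an extract ({extrs|extrw,s} or extrd,s) whose condition
   completer nullifies the following insn, so no branch is needed.  */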
6751 pa_output_bb (rtx *operands ATTRIBUTE_UNUSED, int negated, rtx insn, int which)
6753 static char buf[100];
6755 int nullify = INSN_ANNULLED_BRANCH_P (insn);
6756 int length = get_attr_length (insn);
6759 /* A conditional branch to the following instruction (e.g. the delay slot) is
6760 asking for a disaster. I do not think this can happen as this pattern
6761 is only used when optimizing; jump optimization should eliminate the
6762 jump. But be prepared just in case. */
6764 if (branch_to_delay_slot_p (insn))
6767 /* If this is a long branch with its delay slot unfilled, set `nullify'
6768 as it can nullify the delay slot and save a nop. */
6769 if (length == 8 && dbr_sequence_length () == 0)
6772 /* If this is a short forward conditional branch which did not get
6773 its delay slot filled, the delay slot can still be nullified. */
6774 if (! nullify && length == 4 && dbr_sequence_length () == 0)
6775 nullify = forward_branch_p (insn);
6777 /* A forward branch over a single nullified insn can be done with an
6778 extrs instruction. This avoids a single cycle penalty due to
6779 mis-predicted branch if we fall through (branch not taken). */
6780 useskip = (length == 4 && nullify) ? use_skip_p (insn) : FALSE;
6785 /* All short conditional branches except backwards with an unfilled delay slot. */
6789 strcpy (buf, "{extrs,|extrw,s,}");
6791 strcpy (buf, "bb,");
6792 if (useskip && GET_MODE (operands[0]) == DImode)
6793 strcpy (buf, "extrd,s,*");
6794 else if (GET_MODE (operands[0]) == DImode)
6795 strcpy (buf, "bb,*");
6796 if ((which == 0 && negated)
6797 || (which == 1 && ! negated))
6802 strcat (buf, " %0,%1,1,%%r0");
6803 else if (nullify && negated)
6805 if (branch_needs_nop_p (insn))
6806 strcat (buf, ",n %0,%1,%3%#");
6808 strcat (buf, ",n %0,%1,%3");
6810 else if (nullify && ! negated)
6812 if (branch_needs_nop_p (insn))
6813 strcat (buf, ",n %0,%1,%2%#");
6815 strcat (buf, ",n %0,%1,%2");
6817 else if (! nullify && negated)
6818 strcat (buf, " %0,%1,%3");
6819 else if (! nullify && ! negated)
6820 strcat (buf, " %0,%1,%2");
6823 /* All long conditionals. Note a short backward branch with an
6824 unfilled delay slot is treated just like a long backward branch
6825 with an unfilled delay slot. */
6827 /* Handle weird backwards branch with a filled delay slot
6828 which is nullified. */
6829 if (dbr_sequence_length () != 0
6830 && ! forward_branch_p (insn)
6833 strcpy (buf, "bb,");
6834 if (GET_MODE (operands[0]) == DImode)
6836 if ((which == 0 && negated)
6837 || (which == 1 && ! negated))
6842 strcat (buf, ",n %0,%1,.+12\n\tb %3");
6844 strcat (buf, ",n %0,%1,.+12\n\tb %2");
6846 /* Handle short backwards branch with an unfilled delay slot.
6847 Using a bb;nop rather than extrs;bl saves 1 cycle for both
6848 taken and untaken branches. */
6849 else if (dbr_sequence_length () == 0
6850 && ! forward_branch_p (insn)
6851 && INSN_ADDRESSES_SET_P ()
6852 && VAL_14_BITS_P (INSN_ADDRESSES (INSN_UID (JUMP_LABEL (insn)))
6853 - INSN_ADDRESSES (INSN_UID (insn)) - 8))
6855 strcpy (buf, "bb,");
6856 if (GET_MODE (operands[0]) == DImode)
6858 if ((which == 0 && negated)
6859 || (which == 1 && ! negated))
6864 strcat (buf, " %0,%1,%3%#");
6866 strcat (buf, " %0,%1,%2%#");
6870 if (GET_MODE (operands[0]) == DImode)
6871 strcpy (buf, "extrd,s,*");
6873 strcpy (buf, "{extrs,|extrw,s,}");
6874 if ((which == 0 && negated)
6875 || (which == 1 && ! negated))
6879 if (nullify && negated)
6880 strcat (buf, " %0,%1,1,%%r0\n\tb,n %3");
6881 else if (nullify && ! negated)
6882 strcat (buf, " %0,%1,1,%%r0\n\tb,n %2");
6884 strcat (buf, " %0,%1,1,%%r0\n\tb %3");
6886 strcat (buf, " %0,%1,1,%%r0\n\tb %2");
6891 /* The reversed conditional branch must branch over one additional
6892 instruction if the delay slot is filled and needs to be extracted
6893 by pa_output_lbranch. If the delay slot is empty or this is a
6894 nullified forward branch, the instruction after the reversed
6895 condition branch must be nullified. */
6896 if (dbr_sequence_length () == 0
6897 || (nullify && forward_branch_p (insn)))
6901 operands[4] = GEN_INT (length);
6906 operands[4] = GEN_INT (length + 4);
6909 if (GET_MODE (operands[0]) == DImode)
6910 strcpy (buf, "bb,*");
6912 strcpy (buf, "bb,");
6913 if ((which == 0 && negated)
6914 || (which == 1 && !negated))
6919 strcat (buf, ",n %0,%1,.+%4");
6921 strcat (buf, " %0,%1,.+%4");
6922 output_asm_insn (buf, operands);
6923 return pa_output_lbranch (negated ? operands[3] : operands[2],
6929 /* This routine handles all the branch-on-variable-bit conditional branch
6930 sequences we might need to generate. It handles nullification of delay
6931 slots, varying length branches, negated branches and all combinations
6932 of the above. It returns the appropriate output template to emit the branch. */
6936 pa_output_bvb (rtx *operands ATTRIBUTE_UNUSED, int negated, rtx insn,
6939 static char buf[100];
6941 int nullify = INSN_ANNULLED_BRANCH_P (insn);
6942 int length = get_attr_length (insn);
6945 /* A conditional branch to the following instruction (e.g. the delay slot) is
6946 asking for a disaster. I do not think this can happen as this pattern
6947 is only used when optimizing; jump optimization should eliminate the
6948 jump. But be prepared just in case. */
6950 if (branch_to_delay_slot_p (insn))
6953 /* If this is a long branch with its delay slot unfilled, set `nullify'
6954 as it can nullify the delay slot and save a nop. */
6955 if (length == 8 && dbr_sequence_length () == 0)
6958 /* If this is a short forward conditional branch which did not get
6959 its delay slot filled, the delay slot can still be nullified. */
6960 if (! nullify && length == 4 && dbr_sequence_length () == 0)
6961 nullify = forward_branch_p (insn);
6963 /* A forward branch over a single nullified insn can be done with an
6964 extrs instruction. This avoids a single cycle penalty due to
6965 mis-predicted branch if we fall through (branch not taken). */
6966 useskip = (length == 4 && nullify) ? use_skip_p (insn) : FALSE;
6971 /* All short conditional branches except backwards with an unfilled delay slot. */
6975 strcpy (buf, "{vextrs,|extrw,s,}");
6977 strcpy (buf, "{bvb,|bb,}");
6978 if (useskip && GET_MODE (operands[0]) == DImode)
6979 strcpy (buf, "extrd,s,*");
6980 else if (GET_MODE (operands[0]) == DImode)
6981 strcpy (buf, "bb,*");
6982 if ((which == 0 && negated)
6983 || (which == 1 && ! negated))
6988 strcat (buf, "{ %0,1,%%r0| %0,%%sar,1,%%r0}");
6989 else if (nullify && negated)
6991 if (branch_needs_nop_p (insn))
6992 strcat (buf, "{,n %0,%3%#|,n %0,%%sar,%3%#}");
6994 strcat (buf, "{,n %0,%3|,n %0,%%sar,%3}");
6996 else if (nullify && ! negated)
6998 if (branch_needs_nop_p (insn))
6999 strcat (buf, "{,n %0,%2%#|,n %0,%%sar,%2%#}");
7001 strcat (buf, "{,n %0,%2|,n %0,%%sar,%2}");
7003 else if (! nullify && negated)
7004 strcat (buf, "{ %0,%3| %0,%%sar,%3}");
7005 else if (! nullify && ! negated)
7006 strcat (buf, "{ %0,%2| %0,%%sar,%2}");
7009 /* All long conditionals. Note a short backward branch with an
7010 unfilled delay slot is treated just like a long backward branch
7011 with an unfilled delay slot. */
7013 /* Handle weird backwards branch with a filled delay slot
7014 which is nullified. */
7015 if (dbr_sequence_length () != 0
7016 && ! forward_branch_p (insn)
7019 strcpy (buf, "{bvb,|bb,}");
7020 if (GET_MODE (operands[0]) == DImode)
7022 if ((which == 0 && negated)
7023 || (which == 1 && ! negated))
7028 strcat (buf, "{,n %0,.+12\n\tb %3|,n %0,%%sar,.+12\n\tb %3}");
7030 strcat (buf, "{,n %0,.+12\n\tb %2|,n %0,%%sar,.+12\n\tb %2}");
7032 /* Handle short backwards branch with an unfilled delay slot.
7033 Using a bb;nop rather than extrs;bl saves 1 cycle for both
7034 taken and untaken branches. */
7035 else if (dbr_sequence_length () == 0
7036 && ! forward_branch_p (insn)
7037 && INSN_ADDRESSES_SET_P ()
7038 && VAL_14_BITS_P (INSN_ADDRESSES (INSN_UID (JUMP_LABEL (insn)))
7039 - INSN_ADDRESSES (INSN_UID (insn)) - 8))
7041 strcpy (buf, "{bvb,|bb,}");
7042 if (GET_MODE (operands[0]) == DImode)
7044 if ((which == 0 && negated)
7045 || (which == 1 && ! negated))
7050 strcat (buf, "{ %0,%3%#| %0,%%sar,%3%#}");
7052 strcat (buf, "{ %0,%2%#| %0,%%sar,%2%#}");
7056 strcpy (buf, "{vextrs,|extrw,s,}");
7057 if (GET_MODE (operands[0]) == DImode)
7058 strcpy (buf, "extrd,s,*");
7059 if ((which == 0 && negated)
7060 || (which == 1 && ! negated))
7064 if (nullify && negated)
7065 strcat (buf, "{ %0,1,%%r0\n\tb,n %3| %0,%%sar,1,%%r0\n\tb,n %3}");
7066 else if (nullify && ! negated)
7067 strcat (buf, "{ %0,1,%%r0\n\tb,n %2| %0,%%sar,1,%%r0\n\tb,n %2}");
7069 strcat (buf, "{ %0,1,%%r0\n\tb %3| %0,%%sar,1,%%r0\n\tb %3}");
7071 strcat (buf, "{ %0,1,%%r0\n\tb %2| %0,%%sar,1,%%r0\n\tb %2}");
7076 /* The reversed conditional branch must branch over one additional
7077 instruction if the delay slot is filled and needs to be extracted
7078 by pa_output_lbranch. If the delay slot is empty or this is a
7079 nullified forward branch, the instruction after the reversed
7080 condition branch must be nullified. */
7081 if (dbr_sequence_length () == 0
7082 || (nullify && forward_branch_p (insn)))
7086 operands[4] = GEN_INT (length);
7091 operands[4] = GEN_INT (length + 4);
7094 if (GET_MODE (operands[0]) == DImode)
7095 strcpy (buf, "bb,*");
7097 strcpy (buf, "{bvb,|bb,}");
7098 if ((which == 0 && negated)
7099 || (which == 1 && !negated))
7104 strcat (buf, ",n {%0,.+%4|%0,%%sar,.+%4}");
7106 strcat (buf, " {%0,.+%4|%0,%%sar,.+%4}");
7107 output_asm_insn (buf, operands);
7108 return pa_output_lbranch (negated ? operands[3] : operands[2],
7114 /* Return the output template for emitting a dbra type insn.
7116 Note it may perform some output operations on its own before
7117 returning the final output string. */
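/* In the common register alternative this is a single
   "addib,cond k,%reg,label": add the immediate K to %reg and branch to
   LABEL when the condition holds on the result, i.e. the classic
   decrement-and-branch loop-closing insn.  */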
7119 pa_output_dbra (rtx *operands, rtx insn, int which_alternative)
7121 int length = get_attr_length (insn);
7123 /* A conditional branch to the following instruction (e.g. the delay slot) is
7124 asking for a disaster. Be prepared! */
7126 if (branch_to_delay_slot_p (insn))
7128 if (which_alternative == 0)
7129 return "ldo %1(%0),%0";
7130 else if (which_alternative == 1)
7132 output_asm_insn ("{fstws|fstw} %0,-16(%%r30)", operands);
7133 output_asm_insn ("ldw -16(%%r30),%4", operands);
7134 output_asm_insn ("ldo %1(%4),%4\n\tstw %4,-16(%%r30)", operands);
7135 return "{fldws|fldw} -16(%%r30),%0";
7139 output_asm_insn ("ldw %0,%4", operands);
7140 return "ldo %1(%4),%4\n\tstw %4,%0";
7144 if (which_alternative == 0)
7146 int nullify = INSN_ANNULLED_BRANCH_P (insn);
7149 /* If this is a long branch with its delay slot unfilled, set `nullify'
7150 as it can nullify the delay slot and save a nop. */
7151 if (length == 8 && dbr_sequence_length () == 0)
7154 /* If this is a short forward conditional branch which did not get
7155 its delay slot filled, the delay slot can still be nullified. */
7156 if (! nullify && length == 4 && dbr_sequence_length () == 0)
7157 nullify = forward_branch_p (insn);
7164 if (branch_needs_nop_p (insn))
7165 return "addib,%C2,n %1,%0,%3%#";
7167 return "addib,%C2,n %1,%0,%3";
7170 return "addib,%C2 %1,%0,%3";
7173 /* Handle weird backwards branch with a filled delay slot
7174 which is nullified. */
7175 if (dbr_sequence_length () != 0
7176 && ! forward_branch_p (insn)
7178 return "addib,%N2,n %1,%0,.+12\n\tb %3";
7179 /* Handle short backwards branch with an unfilled delay slot.
7180 Using an addb;nop rather than addi;bl saves 1 cycle for both
7181 taken and untaken branches. */
7182 else if (dbr_sequence_length () == 0
7183 && ! forward_branch_p (insn)
7184 && INSN_ADDRESSES_SET_P ()
7185 && VAL_14_BITS_P (INSN_ADDRESSES (INSN_UID (JUMP_LABEL (insn)))
7186 - INSN_ADDRESSES (INSN_UID (insn)) - 8))
7187 return "addib,%C2 %1,%0,%3%#";
7189 /* Handle normal cases. */
7191 return "addi,%N2 %1,%0,%0\n\tb,n %3";
7193 return "addi,%N2 %1,%0,%0\n\tb %3";
7196 /* The reversed conditional branch must branch over one additional
7197 instruction if the delay slot is filled and needs to be extracted
7198 by pa_output_lbranch. If the delay slot is empty or this is a
7199 nullified forward branch, the instruction after the reversed
7200 condition branch must be nullified. */
7201 if (dbr_sequence_length () == 0
7202 || (nullify && forward_branch_p (insn)))
7206 operands[4] = GEN_INT (length);
7211 operands[4] = GEN_INT (length + 4);
7215 output_asm_insn ("addib,%N2,n %1,%0,.+%4", operands);
7217 output_asm_insn ("addib,%N2 %1,%0,.+%4", operands);
7219 return pa_output_lbranch (operands[3], insn, xdelay);
7223 /* Deal with gross reload from FP register case. */
7224 else if (which_alternative == 1)
7226 /* Move loop counter from FP register to MEM then into a GR,
7227 increment the GR, store the GR into MEM, and finally reload
7228 the FP register from MEM from within the branch's delay slot. */
7229 output_asm_insn ("{fstws|fstw} %0,-16(%%r30)\n\tldw -16(%%r30),%4",
7231 output_asm_insn ("ldo %1(%4),%4\n\tstw %4,-16(%%r30)", operands);
7233 return "{comb|cmpb},%S2 %%r0,%4,%3\n\t{fldws|fldw} -16(%%r30),%0";
7234 else if (length == 28)
7235 return "{comclr|cmpclr},%B2 %%r0,%4,%%r0\n\tb %3\n\t{fldws|fldw} -16(%%r30),%0";
7238 operands[5] = GEN_INT (length - 16);
7239 output_asm_insn ("{comb|cmpb},%B2 %%r0,%4,.+%5", operands);
7240 output_asm_insn ("{fldws|fldw} -16(%%r30),%0", operands);
7241 return pa_output_lbranch (operands[3], insn, 0);
7244 /* Deal with gross reload from memory case. */
7247 /* Reload loop counter from memory, the store back to memory
7248 happens in the branch's delay slot. */
7249 output_asm_insn ("ldw %0,%4", operands);
7251 return "addib,%C2 %1,%4,%3\n\tstw %4,%0";
7252 else if (length == 16)
7253 return "addi,%N2 %1,%4,%4\n\tb %3\n\tstw %4,%0";
7256 operands[5] = GEN_INT (length - 4);
7257 output_asm_insn ("addib,%N2 %1,%4,.+%5\n\tstw %4,%0", operands);
7258 return pa_output_lbranch (operands[3], insn, 0);
7263 /* Return the output template for emitting a movb type insn.
7265 Note it may perform some output operations on its own before
7266 returning the final output string. */
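/* In the register alternative this is a single "movb,cond %src,%dst,label":
   copy %src to %dst and branch to LABEL when the condition holds on the
   value moved.  The remaining alternatives reload through the FP
   registers, memory, or SAR.  */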
7268 pa_output_movb (rtx *operands, rtx insn, int which_alternative,
7269 int reverse_comparison)
7271 int length = get_attr_length (insn);
7273 /* A conditional branch to the following instruction (e.g. the delay slot) is
7274 asking for a disaster. Be prepared! */
7276 if (branch_to_delay_slot_p (insn))
7278 if (which_alternative == 0)
7279 return "copy %1,%0";
7280 else if (which_alternative == 1)
7282 output_asm_insn ("stw %1,-16(%%r30)", operands);
7283 return "{fldws|fldw} -16(%%r30),%0";
7285 else if (which_alternative == 2)
7291 /* Support the second variant. */
7292 if (reverse_comparison)
7293 PUT_CODE (operands[2], reverse_condition (GET_CODE (operands[2])));
7295 if (which_alternative == 0)
7297 int nullify = INSN_ANNULLED_BRANCH_P (insn);
7300 /* If this is a long branch with its delay slot unfilled, set `nullify'
7301 as it can nullify the delay slot and save a nop. */
7302 if (length == 8 && dbr_sequence_length () == 0)
7305 /* If this is a short forward conditional branch which did not get
7306 its delay slot filled, the delay slot can still be nullified. */
7307 if (! nullify && length == 4 && dbr_sequence_length () == 0)
7308 nullify = forward_branch_p (insn);
7315 if (branch_needs_nop_p (insn))
7316 return "movb,%C2,n %1,%0,%3%#";
7318 return "movb,%C2,n %1,%0,%3";
7321 return "movb,%C2 %1,%0,%3";
7324 /* Handle weird backwards branch with a filled delay slot
7325 which is nullified. */
7326 if (dbr_sequence_length () != 0
7327 && ! forward_branch_p (insn)
7329 return "movb,%N2,n %1,%0,.+12\n\tb %3";
7331 /* Handle short backwards branch with an unfilled delay slot.
7332 Using a movb;nop rather than or;bl saves 1 cycle for both
7333 taken and untaken branches. */
7334 else if (dbr_sequence_length () == 0
7335 && ! forward_branch_p (insn)
7336 && INSN_ADDRESSES_SET_P ()
7337 && VAL_14_BITS_P (INSN_ADDRESSES (INSN_UID (JUMP_LABEL (insn)))
7338 - INSN_ADDRESSES (INSN_UID (insn)) - 8))
7339 return "movb,%C2 %1,%0,%3%#";
7340 /* Handle normal cases. */
7342 return "or,%N2 %1,%%r0,%0\n\tb,n %3";
7344 return "or,%N2 %1,%%r0,%0\n\tb %3";
7347 /* The reversed conditional branch must branch over one additional
7348 instruction if the delay slot is filled and needs to be extracted
7349 by pa_output_lbranch. If the delay slot is empty or this is a
7350 nullified forward branch, the instruction after the reversed
7351 condition branch must be nullified. */
7352 if (dbr_sequence_length () == 0
7353 || (nullify && forward_branch_p (insn)))
7357 operands[4] = GEN_INT (length);
7362 operands[4] = GEN_INT (length + 4);
7366 output_asm_insn ("movb,%N2,n %1,%0,.+%4", operands);
7368 output_asm_insn ("movb,%N2 %1,%0,.+%4", operands);
7370 return pa_output_lbranch (operands[3], insn, xdelay);
7373 /* Deal with gross reload for FP destination register case. */
7374 else if (which_alternative == 1)
7376 /* Move source register to MEM, perform the branch test, then
7377 finally load the FP register from MEM from within the branch's delay slot. */
7379 output_asm_insn ("stw %1,-16(%%r30)", operands);
7381 return "{comb|cmpb},%S2 %%r0,%1,%3\n\t{fldws|fldw} -16(%%r30),%0";
7382 else if (length == 16)
7383 return "{comclr|cmpclr},%B2 %%r0,%1,%%r0\n\tb %3\n\t{fldws|fldw} -16(%%r30),%0";
7386 operands[4] = GEN_INT (length - 4);
7387 output_asm_insn ("{comb|cmpb},%B2 %%r0,%1,.+%4", operands);
7388 output_asm_insn ("{fldws|fldw} -16(%%r30),%0", operands);
7389 return pa_output_lbranch (operands[3], insn, 0);
7392 /* Deal with gross reload from memory case. */
7393 else if (which_alternative == 2)
7395 /* Reload loop counter from memory, the store back to memory
7396 happens in the branch's delay slot. */
7398 return "{comb|cmpb},%S2 %%r0,%1,%3\n\tstw %1,%0";
7399 else if (length == 12)
7400 return "{comclr|cmpclr},%B2 %%r0,%1,%%r0\n\tb %3\n\tstw %1,%0";
7403 operands[4] = GEN_INT (length);
7404 output_asm_insn ("{comb|cmpb},%B2 %%r0,%1,.+%4\n\tstw %1,%0",
7406 return pa_output_lbranch (operands[3], insn, 0);
7409 /* Handle SAR as a destination. */
7413 return "{comb|cmpb},%S2 %%r0,%1,%3\n\tmtsar %r1";
7414 else if (length == 12)
7415 return "{comclr|cmpclr},%B2 %%r0,%1,%%r0\n\tb %3\n\tmtsar %r1";
7418 operands[4] = GEN_INT (length);
7419 output_asm_insn ("{comb|cmpb},%B2 %%r0,%1,.+%4\n\tmtsar %r1",
7421 return pa_output_lbranch (operands[3], insn, 0);
7426 /* Copy any FP arguments in INSN into integer registers. */
7428 copy_fp_args (rtx insn)
7433 for (link = CALL_INSN_FUNCTION_USAGE (insn); link; link = XEXP (link, 1))
7435 int arg_mode, regno;
7436 rtx use = XEXP (link, 0);
7438 if (! (GET_CODE (use) == USE
7439 && GET_CODE (XEXP (use, 0)) == REG
7440 && FUNCTION_ARG_REGNO_P (REGNO (XEXP (use, 0)))))
7443 arg_mode = GET_MODE (XEXP (use, 0));
7444 regno = REGNO (XEXP (use, 0));
7446 /* Is it a floating point register? */
7447 if (regno >= 32 && regno <= 39)
7449 /* Copy the FP register into an integer register via memory. */
7450 if (arg_mode == SFmode)
7452 xoperands[0] = XEXP (use, 0);
7453 xoperands[1] = gen_rtx_REG (SImode, 26 - (regno - 32) / 2);
7454 output_asm_insn ("{fstws|fstw} %0,-16(%%sr0,%%r30)", xoperands);
7455 output_asm_insn ("ldw -16(%%sr0,%%r30),%1", xoperands);
7459 xoperands[0] = XEXP (use, 0);
7460 xoperands[1] = gen_rtx_REG (DImode, 25 - (regno - 34) / 2);
7461 output_asm_insn ("{fstds|fstd} %0,-16(%%sr0,%%r30)", xoperands);
7462 output_asm_insn ("ldw -12(%%sr0,%%r30),%R1", xoperands);
7463 output_asm_insn ("ldw -16(%%sr0,%%r30),%1", xoperands);
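  /* Each SFmode copy above is two insns (fstw + ldw) and each DFmode copy
     three (fstd + two ldw); length_fp_args below presumably charges 8 or
     12 bytes per such argument accordingly.  */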
7469 /* Compute length of the FP argument copy sequence for INSN. */
7471 length_fp_args (rtx insn)
7476 for (link = CALL_INSN_FUNCTION_USAGE (insn); link; link = XEXP (link, 1))
7478 int arg_mode, regno;
7479 rtx use = XEXP (link, 0);
7481 if (! (GET_CODE (use) == USE
7482 && GET_CODE (XEXP (use, 0)) == REG
7483 && FUNCTION_ARG_REGNO_P (REGNO (XEXP (use, 0)))))
7486 arg_mode = GET_MODE (XEXP (use, 0));
7487 regno = REGNO (XEXP (use, 0));
7489 /* Is it a floating point register? */
7490 if (regno >= 32 && regno <= 39)
7492 if (arg_mode == SFmode)
7502 /* Return the attribute length for the millicode call instruction INSN.
7503 The length must match the code generated by pa_output_millicode_call.
7504 We include the delay slot in the returned length as it is better to
7505 overestimate the length than to underestimate it. */
7508 pa_attr_length_millicode_call (rtx insn)
7510 unsigned long distance = -1;
7511 unsigned long total = IN_NAMED_SECTION_P (cfun->decl) ? 0 : total_code_bytes;
7513 if (INSN_ADDRESSES_SET_P ())
7515 distance = (total + insn_current_reference_address (insn));
7516 if (distance < total)
7522 if (!TARGET_LONG_CALLS && distance < 7600000)
7527 else if (TARGET_PORTABLE_RUNTIME)
7531 if (!TARGET_LONG_CALLS && distance < MAX_PCREL17F_OFFSET)
7534 if (TARGET_LONG_ABS_CALL && !flag_pic)
7541 /* INSN is a function call. It may have an unconditional jump in its delay slot.
7544 CALL_DEST is the routine we are calling. */
7547 pa_output_millicode_call (rtx insn, rtx call_dest)
7549 int attr_length = get_attr_length (insn);
7550 int seq_length = dbr_sequence_length ();
7555 xoperands[0] = call_dest;
7556 xoperands[2] = gen_rtx_REG (Pmode, TARGET_64BIT ? 2 : 31);
7558 /* Handle the common case where we are sure that the branch will
7559 reach the beginning of the $CODE$ subspace. The within reach
7560 form of the $$sh_func_adrs call has a length of 28. Because it
7561 has an attribute type of sh_func_adrs, it never has a nonzero
7562 sequence length (i.e., the delay slot is never filled). */
7563 if (!TARGET_LONG_CALLS
7564 && (attr_length == 8
7565 || (attr_length == 28
7566 && get_attr_type (insn) == TYPE_SH_FUNC_ADRS)))
7568 output_asm_insn ("{bl|b,l} %0,%2", xoperands);
7574 /* It might seem that one insn could be saved by accessing
7575 the millicode function using the linkage table. However,
7576 this doesn't work in shared libraries and other dynamically
7577 loaded objects. Using a pc-relative sequence also avoids
7578 problems related to the implicit use of the gp register. */
7579 output_asm_insn ("b,l .+8,%%r1", xoperands);
7583 output_asm_insn ("addil L'%0-$PIC_pcrel$0+4,%%r1", xoperands);
7584 output_asm_insn ("ldo R'%0-$PIC_pcrel$0+8(%%r1),%%r1", xoperands);
7588 xoperands[1] = gen_label_rtx ();
7589 output_asm_insn ("addil L'%0-%l1,%%r1", xoperands);
7590 targetm.asm_out.internal_label (asm_out_file, "L",
7591 CODE_LABEL_NUMBER (xoperands[1]));
7592 output_asm_insn ("ldo R'%0-%l1(%%r1),%%r1", xoperands);
7595 output_asm_insn ("bve,l (%%r1),%%r2", xoperands);
7597 else if (TARGET_PORTABLE_RUNTIME)
7599 /* Pure portable runtime doesn't allow be/ble; we also don't
7600 have PIC support in the assembler/linker, so this sequence
7603 /* Get the address of our target into %r1. */
7604 output_asm_insn ("ldil L'%0,%%r1", xoperands);
7605 output_asm_insn ("ldo R'%0(%%r1),%%r1", xoperands);
7607 /* Get our return address into %r31. */
7608 output_asm_insn ("{bl|b,l} .+8,%%r31", xoperands);
7609 output_asm_insn ("addi 8,%%r31,%%r31", xoperands);
7611 /* Jump to our target address in %r1. */
7612 output_asm_insn ("bv %%r0(%%r1)", xoperands);
7616 output_asm_insn ("ldil L'%0,%%r1", xoperands);
7618 output_asm_insn ("be,l R'%0(%%sr4,%%r1),%%sr0,%%r31", xoperands);
7620 output_asm_insn ("ble R'%0(%%sr4,%%r1)", xoperands);
7624 output_asm_insn ("{bl|b,l} .+8,%%r1", xoperands);
7625 output_asm_insn ("addi 16,%%r1,%%r31", xoperands);
7627 if (TARGET_SOM || !TARGET_GAS)
7629 /* The HP assembler can generate relocations for the
7630 difference of two symbols. GAS can do this for a
7631 millicode symbol but not an arbitrary external
7632 symbol when generating SOM output. */
7633 xoperands[1] = gen_label_rtx ();
7634 targetm.asm_out.internal_label (asm_out_file, "L",
7635 CODE_LABEL_NUMBER (xoperands[1]));
7636 output_asm_insn ("addil L'%0-%l1,%%r1", xoperands);
7637 output_asm_insn ("ldo R'%0-%l1(%%r1),%%r1", xoperands);
7641 output_asm_insn ("addil L'%0-$PIC_pcrel$0+8,%%r1", xoperands);
7642 output_asm_insn ("ldo R'%0-$PIC_pcrel$0+12(%%r1),%%r1",
7646 /* Jump to our target address in %r1. */
7647 output_asm_insn ("bv %%r0(%%r1)", xoperands);
7651 if (seq_length == 0)
7652 output_asm_insn ("nop", xoperands);
7654 /* We are done if there isn't a jump in the delay slot. */
7655 if (seq_length == 0 || GET_CODE (NEXT_INSN (insn)) != JUMP_INSN)
7658 /* This call has an unconditional jump in its delay slot. */
7659 xoperands[0] = XEXP (PATTERN (NEXT_INSN (insn)), 1);
7661 /* See if the return address can be adjusted. Use the containing
7662 sequence insn's address. */
7663 if (INSN_ADDRESSES_SET_P ())
7665 seq_insn = NEXT_INSN (PREV_INSN (XVECEXP (final_sequence, 0, 0)));
7666 distance = (INSN_ADDRESSES (INSN_UID (JUMP_LABEL (NEXT_INSN (insn))))
7667 - INSN_ADDRESSES (INSN_UID (seq_insn)) - 8);
7669 if (VAL_14_BITS_P (distance))
7671 xoperands[1] = gen_label_rtx ();
7672 output_asm_insn ("ldo %0-%1(%2),%2", xoperands);
7673 targetm.asm_out.internal_label (asm_out_file, "L",
7674 CODE_LABEL_NUMBER (xoperands[1]));
7677 /* ??? This branch may not reach its target. */
7678 output_asm_insn ("nop\n\tb,n %0", xoperands);
7681 /* ??? This branch may not reach its target. */
7682 output_asm_insn ("nop\n\tb,n %0", xoperands);
7684 /* Delete the jump. */
7685 SET_INSN_DELETED (NEXT_INSN (insn));
7690 /* Return the attribute length of the call instruction INSN. The SIBCALL
7691 flag indicates whether INSN is a regular call or a sibling call. The
7692 length returned must be longer than the code actually generated by
7693 pa_output_call. Since branch shortening is done before delay branch
7694 sequencing, there is no way to determine whether or not the delay
7695 slot will be filled during branch shortening. Even when the delay
7696 slot is filled, we may have to add a nop if the delay slot contains
7697 a branch that can't reach its target. Thus, we always have to include
7698 the delay slot in the length estimate. This used to be done in
7699 pa_adjust_insn_length but we do it here now as some sequences always
7700 fill the delay slot and we can save four bytes in the estimate for these sequences. */
7704 pa_attr_length_call (rtx insn, int sibcall)
7707 rtx call, call_dest;
7710 rtx pat = PATTERN (insn);
7711 unsigned long distance = -1;
7713 gcc_assert (GET_CODE (insn) == CALL_INSN);
7715 if (INSN_ADDRESSES_SET_P ())
7717 unsigned long total;
7719 total = IN_NAMED_SECTION_P (cfun->decl) ? 0 : total_code_bytes;
7720 distance = (total + insn_current_reference_address (insn));
7721 if (distance < total)
7725 gcc_assert (GET_CODE (pat) == PARALLEL);
7727 /* Get the call rtx. */
7728 call = XVECEXP (pat, 0, 0);
7729 if (GET_CODE (call) == SET)
7730 call = SET_SRC (call);
7732 gcc_assert (GET_CODE (call) == CALL);
7734 /* Determine if this is a local call. */
7735 call_dest = XEXP (XEXP (call, 0), 0);
7736 call_decl = SYMBOL_REF_DECL (call_dest);
7737 local_call = call_decl && targetm.binds_local_p (call_decl);
7739 /* pc-relative branch. */
7740 if (!TARGET_LONG_CALLS
7741 && ((TARGET_PA_20 && !sibcall && distance < 7600000)
7742 || distance < MAX_PCREL17F_OFFSET))
7745 /* 64-bit plabel sequence. */
7746 else if (TARGET_64BIT && !local_call)
7747 length += sibcall ? 28 : 24;
7749 /* non-pic long absolute branch sequence. */
7750 else if ((TARGET_LONG_ABS_CALL || local_call) && !flag_pic)
7753 /* long pc-relative branch sequence. */
7754 else if (TARGET_LONG_PIC_SDIFF_CALL
7755 || (TARGET_GAS && !TARGET_SOM
7756 && (TARGET_LONG_PIC_PCREL_CALL || local_call)))
7760 if (!TARGET_PA_20 && !TARGET_NO_SPACE_REGS && (!local_call || flag_pic))
7764 /* 32-bit plabel sequence. */
7770 length += length_fp_args (insn);
7780 if (!TARGET_NO_SPACE_REGS && (!local_call || flag_pic))
7788 /* INSN is a function call. It may have an unconditional jump in its delay slot.
7791 CALL_DEST is the routine we are calling. */
7794 pa_output_call (rtx insn, rtx call_dest, int sibcall)
7796 int delay_insn_deleted = 0;
7797 int delay_slot_filled = 0;
7798 int seq_length = dbr_sequence_length ();
7799 tree call_decl = SYMBOL_REF_DECL (call_dest);
7800 int local_call = call_decl && targetm.binds_local_p (call_decl);
7803 xoperands[0] = call_dest;
7805 /* Handle the common case where we're sure that the branch will reach
7806 the beginning of the "$CODE$" subspace. This is the beginning of
7807 the current function if we are in a named section. */
7808 if (!TARGET_LONG_CALLS && pa_attr_length_call (insn, sibcall) == 8)
7810 xoperands[1] = gen_rtx_REG (word_mode, sibcall ? 0 : 2);
7811 output_asm_insn ("{bl|b,l} %0,%1", xoperands);
7815 if (TARGET_64BIT && !local_call)
7817 /* ??? As far as I can tell, the HP linker doesn't support the
7818 long pc-relative sequence described in the 64-bit runtime
7819 architecture. So, we use a slightly longer indirect call. */
7820 xoperands[0] = pa_get_deferred_plabel (call_dest);
7821 xoperands[1] = gen_label_rtx ();
7823 /* If this isn't a sibcall, we put the load of %r27 into the
7824 delay slot. We can't do this in a sibcall as we don't
7825 have a second call-clobbered scratch register available. */
7827 && GET_CODE (NEXT_INSN (insn)) != JUMP_INSN
7830 final_scan_insn (NEXT_INSN (insn), asm_out_file,
7833 /* Now delete the delay insn. */
7834 SET_INSN_DELETED (NEXT_INSN (insn));
7835 delay_insn_deleted = 1;
7838 output_asm_insn ("addil LT'%0,%%r27", xoperands);
7839 output_asm_insn ("ldd RT'%0(%%r1),%%r1", xoperands);
7840 output_asm_insn ("ldd 0(%%r1),%%r1", xoperands);
7844 output_asm_insn ("ldd 24(%%r1),%%r27", xoperands);
7845 output_asm_insn ("ldd 16(%%r1),%%r1", xoperands);
7846 output_asm_insn ("bve (%%r1)", xoperands);
7850 output_asm_insn ("ldd 16(%%r1),%%r2", xoperands);
7851 output_asm_insn ("bve,l (%%r2),%%r2", xoperands);
7852 output_asm_insn ("ldd 24(%%r1),%%r27", xoperands);
7853 delay_slot_filled = 1;
7858 int indirect_call = 0;
7860 /* Emit a long call. There are several different sequences
7861 of increasing length and complexity. In most cases,
7862 they don't allow an instruction in the delay slot. */
7863 if (!((TARGET_LONG_ABS_CALL || local_call) && !flag_pic)
7864 && !TARGET_LONG_PIC_SDIFF_CALL
7865 && !(TARGET_GAS && !TARGET_SOM
7866 && (TARGET_LONG_PIC_PCREL_CALL || local_call))
7871 && GET_CODE (NEXT_INSN (insn)) != JUMP_INSN
7875 || ((TARGET_LONG_ABS_CALL || local_call) && !flag_pic)))
7877 /* A non-jump insn in the delay slot. By definition we can
7878 emit this insn before the call (and in fact before argument
7880 final_scan_insn (NEXT_INSN (insn), asm_out_file, optimize, 0,
7883 /* Now delete the delay insn. */
7884 SET_INSN_DELETED (NEXT_INSN (insn));
7885 delay_insn_deleted = 1;
7888 if ((TARGET_LONG_ABS_CALL || local_call) && !flag_pic)
7890 /* This is the best sequence for making long calls in
7891 non-pic code. Unfortunately, GNU ld doesn't provide
7892 the stub needed for external calls, and GAS's support
7893 for this with the SOM linker is buggy. It is safe
7894 to use this for local calls. */
7895 output_asm_insn ("ldil L'%0,%%r1", xoperands);
7897 output_asm_insn ("be R'%0(%%sr4,%%r1)", xoperands);
7901 output_asm_insn ("be,l R'%0(%%sr4,%%r1),%%sr0,%%r31",
7904 output_asm_insn ("ble R'%0(%%sr4,%%r1)", xoperands);
7906 output_asm_insn ("copy %%r31,%%r2", xoperands);
7907 delay_slot_filled = 1;
7912 if (TARGET_LONG_PIC_SDIFF_CALL)
7914 /* The HP assembler and linker can handle relocations
7915 for the difference of two symbols. The HP assembler
7916 recognizes the sequence as a pc-relative call and
7917 the linker provides stubs when needed. */
7918 xoperands[1] = gen_label_rtx ();
7919 output_asm_insn ("{bl|b,l} .+8,%%r1", xoperands);
7920 output_asm_insn ("addil L'%0-%l1,%%r1", xoperands);
7921 targetm.asm_out.internal_label (asm_out_file, "L",
7922 CODE_LABEL_NUMBER (xoperands[1]));
7923 output_asm_insn ("ldo R'%0-%l1(%%r1),%%r1", xoperands);
7925 else if (TARGET_GAS && !TARGET_SOM
7926 && (TARGET_LONG_PIC_PCREL_CALL || local_call))
7928 /* GAS currently can't generate the relocations that
7929 are needed for the SOM linker under HP-UX using this
7930 sequence. The GNU linker doesn't generate the stubs
7931 that are needed for external calls on TARGET_ELF32
7932 with this sequence. For now, we have to use a
7933 longer plabel sequence when using GAS. */
7934 output_asm_insn ("{bl|b,l} .+8,%%r1", xoperands);
7935 output_asm_insn ("addil L'%0-$PIC_pcrel$0+4,%%r1",
7937 output_asm_insn ("ldo R'%0-$PIC_pcrel$0+8(%%r1),%%r1",
7942 /* Emit a long plabel-based call sequence. This is
7943 essentially an inline implementation of $$dyncall.
7944 We don't actually try to call $$dyncall as this is
7945 as difficult as calling the function itself. */
7946 xoperands[0] = pa_get_deferred_plabel (call_dest);
7947 xoperands[1] = gen_label_rtx ();
7949 /* Since the call is indirect, FP arguments in registers
7950 need to be copied to the general registers. Then, the
7951 argument relocation stub will copy them back. */
7953 copy_fp_args (insn);
7957 output_asm_insn ("addil LT'%0,%%r19", xoperands);
7958 output_asm_insn ("ldw RT'%0(%%r1),%%r1", xoperands);
7959 output_asm_insn ("ldw 0(%%r1),%%r1", xoperands);
7963 output_asm_insn ("addil LR'%0-$global$,%%r27",
7965 output_asm_insn ("ldw RR'%0-$global$(%%r1),%%r1",
7969 output_asm_insn ("bb,>=,n %%r1,30,.+16", xoperands);
7970 output_asm_insn ("depi 0,31,2,%%r1", xoperands);
7971 output_asm_insn ("ldw 4(%%sr0,%%r1),%%r19", xoperands);
7972 output_asm_insn ("ldw 0(%%sr0,%%r1),%%r1", xoperands);
7974 if (!sibcall && !TARGET_PA_20)
7976 output_asm_insn ("{bl|b,l} .+8,%%r2", xoperands);
7977 if (TARGET_NO_SPACE_REGS || (local_call && !flag_pic))
7978 output_asm_insn ("addi 8,%%r2,%%r2", xoperands);
7980 output_asm_insn ("addi 16,%%r2,%%r2", xoperands);
7987 output_asm_insn ("bve (%%r1)", xoperands);
7992 output_asm_insn ("bve,l (%%r1),%%r2", xoperands);
7993 output_asm_insn ("stw %%r2,-24(%%sp)", xoperands);
7994 delay_slot_filled = 1;
7997 output_asm_insn ("bve,l (%%r1),%%r2", xoperands);
8002 if (!TARGET_NO_SPACE_REGS && (!local_call || flag_pic))
8003 output_asm_insn ("ldsid (%%r1),%%r31\n\tmtsp %%r31,%%sr0",
8008 if (TARGET_NO_SPACE_REGS || (local_call && !flag_pic))
8009 output_asm_insn ("be 0(%%sr4,%%r1)", xoperands);
8011 output_asm_insn ("be 0(%%sr0,%%r1)", xoperands);
8015 if (TARGET_NO_SPACE_REGS || (local_call && !flag_pic))
8016 output_asm_insn ("ble 0(%%sr4,%%r1)", xoperands);
8018 output_asm_insn ("ble 0(%%sr0,%%r1)", xoperands);
8021 output_asm_insn ("stw %%r31,-24(%%sp)", xoperands);
8023 output_asm_insn ("copy %%r31,%%r2", xoperands);
8024 delay_slot_filled = 1;
8031 if (!delay_slot_filled && (seq_length == 0 || delay_insn_deleted))
8032 output_asm_insn ("nop", xoperands);
8034 /* We are done if there isn't a jump in the delay slot. */
8036 || delay_insn_deleted
8037 || GET_CODE (NEXT_INSN (insn)) != JUMP_INSN)
8040 /* A sibcall should never have a branch in the delay slot. */
8041 gcc_assert (!sibcall);
8043 /* This call has an unconditional jump in its delay slot. */
8044 xoperands[0] = XEXP (PATTERN (NEXT_INSN (insn)), 1);
8046 if (!delay_slot_filled && INSN_ADDRESSES_SET_P ())
8048 /* See if the return address can be adjusted. Use the containing
8049 sequence insn's address. This would break the regular call/return
8050 relationship assumed by the table based eh unwinder, so only do that
8051 if the call is not possibly throwing. */
8052 rtx seq_insn = NEXT_INSN (PREV_INSN (XVECEXP (final_sequence, 0, 0)));
8053 int distance = (INSN_ADDRESSES (INSN_UID (JUMP_LABEL (NEXT_INSN (insn))))
8054 - INSN_ADDRESSES (INSN_UID (seq_insn)) - 8);
8056 if (VAL_14_BITS_P (distance)
8057 && !(can_throw_internal (insn) || can_throw_external (insn)))
8059 xoperands[1] = gen_label_rtx ();
8060 output_asm_insn ("ldo %0-%1(%%r2),%%r2", xoperands);
8061 targetm.asm_out.internal_label (asm_out_file, "L",
8062 CODE_LABEL_NUMBER (xoperands[1]));
8065 output_asm_insn ("nop\n\tb,n %0", xoperands);
8068 output_asm_insn ("b,n %0", xoperands);
8070 /* Delete the jump. */
8071 SET_INSN_DELETED (NEXT_INSN (insn));
8076 /* Return the attribute length of the indirect call instruction INSN.
8077 The length must match the code generated by pa_output_indirect_call.
8078 The returned length includes the delay slot. Currently, the delay
8079 slot of an indirect call sequence is not exposed and it is used by
8080 the sequence itself. */
8083 pa_attr_length_indirect_call (rtx insn)
8085 unsigned long distance = -1;
8086 unsigned long total = IN_NAMED_SECTION_P (cfun->decl) ? 0 : total_code_bytes;
8088 if (INSN_ADDRESSES_SET_P ())
8090 distance = (total + insn_current_reference_address (insn));
8091 if (distance < total)
8098 if (TARGET_FAST_INDIRECT_CALLS
8099 || (!TARGET_PORTABLE_RUNTIME
8100 && ((TARGET_PA_20 && !TARGET_SOM && distance < 7600000)
8101 || distance < MAX_PCREL17F_OFFSET)))
8107 if (TARGET_PORTABLE_RUNTIME)
8110 /* Out of reach, can use ble. */
8115 pa_output_indirect_call (rtx insn, rtx call_dest)
8121 xoperands[0] = call_dest;
8122 output_asm_insn ("ldd 16(%0),%%r2", xoperands);
8123 output_asm_insn ("bve,l (%%r2),%%r2\n\tldd 24(%0),%%r27", xoperands);
8127 /* First the special case for kernels, level 0 systems, etc. */
8128 if (TARGET_FAST_INDIRECT_CALLS)
8129 return "ble 0(%%sr4,%%r22)\n\tcopy %%r31,%%r2";
8131 /* Now the normal case -- we can reach $$dyncall directly or
8132 we're sure that we can get there via a long-branch stub.
8134 No need to check target flags as the length uniquely identifies
8135 the remaining cases. */
8136 if (pa_attr_length_indirect_call (insn) == 8)
8138 /* The HP linker sometimes substitutes a BLE for BL/B,L calls to
8139 $$dyncall. Since BLE uses %r31 as the link register, the 22-bit
8140 variant of the B,L instruction can't be used on the SOM target. */
8141 if (TARGET_PA_20 && !TARGET_SOM)
8142 return ".CALL\tARGW0=GR\n\tb,l $$dyncall,%%r2\n\tcopy %%r2,%%r31";
8144 return ".CALL\tARGW0=GR\n\tbl $$dyncall,%%r31\n\tcopy %%r31,%%r2";
8147 /* Long millicode call, but we are not generating PIC or portable runtime code. */
8149 if (pa_attr_length_indirect_call (insn) == 12)
8150 return ".CALL\tARGW0=GR\n\tldil L'$$dyncall,%%r2\n\tble R'$$dyncall(%%sr4,%%r2)\n\tcopy %%r31,%%r2";
8152 /* Long millicode call for portable runtime. */
8153 if (pa_attr_length_indirect_call (insn) == 20)
8154 return "ldil L'$$dyncall,%%r31\n\tldo R'$$dyncall(%%r31),%%r31\n\tblr %%r0,%%r2\n\tbv,n %%r0(%%r31)\n\tnop";
8156 /* We need a long PIC call to $$dyncall. */
8157 xoperands[0] = NULL_RTX;
8158 output_asm_insn ("{bl|b,l} .+8,%%r1", xoperands);
8159 if (TARGET_SOM || !TARGET_GAS)
8161 xoperands[0] = gen_label_rtx ();
8162 output_asm_insn ("addil L'$$dyncall-%0,%%r1", xoperands);
8163 targetm.asm_out.internal_label (asm_out_file, "L",
8164 CODE_LABEL_NUMBER (xoperands[0]));
8165 output_asm_insn ("ldo R'$$dyncall-%0(%%r1),%%r1", xoperands);
8169 output_asm_insn ("addil L'$$dyncall-$PIC_pcrel$0+4,%%r1", xoperands);
8170 output_asm_insn ("ldo R'$$dyncall-$PIC_pcrel$0+8(%%r1),%%r1",
8173 output_asm_insn ("blr %%r0,%%r2", xoperands);
8174 output_asm_insn ("bv,n %%r0(%%r1)\n\tnop", xoperands);
8178 /* In HPUX 8.0's shared library scheme, special relocations are needed
8179 for function labels if they might be passed to a function
8180 in a shared library (because shared libraries don't live in code
8181 space), and special magic is needed to construct their address. */
8184 pa_encode_label (rtx sym)
8186 const char *str = XSTR (sym, 0);
8187 int len = strlen (str) + 1;
8190 p = newstr = XALLOCAVEC (char, len + 1);
8194 XSTR (sym, 0) = ggc_alloc_string (newstr, len);
8198 pa_encode_section_info (tree decl, rtx rtl, int first)
8200 int old_referenced = 0;
8202 if (!first && MEM_P (rtl) && GET_CODE (XEXP (rtl, 0)) == SYMBOL_REF)
8204 = SYMBOL_REF_FLAGS (XEXP (rtl, 0)) & SYMBOL_FLAG_REFERENCED;
8206 default_encode_section_info (decl, rtl, first);
8208 if (first && TEXT_SPACE_P (decl))
8210 SYMBOL_REF_FLAG (XEXP (rtl, 0)) = 1;
8211 if (TREE_CODE (decl) == FUNCTION_DECL)
8212 pa_encode_label (XEXP (rtl, 0));
8214 else if (old_referenced)
8215 SYMBOL_REF_FLAGS (XEXP (rtl, 0)) |= old_referenced;
8218 /* This is sort of inverse to pa_encode_section_info. */
8221 pa_strip_name_encoding (const char *str)
8223 str += (*str == '@');
8224 str += (*str == '*');
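/* Sketch of the intended round trip (the '@' prefix is an assumption,
   inferred from the stripping above and the elided body of
   pa_encode_label): pa_encode_label turns "foo" into "@foo", and
   pa_strip_name_encoding removes a leading '@' or '*' again.  */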
8228 /* Returns 1 if OP is a function label involved in a simple addition
8229 with a constant. Used to keep certain patterns from matching
8230 during instruction combination. */
8232 pa_is_function_label_plus_const (rtx op)
8234 /* Strip off any CONST. */
8235 if (GET_CODE (op) == CONST)
8238 return (GET_CODE (op) == PLUS
8239 && function_label_operand (XEXP (op, 0), VOIDmode)
8240 && GET_CODE (XEXP (op, 1)) == CONST_INT);
8243 /* Output assembly code for a thunk to FUNCTION. */
8246 pa_asm_output_mi_thunk (FILE *file, tree thunk_fndecl, HOST_WIDE_INT delta,
8247 HOST_WIDE_INT vcall_offset ATTRIBUTE_UNUSED,
8250 static unsigned int current_thunk_number;
8251 int val_14 = VAL_14_BITS_P (delta);
8252 unsigned int old_last_address = last_address, nbytes = 0;
8256 xoperands[0] = XEXP (DECL_RTL (function), 0);
8257 xoperands[1] = XEXP (DECL_RTL (thunk_fndecl), 0);
8258 xoperands[2] = GEN_INT (delta);
8260 ASM_OUTPUT_LABEL (file, XSTR (xoperands[1], 0));
8261 fprintf (file, "\t.PROC\n\t.CALLINFO FRAME=0,NO_CALLS\n\t.ENTRY\n");
8263 /* Output the thunk. We know that the function is in the same
8264 translation unit (i.e., the same space) as the thunk, and that
8265 thunks are output after their method. Thus, we don't need an
8266 external branch to reach the function. With SOM and GAS,
8267 functions and thunks are effectively in different sections.
8268 Thus, we can always use an IA-relative branch and the linker
8269 will add a long branch stub if necessary.
8271 However, we have to be careful when generating PIC code on the
8272 SOM port to ensure that the sequence does not transfer to an
8273 import stub for the target function as this could clobber the
8274 return value saved at SP-24. This would also apply to the
8275 32-bit linux port if the multi-space model is implemented. */
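/* For illustration, the short non-PIC case handled just below emits
   roughly

       addil L'delta,%r26	; adjust the this pointer
       b function
       ldo R'delta(%r1),%r26	; finish the adjustment in the delay slot

   or, when DELTA fits in 14 bits, just "b function" with
   "ldo delta(%r26),%r26" in the delay slot.  */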
8276 if ((!TARGET_LONG_CALLS && TARGET_SOM && !TARGET_PORTABLE_RUNTIME
8277 && !(flag_pic && TREE_PUBLIC (function))
8278 && (TARGET_GAS || last_address < 262132))
8279 || (!TARGET_LONG_CALLS && !TARGET_SOM && !TARGET_PORTABLE_RUNTIME
8280 && ((targetm_common.have_named_sections
8281 && DECL_SECTION_NAME (thunk_fndecl) != NULL
8282 /* The GNU 64-bit linker has rather poor stub management.
8283 So, we use a long branch from thunks that aren't in
8284 the same section as the target function. */
8286 && (DECL_SECTION_NAME (thunk_fndecl)
8287 != DECL_SECTION_NAME (function)))
8288 || ((DECL_SECTION_NAME (thunk_fndecl)
8289 == DECL_SECTION_NAME (function))
8290 && last_address < 262132)))
8291 || (targetm_common.have_named_sections
8292 && DECL_SECTION_NAME (thunk_fndecl) == NULL
8293 && DECL_SECTION_NAME (function) == NULL
8294 && last_address < 262132)
8295 || (!targetm_common.have_named_sections
8296 && last_address < 262132))))
8299 output_asm_insn ("addil L'%2,%%r26", xoperands);
8301 output_asm_insn ("b %0", xoperands);
8305 output_asm_insn ("ldo %2(%%r26),%%r26", xoperands);
8310 output_asm_insn ("ldo R'%2(%%r1),%%r26", xoperands);
8314 else if (TARGET_64BIT)
8316 /* We only have one call-clobbered scratch register, so we can't
8317 make use of the delay slot if delta doesn't fit in 14 bits. */
8320 output_asm_insn ("addil L'%2,%%r26", xoperands);
8321 output_asm_insn ("ldo R'%2(%%r1),%%r26", xoperands);
8324 output_asm_insn ("b,l .+8,%%r1", xoperands);
8328 output_asm_insn ("addil L'%0-$PIC_pcrel$0+4,%%r1", xoperands);
8329 output_asm_insn ("ldo R'%0-$PIC_pcrel$0+8(%%r1),%%r1", xoperands);
8333 xoperands[3] = GEN_INT (val_14 ? 8 : 16);
8334 output_asm_insn ("addil L'%0-%1-%3,%%r1", xoperands);
8339 output_asm_insn ("bv %%r0(%%r1)", xoperands);
8340 output_asm_insn ("ldo %2(%%r26),%%r26", xoperands);
8345 output_asm_insn ("bv,n %%r0(%%r1)", xoperands);
8349 else if (TARGET_PORTABLE_RUNTIME)
8351 output_asm_insn ("ldil L'%0,%%r1", xoperands);
8352 output_asm_insn ("ldo R'%0(%%r1),%%r22", xoperands);
8355 output_asm_insn ("addil L'%2,%%r26", xoperands);
8357 output_asm_insn ("bv %%r0(%%r22)", xoperands);
8361 output_asm_insn ("ldo %2(%%r26),%%r26", xoperands);
8366 output_asm_insn ("ldo R'%2(%%r1),%%r26", xoperands);
8370 else if (TARGET_SOM && flag_pic && TREE_PUBLIC (function))
8372 /* The function is accessible from outside this module. The only
8373 way to avoid an import stub between the thunk and function is to
8374 call the function directly with an indirect sequence similar to
8375 that used by $$dyncall. This is possible because $$dyncall acts
8376 as the import stub in an indirect call. */
8377 ASM_GENERATE_INTERNAL_LABEL (label, "LTHN", current_thunk_number);
8378 xoperands[3] = gen_rtx_SYMBOL_REF (Pmode, label);
8379 output_asm_insn ("addil LT'%3,%%r19", xoperands);
8380 output_asm_insn ("ldw RT'%3(%%r1),%%r22", xoperands);
8381 output_asm_insn ("ldw 0(%%sr0,%%r22),%%r22", xoperands);
8382 output_asm_insn ("bb,>=,n %%r22,30,.+16", xoperands);
8383 output_asm_insn ("depi 0,31,2,%%r22", xoperands);
8384 output_asm_insn ("ldw 4(%%sr0,%%r22),%%r19", xoperands);
8385 output_asm_insn ("ldw 0(%%sr0,%%r22),%%r22", xoperands);
8389 output_asm_insn ("addil L'%2,%%r26", xoperands);
8395 output_asm_insn ("bve (%%r22)", xoperands);
8398 else if (TARGET_NO_SPACE_REGS)
8400 output_asm_insn ("be 0(%%sr4,%%r22)", xoperands);
8405 output_asm_insn ("ldsid (%%sr0,%%r22),%%r21", xoperands);
8406 output_asm_insn ("mtsp %%r21,%%sr0", xoperands);
8407 output_asm_insn ("be 0(%%sr0,%%r22)", xoperands);
8412 output_asm_insn ("ldo %2(%%r26),%%r26", xoperands);
8414 output_asm_insn ("ldo R'%2(%%r1),%%r26", xoperands);
8418 output_asm_insn ("{bl|b,l} .+8,%%r1", xoperands);
8420 if (TARGET_SOM || !TARGET_GAS)
8422 output_asm_insn ("addil L'%0-%1-8,%%r1", xoperands);
8423 output_asm_insn ("ldo R'%0-%1-8(%%r1),%%r22", xoperands);
8427 output_asm_insn ("addil L'%0-$PIC_pcrel$0+4,%%r1", xoperands);
8428 output_asm_insn ("ldo R'%0-$PIC_pcrel$0+8(%%r1),%%r22", xoperands);
8432 output_asm_insn ("addil L'%2,%%r26", xoperands);
8434 output_asm_insn ("bv %%r0(%%r22)", xoperands);
8438 output_asm_insn ("ldo %2(%%r26),%%r26", xoperands);
8443 output_asm_insn ("ldo R'%2(%%r1),%%r26", xoperands);
8450 output_asm_insn ("addil L'%2,%%r26", xoperands);
8452 output_asm_insn ("ldil L'%0,%%r22", xoperands);
8453 output_asm_insn ("be R'%0(%%sr4,%%r22)", xoperands);
8457 output_asm_insn ("ldo %2(%%r26),%%r26", xoperands);
8462 output_asm_insn ("ldo R'%2(%%r1),%%r26", xoperands);
8467 fprintf (file, "\t.EXIT\n\t.PROCEND\n");
8469 if (TARGET_SOM && TARGET_GAS)
8471 /* We are done with this subspace except possibly for some additional
8472 debug information. Forget that we are in this subspace to ensure
8473 that the next function is output in its own subspace. */
8475 cfun->machine->in_nsubspa = 2;
8478 if (TARGET_SOM && flag_pic && TREE_PUBLIC (function))
8480 switch_to_section (data_section);
8481 output_asm_insn (".align 4", xoperands);
8482 ASM_OUTPUT_LABEL (file, label);
8483 output_asm_insn (".word P'%0", xoperands);
8486 current_thunk_number++;
8487 nbytes = ((nbytes + FUNCTION_BOUNDARY / BITS_PER_UNIT - 1)
8488 & ~(FUNCTION_BOUNDARY / BITS_PER_UNIT - 1));
8489 last_address += nbytes;
8490 if (old_last_address > last_address)
8491 last_address = UINT_MAX;
8492 update_total_code_bytes (nbytes);
8495 /* Only direct calls to static functions are allowed to be sibling (tail) calls.
8498 This restriction is necessary because some linker generated stubs will
8499 store return pointers into rp' in some cases which might clobber a
8500 live value already in rp'.
8502 In a sibcall the current function and the target function share stack
8503 space. Thus if the path to the current function and the path to the
8504 target function save a value in rp', they save the value into the
8505 same stack slot, which has undesirable consequences.
8507 Because of the deferred binding nature of shared libraries any function
8508 with external scope could be in a different load module and thus require
8509 rp' to be saved when calling that function. So sibcall optimizations
8510 can only be safe for static functions.
8512 Note that GCC never needs return value relocations, so we don't have to
8513 worry about static calls with return value relocations (which require
8516 It is safe to perform a sibcall optimization when the target function
8517 will never return. */
8519 pa_function_ok_for_sibcall (tree decl, tree exp ATTRIBUTE_UNUSED)
8521 if (TARGET_PORTABLE_RUNTIME)
8524 /* Sibcalls are ok for TARGET_ELF32 as long as the linker is used in
8525 single subspace mode and the call is not indirect. As far as I know,
8526 there is no operating system support for the multiple subspace mode.
8527 It might be possible to support indirect calls if we didn't use
8528 $$dyncall (see the indirect sequence generated in pa_output_call). */
8530 return (decl != NULL_TREE);
8532 /* Sibcalls are not ok because the arg pointer register is not a fixed
8533 register. This prevents the sibcall optimization from occurring. In
8534 addition, there are problems with stub placement using GNU ld. This
8535 is because a normal sibcall branch uses a 17-bit relocation while
8536 a regular call branch uses a 22-bit relocation. As a result, more
8537 care needs to be taken in the placement of long-branch stubs. */
8541 /* Sibcalls are only ok within a translation unit. */
8542 return (decl && !TREE_PUBLIC (decl));
8545 /* ??? Addition is not commutative on the PA due to the weird implicit
8546 space register selection rules for memory addresses. Therefore, we
8547 don't consider a + b == b + a, as this might be inside a MEM. */
8549 pa_commutative_p (const_rtx x, int outer_code)
8551 return (COMMUTATIVE_P (x)
8552 && (TARGET_NO_SPACE_REGS
8553 || (outer_code != UNKNOWN && outer_code != MEM)
8554 || GET_CODE (x) != PLUS));
8557 /* Returns 1 if the 6 operands specified in OPERANDS are suitable for
8558 use in fmpyadd instructions. */
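/* Illustrative note on the operand layout checked below (a reading of the
   tests, not an addition to them): operands 0-2 form the multiply and
   operands 3-5 form the add, with operand 3 the add's in/out register:

	%0 = %1 * %2		; fmpy part
	%3 = %4 + %5		; fadd part, %3 must equal %4 or %5

   The conflict tests ensure the two halves are independent so they can be
   issued together as a single fmpyadd.  */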
8560 pa_fmpyaddoperands (rtx *operands)
8562 enum machine_mode mode = GET_MODE (operands[0]);
8564 /* Must be a floating point mode. */
8565 if (mode != SFmode && mode != DFmode)
8568 /* All modes must be the same. */
8569 if (! (mode == GET_MODE (operands[1])
8570 && mode == GET_MODE (operands[2])
8571 && mode == GET_MODE (operands[3])
8572 && mode == GET_MODE (operands[4])
8573 && mode == GET_MODE (operands[5])))
8576 /* All operands must be registers. */
8577 if (! (GET_CODE (operands[1]) == REG
8578 && GET_CODE (operands[2]) == REG
8579 && GET_CODE (operands[3]) == REG
8580 && GET_CODE (operands[4]) == REG
8581 && GET_CODE (operands[5]) == REG))
8584 /* Only 2 real operands to the addition. One of the input operands must
8585 be the same as the output operand. */
8586 if (! rtx_equal_p (operands[3], operands[4])
8587 && ! rtx_equal_p (operands[3], operands[5]))
8590 /* Inout operand of add cannot conflict with any operands from multiply. */
8591 if (rtx_equal_p (operands[3], operands[0])
8592 || rtx_equal_p (operands[3], operands[1])
8593 || rtx_equal_p (operands[3], operands[2]))
8596 /* The multiply result cannot feed into the addition operands. */
8597 if (rtx_equal_p (operands[4], operands[0])
8598 || rtx_equal_p (operands[5], operands[0]))
8601 /* SFmode limits the registers to the upper 32 of the 32bit FP regs. */
8603 && (REGNO_REG_CLASS (REGNO (operands[0])) != FPUPPER_REGS
8604 || REGNO_REG_CLASS (REGNO (operands[1])) != FPUPPER_REGS
8605 || REGNO_REG_CLASS (REGNO (operands[2])) != FPUPPER_REGS
8606 || REGNO_REG_CLASS (REGNO (operands[3])) != FPUPPER_REGS
8607 || REGNO_REG_CLASS (REGNO (operands[4])) != FPUPPER_REGS
8608 || REGNO_REG_CLASS (REGNO (operands[5])) != FPUPPER_REGS))
8611 /* Passed. Operands are suitable for fmpyadd. */
8615 #if !defined(USE_COLLECT2)
8617 pa_asm_out_constructor (rtx symbol, int priority)
8619 if (!function_label_operand (symbol, VOIDmode))
8620 pa_encode_label (symbol);
8622 #ifdef CTORS_SECTION_ASM_OP
8623 default_ctor_section_asm_out_constructor (symbol, priority);
8625 # ifdef TARGET_ASM_NAMED_SECTION
8626 default_named_section_asm_out_constructor (symbol, priority);
8628 default_stabs_asm_out_constructor (symbol, priority);
8634 pa_asm_out_destructor (rtx symbol, int priority)
8636 if (!function_label_operand (symbol, VOIDmode))
8637 pa_encode_label (symbol);
8639 #ifdef DTORS_SECTION_ASM_OP
8640 default_dtor_section_asm_out_destructor (symbol, priority);
8642 # ifdef TARGET_ASM_NAMED_SECTION
8643 default_named_section_asm_out_destructor (symbol, priority);
8645 default_stabs_asm_out_destructor (symbol, priority);
8651 /* This function places uninitialized global data in the bss section.
8652 The ASM_OUTPUT_ALIGNED_BSS macro needs to be defined to call this
8653 function on the SOM port to prevent uninitialized global data from
8654 being placed in the data section. */
8657 pa_asm_output_aligned_bss (FILE *stream,
8659 unsigned HOST_WIDE_INT size,
8662 switch_to_section (bss_section);
8663 fprintf (stream, "\t.align %u\n", align / BITS_PER_UNIT);
8665 #ifdef ASM_OUTPUT_TYPE_DIRECTIVE
8666 ASM_OUTPUT_TYPE_DIRECTIVE (stream, name, "object");
8669 #ifdef ASM_OUTPUT_SIZE_DIRECTIVE
8670 ASM_OUTPUT_SIZE_DIRECTIVE (stream, name, size);
8673 fprintf (stream, "\t.align %u\n", align / BITS_PER_UNIT);
8674 ASM_OUTPUT_LABEL (stream, name);
8675 fprintf (stream, "\t.block "HOST_WIDE_INT_PRINT_UNSIGNED"\n", size);
8678 /* Both the HP and GNU assemblers under HP-UX provide a .comm directive
8679 that doesn't allow the alignment of global common storage to be directly
8680 specified. The SOM linker aligns common storage based on the rounded
8681 value of the NUM_BYTES parameter in the .comm directive. It's not
8682 possible to use the .align directive as it doesn't affect the alignment
8683 of the label associated with a .comm directive. */
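/* Hypothetical example of the workaround used below: a 4-byte object that
   needs 16-byte alignment is emitted as ".comm 16" rather than ".comm 4",
   so the linker's rounding of NUM_BYTES yields the required alignment.
   (Illustrative only; the exact rounding is the SOM linker's, as described
   above.)  */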
8686 pa_asm_output_aligned_common (FILE *stream,
8688 unsigned HOST_WIDE_INT size,
8691 unsigned int max_common_align;
8693 max_common_align = TARGET_64BIT ? 128 : (size >= 4096 ? 256 : 64);
8694 if (align > max_common_align)
8696 warning (0, "alignment (%u) for %s exceeds maximum alignment "
8697 "for global common data. Using %u",
8698 align / BITS_PER_UNIT, name, max_common_align / BITS_PER_UNIT);
8699 align = max_common_align;
8702 switch_to_section (bss_section);
8704 assemble_name (stream, name);
8705 fprintf (stream, "\t.comm "HOST_WIDE_INT_PRINT_UNSIGNED"\n",
8706 MAX (size, align / BITS_PER_UNIT));
8709 /* We can't use .comm for local common storage as the SOM linker effectively
8710 treats the symbol as universal and uses the same storage for local symbols
8711 with the same name in different object files. The .block directive
8712 reserves an uninitialized block of storage. However, it's not common
8713 storage. Fortunately, GCC never requests common storage with the same
8714 name in any given translation unit. */
8717 pa_asm_output_aligned_local (FILE *stream,
8719 unsigned HOST_WIDE_INT size,
8722 switch_to_section (bss_section);
8723 fprintf (stream, "\t.align %u\n", align / BITS_PER_UNIT);
8726 fprintf (stream, "%s", LOCAL_ASM_OP);
8727 assemble_name (stream, name);
8728 fprintf (stream, "\n");
8731 ASM_OUTPUT_LABEL (stream, name);
8732 fprintf (stream, "\t.block "HOST_WIDE_INT_PRINT_UNSIGNED"\n", size);
8735 /* Returns 1 if the 6 operands specified in OPERANDS are suitable for
8736 use in fmpysub instructions. */
8738 pa_fmpysuboperands (rtx *operands)
8740 enum machine_mode mode = GET_MODE (operands[0]);
8742 /* Must be a floating point mode. */
8743 if (mode != SFmode && mode != DFmode)
8746 /* All modes must be the same. */
8747 if (! (mode == GET_MODE (operands[1])
8748 && mode == GET_MODE (operands[2])
8749 && mode == GET_MODE (operands[3])
8750 && mode == GET_MODE (operands[4])
8751 && mode == GET_MODE (operands[5])))
8754 /* All operands must be registers. */
8755 if (! (GET_CODE (operands[1]) == REG
8756 && GET_CODE (operands[2]) == REG
8757 && GET_CODE (operands[3]) == REG
8758 && GET_CODE (operands[4]) == REG
8759 && GET_CODE (operands[5]) == REG))
8762 /* Only 2 real operands to the subtraction. Subtraction is not a commutative
8763 operation, so operands[4] must be the same as operands[3]. */
8764 if (! rtx_equal_p (operands[3], operands[4]))
8767 /* The multiply result cannot feed into the subtraction. */
8768 if (rtx_equal_p (operands[5], operands[0]))
8771 /* Inout operand of sub cannot conflict with any operands from multiply. */
8772 if (rtx_equal_p (operands[3], operands[0])
8773 || rtx_equal_p (operands[3], operands[1])
8774 || rtx_equal_p (operands[3], operands[2]))
8777 /* SFmode limits the registers to the upper 32 of the 32bit FP regs. */
8779 && (REGNO_REG_CLASS (REGNO (operands[0])) != FPUPPER_REGS
8780 || REGNO_REG_CLASS (REGNO (operands[1])) != FPUPPER_REGS
8781 || REGNO_REG_CLASS (REGNO (operands[2])) != FPUPPER_REGS
8782 || REGNO_REG_CLASS (REGNO (operands[3])) != FPUPPER_REGS
8783 || REGNO_REG_CLASS (REGNO (operands[4])) != FPUPPER_REGS
8784 || REGNO_REG_CLASS (REGNO (operands[5])) != FPUPPER_REGS))
8787 /* Passed. Operands are suitable for fmpysub. */
8791 /* Return 1 if the given constant is 2, 4, or 8. These are the valid
8792 constants for shadd instructions. */
8794 pa_shadd_constant_p (int val)
8796 if (val == 2 || val == 4 || val == 8)
8802 /* Return TRUE if INSN branches forward. */
8805 forward_branch_p (rtx insn)
8807 rtx lab = JUMP_LABEL (insn);
8809 /* The INSN must have a jump label. */
8810 gcc_assert (lab != NULL_RTX);
8812 if (INSN_ADDRESSES_SET_P ())
8813 return INSN_ADDRESSES (INSN_UID (lab)) > INSN_ADDRESSES (INSN_UID (insn));
8820 insn = NEXT_INSN (insn);
8826 /* Return 1 if INSN is in the delay slot of a call instruction. */
8828 pa_jump_in_call_delay (rtx insn)
8831 if (GET_CODE (insn) != JUMP_INSN)
8834 if (PREV_INSN (insn)
8835 && PREV_INSN (PREV_INSN (insn))
8836 && GET_CODE (next_real_insn (PREV_INSN (PREV_INSN (insn)))) == INSN)
8838 rtx test_insn = next_real_insn (PREV_INSN (PREV_INSN (insn)));
8840 return (GET_CODE (PATTERN (test_insn)) == SEQUENCE
8841 && XVECEXP (PATTERN (test_insn), 0, 1) == insn);
8848 /* Output an unconditional move and branch insn. */
8851 pa_output_parallel_movb (rtx *operands, rtx insn)
8853 int length = get_attr_length (insn);
8855 /* These are the cases in which we win. */
8857 return "mov%I1b,tr %1,%0,%2";
8859 /* None of the following cases win, but they don't lose either. */
8862 if (dbr_sequence_length () == 0)
8864 /* Nothing in the delay slot, fake it by putting the combined
8865 insn (the copy or add) in the delay slot of a bl. */
8866 if (GET_CODE (operands[1]) == CONST_INT)
8867 return "b %2\n\tldi %1,%0";
8869 return "b %2\n\tcopy %1,%0";
8873 /* Something in the delay slot, but we've got a long branch. */
8874 if (GET_CODE (operands[1]) == CONST_INT)
8875 return "ldi %1,%0\n\tb %2";
8877 return "copy %1,%0\n\tb %2";
8881 if (GET_CODE (operands[1]) == CONST_INT)
8882 output_asm_insn ("ldi %1,%0", operands);
8884 output_asm_insn ("copy %1,%0", operands);
8885 return pa_output_lbranch (operands[2], insn, 1);
8888 /* Output an unconditional add and branch insn. */
8891 pa_output_parallel_addb (rtx *operands, rtx insn)
8893 int length = get_attr_length (insn);
8895 /* To make life easy we want operand0 to be the shared input/output
8896 operand and operand1 to be the readonly operand. */
8897 if (operands[0] == operands[1])
8898 operands[1] = operands[2];
8900 /* These are the cases in which we win. */
8902 return "add%I1b,tr %1,%0,%3";
8904 /* None of the following cases win, but they don't lose either. */
8907 if (dbr_sequence_length () == 0)
8908 /* Nothing in the delay slot, fake it by putting the combined
8909 insn (the copy or add) in the delay slot of a bl. */
8910 return "b %3\n\tadd%I1 %1,%0,%0";
8912 /* Something in the delay slot, but we've got a long branch. */
8913 return "add%I1 %1,%0,%0\n\tb %3";
8916 output_asm_insn ("add%I1 %1,%0,%0", operands);
8917 return pa_output_lbranch (operands[3], insn, 1);
8920 /* Return nonzero if INSN (a jump insn) immediately follows a call
8921 to a named function. This is used to avoid filling the delay slot
8922 of the jump since it can usually be eliminated by modifying RP in
8923 the delay slot of the call. */
8926 pa_following_call (rtx insn)
8928 if (! TARGET_JUMP_IN_DELAY)
8931 /* Find the previous real insn, skipping NOTEs. */
8932 insn = PREV_INSN (insn);
8933 while (insn && GET_CODE (insn) == NOTE)
8934 insn = PREV_INSN (insn);
8936 /* Check for CALL_INSNs and millicode calls. */
8938 && ((GET_CODE (insn) == CALL_INSN
8939 && get_attr_type (insn) != TYPE_DYNCALL)
8940 || (GET_CODE (insn) == INSN
8941 && GET_CODE (PATTERN (insn)) != SEQUENCE
8942 && GET_CODE (PATTERN (insn)) != USE
8943 && GET_CODE (PATTERN (insn)) != CLOBBER
8944 && get_attr_type (insn) == TYPE_MILLI)))
8950 /* We use this hook to perform a PA specific optimization which is difficult
8951 to do in earlier passes.
8953 We want the delay slots of branches within jump tables to be filled.
8954 None of the compiler passes at the moment even has the notion that a
8955 PA jump table doesn't contain addresses, but instead contains actual
8958 Because we actually jump into the table, the addresses of each entry
8959 must stay constant in relation to the beginning of the table (which
8960 itself must stay constant relative to the instruction to jump into
8961 it). I don't believe we can guarantee earlier passes of the compiler
8962 will adhere to those rules.
8964 So, late in the compilation process we find all the jump tables, and
8965 expand them into real code -- e.g. each entry in the jump table vector
8966 will get an appropriate label followed by a jump to the final target.
8968 Reorg and the final jump pass can then optimize these branches and
8969 fill their delay slots. We end up with smaller, more efficient code.
8971 The jump instructions within the table are special; we must be able
8972 to identify them during assembly output (if the jumps don't get filled
8973 we need to emit a nop rather than nullifying the delay slot). We
8974 identify jumps in switch tables by using insns with the attribute
8975 type TYPE_BTABLE_BRANCH.
8977 We also surround the jump table itself with BEGIN_BRTAB and END_BRTAB
8978 insns. This serves two purposes: first, it prevents jump.c from
8979 noticing that the last N entries in the table jump to the instruction
8980 immediately after the table and deleting the jumps. Second, those
8981 insns mark where we should emit .begin_brtab and .end_brtab directives
8982 when using GAS (allows for better link time optimizations). */
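/* Rough sketch of the expansion performed below (illustrative; the labels
   and mnemonics are made up): a branch-table vector such as

	.word	L$5
	.word	L$6

   is replaced by real branches that later passes can fill,

   L$90:	b	L$5
		nop
   L$91:	b	L$6
		nop

   bracketed by begin_brtab/end_brtab marker insns.  */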
8989 remove_useless_addtr_insns (1);
8991 if (pa_cpu < PROCESSOR_8000)
8992 pa_combine_instructions ();
8995 /* This is fairly cheap, so always run it if optimizing. */
8996 if (optimize > 0 && !TARGET_BIG_SWITCH)
8998 /* Find and explode all ADDR_VEC or ADDR_DIFF_VEC insns. */
8999 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
9001 rtx pattern, tmp, location, label;
9002 unsigned int length, i;
9004 /* Find an ADDR_VEC or ADDR_DIFF_VEC insn to explode. */
9005 if (GET_CODE (insn) != JUMP_INSN
9006 || (GET_CODE (PATTERN (insn)) != ADDR_VEC
9007 && GET_CODE (PATTERN (insn)) != ADDR_DIFF_VEC))
9010 /* Emit marker for the beginning of the branch table. */
9011 emit_insn_before (gen_begin_brtab (), insn);
9013 pattern = PATTERN (insn);
9014 location = PREV_INSN (insn);
9015 length = XVECLEN (pattern, GET_CODE (pattern) == ADDR_DIFF_VEC);
9017 for (i = 0; i < length; i++)
9019 /* Emit a label before each jump to keep jump.c from
9020 removing this code. */
9021 tmp = gen_label_rtx ();
9022 LABEL_NUSES (tmp) = 1;
9023 emit_label_after (tmp, location);
9024 location = NEXT_INSN (location);
9026 if (GET_CODE (pattern) == ADDR_VEC)
9027 label = XEXP (XVECEXP (pattern, 0, i), 0);
9029 label = XEXP (XVECEXP (pattern, 1, i), 0);
9031 tmp = gen_short_jump (label);
9033 /* Emit the jump itself. */
9034 tmp = emit_jump_insn_after (tmp, location);
9035 JUMP_LABEL (tmp) = label;
9036 LABEL_NUSES (label)++;
9037 location = NEXT_INSN (location);
9039 /* Emit a BARRIER after the jump. */
9040 emit_barrier_after (location);
9041 location = NEXT_INSN (location);
9044 /* Emit marker for the end of the branch table. */
9045 emit_insn_before (gen_end_brtab (), location);
9046 location = NEXT_INSN (location);
9047 emit_barrier_after (location);
9049 /* Delete the ADDR_VEC or ADDR_DIFF_VEC. */
9055 /* Still need brtab marker insns. FIXME: the presence of these
9056 markers disables output of the branch table to readonly memory,
9057 and any alignment directives that might be needed. Possibly,
9058 the begin_brtab insn should be output before the label for the
9059 table. This doesn't matter at the moment since the tables are
9060 always output in the text section. */
9061 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
9063 /* Find an ADDR_VEC insn. */
9064 if (GET_CODE (insn) != JUMP_INSN
9065 || (GET_CODE (PATTERN (insn)) != ADDR_VEC
9066 && GET_CODE (PATTERN (insn)) != ADDR_DIFF_VEC))
9069 /* Now generate markers for the beginning and end of the
9071 emit_insn_before (gen_begin_brtab (), insn);
9072 emit_insn_after (gen_end_brtab (), insn);
9077 /* The PA has a number of odd instructions which can perform multiple
9078 tasks at once. On first generation PA machines (PA1.0 and PA1.1)
9079 it may be profitable to combine two instructions into one instruction
9080 with two outputs. It's not profitable on PA2.0 machines because the
9081 two outputs would take two slots in the reorder buffers.
9083 This routine finds instructions which can be combined and combines
9084 them. We only support some of the potential combinations, and we
9085 only try common ways to find suitable instructions.
9087 * addb can add two registers or a register and a small integer
9088 and jump to a nearby (+-8k) location. Normally the jump to the
9089 nearby location is conditional on the result of the add, but by
9090 using the "true" condition we can make the jump unconditional.
9091 Thus addb can perform two independent operations in one insn.
9093 * movb is similar to addb in that it can perform a reg->reg
9094 or small immediate->reg copy and jump to a nearby (+-8k) location.
9096 * fmpyadd and fmpysub can perform an FP multiply and either an
9097 FP add or FP sub if the operands of the multiply and add/sub are
9098 independent (there are other minor restrictions). Note both
9099 the fmpy and fadd/fsub can in theory move to better spots according
9100 to data dependencies, but for now we require the fmpy stay at a
9103 * Many of the memory operations can perform pre & post updates
9104 of index registers. GCC's pre/post increment/decrement addressing
9105 is far too simple to take advantage of all the possibilities. This
9106 pass may not be suitable since those insns may not be independent.
9108 * comclr can compare two ints or an int and a register, nullify
9109 the following instruction and zero some other register. This
9110 is more difficult to use as it's harder to find an insn which
9111 will generate a comclr than finding something like an unconditional
9112 branch. (conditional moves & long branches create comclr insns).
9114 * Most arithmetic operations can conditionally skip the next
9115 instruction. They can be viewed as "perform this operation
9116 and conditionally jump to this nearby location" (where nearby
9117 is an insn away). These are difficult to use due to the
9118 branch length restrictions. */
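/* Illustrative example (not from the sources) of the addb case above:
   an independent pair such as

	addi	1,%r4,%r4
	b,n	L$17

   can be rewritten as the single instruction

	addib,tr	1,%r4,L$17

   using the always-true completer, matching the "add%I1b,tr" template
   emitted by pa_output_parallel_addb.  */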
9121 pa_combine_instructions (void)
9123 rtx anchor, new_rtx;
9125 /* This can get expensive since the basic algorithm is on the
9126 order of O(n^2) (or worse). Only do it for -O2 or higher
9127 levels of optimization. */
9131 /* Walk down the list of insns looking for "anchor" insns which
9132 may be combined with "floating" insns. As the name implies,
9133 "anchor" instructions don't move, while "floating" insns may
9135 new_rtx = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, NULL_RTX, NULL_RTX));
9136 new_rtx = make_insn_raw (new_rtx);
9138 for (anchor = get_insns (); anchor; anchor = NEXT_INSN (anchor))
9140 enum attr_pa_combine_type anchor_attr;
9141 enum attr_pa_combine_type floater_attr;
9143 /* We only care about INSNs, JUMP_INSNs, and CALL_INSNs.
9144 Also ignore any special USE insns. */
9145 if ((GET_CODE (anchor) != INSN
9146 && GET_CODE (anchor) != JUMP_INSN
9147 && GET_CODE (anchor) != CALL_INSN)
9148 || GET_CODE (PATTERN (anchor)) == USE
9149 || GET_CODE (PATTERN (anchor)) == CLOBBER
9150 || GET_CODE (PATTERN (anchor)) == ADDR_VEC
9151 || GET_CODE (PATTERN (anchor)) == ADDR_DIFF_VEC)
9154 anchor_attr = get_attr_pa_combine_type (anchor);
9155 /* See if anchor is an insn suitable for combination. */
9156 if (anchor_attr == PA_COMBINE_TYPE_FMPY
9157 || anchor_attr == PA_COMBINE_TYPE_FADDSUB
9158 || (anchor_attr == PA_COMBINE_TYPE_UNCOND_BRANCH
9159 && ! forward_branch_p (anchor)))
9163 for (floater = PREV_INSN (anchor);
9165 floater = PREV_INSN (floater))
9167 if (GET_CODE (floater) == NOTE
9168 || (GET_CODE (floater) == INSN
9169 && (GET_CODE (PATTERN (floater)) == USE
9170 || GET_CODE (PATTERN (floater)) == CLOBBER)))
9173 /* Anything except a regular INSN will stop our search. */
9174 if (GET_CODE (floater) != INSN
9175 || GET_CODE (PATTERN (floater)) == ADDR_VEC
9176 || GET_CODE (PATTERN (floater)) == ADDR_DIFF_VEC)
9182 /* See if FLOATER is suitable for combination with the
9184 floater_attr = get_attr_pa_combine_type (floater);
9185 if ((anchor_attr == PA_COMBINE_TYPE_FMPY
9186 && floater_attr == PA_COMBINE_TYPE_FADDSUB)
9187 || (anchor_attr == PA_COMBINE_TYPE_FADDSUB
9188 && floater_attr == PA_COMBINE_TYPE_FMPY))
9190 /* If ANCHOR and FLOATER can be combined, then we're
9191 done with this pass. */
9192 if (pa_can_combine_p (new_rtx, anchor, floater, 0,
9193 SET_DEST (PATTERN (floater)),
9194 XEXP (SET_SRC (PATTERN (floater)), 0),
9195 XEXP (SET_SRC (PATTERN (floater)), 1)))
9199 else if (anchor_attr == PA_COMBINE_TYPE_UNCOND_BRANCH
9200 && floater_attr == PA_COMBINE_TYPE_ADDMOVE)
9202 if (GET_CODE (SET_SRC (PATTERN (floater))) == PLUS)
9204 if (pa_can_combine_p (new_rtx, anchor, floater, 0,
9205 SET_DEST (PATTERN (floater)),
9206 XEXP (SET_SRC (PATTERN (floater)), 0),
9207 XEXP (SET_SRC (PATTERN (floater)), 1)))
9212 if (pa_can_combine_p (new_rtx, anchor, floater, 0,
9213 SET_DEST (PATTERN (floater)),
9214 SET_SRC (PATTERN (floater)),
9215 SET_SRC (PATTERN (floater))))
9221 /* If we didn't find anything on the backwards scan try forwards. */
9223 && (anchor_attr == PA_COMBINE_TYPE_FMPY
9224 || anchor_attr == PA_COMBINE_TYPE_FADDSUB))
9226 for (floater = anchor; floater; floater = NEXT_INSN (floater))
9228 if (GET_CODE (floater) == NOTE
9229 || (GET_CODE (floater) == INSN
9230 && (GET_CODE (PATTERN (floater)) == USE
9231 || GET_CODE (PATTERN (floater)) == CLOBBER)))
9235 /* Anything except a regular INSN will stop our search. */
9236 if (GET_CODE (floater) != INSN
9237 || GET_CODE (PATTERN (floater)) == ADDR_VEC
9238 || GET_CODE (PATTERN (floater)) == ADDR_DIFF_VEC)
9244 /* See if FLOATER is suitable for combination with the
9246 floater_attr = get_attr_pa_combine_type (floater);
9247 if ((anchor_attr == PA_COMBINE_TYPE_FMPY
9248 && floater_attr == PA_COMBINE_TYPE_FADDSUB)
9249 || (anchor_attr == PA_COMBINE_TYPE_FADDSUB
9250 && floater_attr == PA_COMBINE_TYPE_FMPY))
9252 /* If ANCHOR and FLOATER can be combined, then we're
9253 done with this pass. */
9254 if (pa_can_combine_p (new_rtx, anchor, floater, 1,
9255 SET_DEST (PATTERN (floater)),
9256 XEXP (SET_SRC (PATTERN (floater)),
9258 XEXP (SET_SRC (PATTERN (floater)),
9265 /* FLOATER will be nonzero if we found a suitable floating
9266 insn for combination with ANCHOR. */
9268 && (anchor_attr == PA_COMBINE_TYPE_FADDSUB
9269 || anchor_attr == PA_COMBINE_TYPE_FMPY))
9271 /* Emit the new instruction and delete the old anchor. */
9272 emit_insn_before (gen_rtx_PARALLEL
9274 gen_rtvec (2, PATTERN (anchor),
9275 PATTERN (floater))),
9278 SET_INSN_DELETED (anchor);
9280 /* Emit a special USE insn for FLOATER, then delete
9281 the floating insn. */
9282 emit_insn_before (gen_rtx_USE (VOIDmode, floater), floater);
9283 delete_insn (floater);
9288 && anchor_attr == PA_COMBINE_TYPE_UNCOND_BRANCH)
9291 /* Emit the new_jump instruction and delete the old anchor. */
9293 = emit_jump_insn_before (gen_rtx_PARALLEL
9295 gen_rtvec (2, PATTERN (anchor),
9296 PATTERN (floater))),
9299 JUMP_LABEL (temp) = JUMP_LABEL (anchor);
9300 SET_INSN_DELETED (anchor);
9302 /* Emit a special USE insn for FLOATER, then delete
9303 the floating insn. */
9304 emit_insn_before (gen_rtx_USE (VOIDmode, floater), floater);
9305 delete_insn (floater);
9313 pa_can_combine_p (rtx new_rtx, rtx anchor, rtx floater, int reversed, rtx dest,
9316 int insn_code_number;
9319 /* Create a PARALLEL with the patterns of ANCHOR and
9320 FLOATER, try to recognize it, then test constraints
9321 for the resulting pattern.
9323 If the pattern doesn't match or the constraints
9324 aren't met keep searching for a suitable floater
9326 XVECEXP (PATTERN (new_rtx), 0, 0) = PATTERN (anchor);
9327 XVECEXP (PATTERN (new_rtx), 0, 1) = PATTERN (floater);
9328 INSN_CODE (new_rtx) = -1;
9329 insn_code_number = recog_memoized (new_rtx);
9330 if (insn_code_number < 0
9331 || (extract_insn (new_rtx), ! constrain_operands (1)))
9345 /* There are up to three operands to consider. One
9346 output and two inputs.
9348 The output must not be used between FLOATER & ANCHOR
9349 exclusive. The inputs must not be set between
9350 FLOATER and ANCHOR exclusive. */
9352 if (reg_used_between_p (dest, start, end))
9355 if (reg_set_between_p (src1, start, end))
9358 if (reg_set_between_p (src2, start, end))
9361 /* If we get here, then everything is good. */
9365 /* Return nonzero if references for INSN are delayed.
9367 Millicode insns are actually function calls with some special
9368 constraints on arguments and register usage.
9370 Millicode calls always expect their arguments in the integer argument
9371 registers, and always return their result in %r29 (ret1). They
9372 are expected to clobber their arguments, %r1, %r29, and the return
9373 pointer which is %r31 on 32-bit and %r2 on 64-bit, and nothing else.
9375 This function tells reorg that the references to arguments and
9376 millicode calls do not appear to happen until after the millicode call.
9377 This allows reorg to put insns which set the argument registers into the
9378 delay slot of the millicode call -- thus they act more like traditional
9381 Note we cannot consider side effects of the insn to be delayed because
9382 the branch and link insn will clobber the return pointer. If we happened
9383 to use the return pointer in the delay slot of the call, then we lose.
9385 get_attr_type will try to recognize the given insn, so make sure to
9386 filter out things it will not accept -- SEQUENCE, USE and CLOBBER insns
9389 pa_insn_refs_are_delayed (rtx insn)
9391 return ((GET_CODE (insn) == INSN
9392 && GET_CODE (PATTERN (insn)) != SEQUENCE
9393 && GET_CODE (PATTERN (insn)) != USE
9394 && GET_CODE (PATTERN (insn)) != CLOBBER
9395 && get_attr_type (insn) == TYPE_MILLI));
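/* For illustration (hypothetical scheduling, assuming the standard $$mulI
   millicode routine): because argument references are treated as delayed,
   reorg may place the argument setup in the call's delay slot,

	bl	$$mulI,%r31
	ldi	7,%r26		; argument loaded in the delay slot

   which is safe only because the millicode routine does not read %r26
   before the branch-and-link itself completes.  */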
9398 /* Promote the return value, but not the arguments. */
9400 static enum machine_mode
9401 pa_promote_function_mode (const_tree type ATTRIBUTE_UNUSED,
9402 enum machine_mode mode,
9403 int *punsignedp ATTRIBUTE_UNUSED,
9404 const_tree fntype ATTRIBUTE_UNUSED,
9407 if (for_return == 0)
9409 return promote_mode (type, mode, punsignedp);
9412 /* On the HP-PA the value is found in register(s) 28(-29), unless
9413 the mode is SF or DF. Then the value is returned in fr4 (32).
9415 This must perform the same promotions as PROMOTE_MODE, else promoting
9416 return values in TARGET_PROMOTE_FUNCTION_MODE will not work correctly.
9418 Small structures must be returned in a PARALLEL on PA64 in order
9419 to match the HP Compiler ABI. */
9422 pa_function_value (const_tree valtype,
9423 const_tree func ATTRIBUTE_UNUSED,
9424 bool outgoing ATTRIBUTE_UNUSED)
9426 enum machine_mode valmode;
9428 if (AGGREGATE_TYPE_P (valtype)
9429 || TREE_CODE (valtype) == COMPLEX_TYPE
9430 || TREE_CODE (valtype) == VECTOR_TYPE)
9434 /* Aggregates with a size less than or equal to 128 bits are
9435 returned in GR 28(-29). They are left justified. The pad
9436 bits are undefined. Larger aggregates are returned in
9440 int ub = int_size_in_bytes (valtype) <= UNITS_PER_WORD ? 1 : 2;
9442 for (i = 0; i < ub; i++)
9444 loc[i] = gen_rtx_EXPR_LIST (VOIDmode,
9445 gen_rtx_REG (DImode, 28 + i),
9450 return gen_rtx_PARALLEL (BLKmode, gen_rtvec_v (ub, loc));
9452 else if (int_size_in_bytes (valtype) > UNITS_PER_WORD)
9454 /* Aggregates 5 to 8 bytes in size are returned in general
9455 registers r28-r29 in the same manner as other non
9456 floating-point objects. The data is right-justified and
9457 zero-extended to 64 bits. This is opposite to the normal
9458 justification used on big endian targets and requires
9459 special treatment. */
9460 rtx loc = gen_rtx_EXPR_LIST (VOIDmode,
9461 gen_rtx_REG (DImode, 28), const0_rtx);
9462 return gen_rtx_PARALLEL (BLKmode, gen_rtvec (1, loc));
9466 if ((INTEGRAL_TYPE_P (valtype)
9467 && GET_MODE_BITSIZE (TYPE_MODE (valtype)) < BITS_PER_WORD)
9468 || POINTER_TYPE_P (valtype))
9469 valmode = word_mode;
9471 valmode = TYPE_MODE (valtype);
9473 if (TREE_CODE (valtype) == REAL_TYPE
9474 && !AGGREGATE_TYPE_P (valtype)
9475 && TYPE_MODE (valtype) != TFmode
9476 && !TARGET_SOFT_FLOAT)
9477 return gen_rtx_REG (valmode, 32);
9479 return gen_rtx_REG (valmode, 28);
9482 /* Implement the TARGET_LIBCALL_VALUE hook. */
9485 pa_libcall_value (enum machine_mode mode,
9486 const_rtx fun ATTRIBUTE_UNUSED)
9488 if (! TARGET_SOFT_FLOAT
9489 && (mode == SFmode || mode == DFmode))
9490 return gen_rtx_REG (mode, 32);
9492 return gen_rtx_REG (mode, 28);
9495 /* Implement the TARGET_FUNCTION_VALUE_REGNO_P hook. */
9498 pa_function_value_regno_p (const unsigned int regno)
9501 || (! TARGET_SOFT_FLOAT && regno == 32))
9507 /* Update the data in CUM to advance over an argument
9508 of mode MODE and data type TYPE.
9509 (TYPE is null for libcalls where that information may not be available.) */
9512 pa_function_arg_advance (cumulative_args_t cum_v, enum machine_mode mode,
9513 const_tree type, bool named ATTRIBUTE_UNUSED)
9515 CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
9516 int arg_size = FUNCTION_ARG_SIZE (mode, type);
9518 cum->nargs_prototype--;
9519 cum->words += (arg_size
9520 + ((cum->words & 01)
9521 && type != NULL_TREE
9525 /* Return the location of a parameter that is passed in a register or NULL
9526 if the parameter has any component that is passed in memory.
9528 This is new code and will be pushed into the net sources after
9531 ??? We might want to restructure this so that it looks more like other
9534 pa_function_arg (cumulative_args_t cum_v, enum machine_mode mode,
9535 const_tree type, bool named ATTRIBUTE_UNUSED)
9537 CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
9538 int max_arg_words = (TARGET_64BIT ? 8 : 4);
9545 if (mode == VOIDmode)
9548 arg_size = FUNCTION_ARG_SIZE (mode, type);
9550 /* If this arg would be passed partially or totally on the stack, then
9551 this routine should return zero. pa_arg_partial_bytes will
9552 handle arguments which are split between regs and stack slots if
9553 the ABI mandates split arguments. */
9556 /* The 32-bit ABI does not split arguments. */
9557 if (cum->words + arg_size > max_arg_words)
9563 alignment = cum->words & 1;
9564 if (cum->words + alignment >= max_arg_words)
9568 /* The 32bit ABIs and the 64bit ABIs are rather different,
9569 particularly in their handling of FP registers. We might
9570 be able to cleverly share code between them, but I'm not
9571 going to bother in the hope that splitting them up results
9572 in code that is more easily understood. */
9576 /* Advance the base registers to their current locations.
9578 Remember, gprs grow towards smaller register numbers while
9579 fprs grow to higher register numbers. Also remember that
9580 although FP regs are 32-bit addressable, we pretend that
9581 the registers are 64-bits wide. */
9582 gpr_reg_base = 26 - cum->words;
9583 fpr_reg_base = 32 + cum->words;
9585 /* Arguments wider than one word and small aggregates need special
9589 || (type && (AGGREGATE_TYPE_P (type)
9590 || TREE_CODE (type) == COMPLEX_TYPE
9591 || TREE_CODE (type) == VECTOR_TYPE)))
9593 /* Double-extended precision (80-bit), quad-precision (128-bit)
9594 and aggregates including complex numbers are aligned on
9595 128-bit boundaries. The first eight 64-bit argument slots
9596 are associated one-to-one, with general registers r26
9597 through r19, and also with floating-point registers fr4
9598 through fr11. Arguments larger than one word are always
9599 passed in general registers.
9601 Using a PARALLEL with a word mode register results in left
9602 justified data on a big-endian target. */
9605 int i, offset = 0, ub = arg_size;
9607 /* Align the base register. */
9608 gpr_reg_base -= alignment;
9610 ub = MIN (ub, max_arg_words - cum->words - alignment);
9611 for (i = 0; i < ub; i++)
9613 loc[i] = gen_rtx_EXPR_LIST (VOIDmode,
9614 gen_rtx_REG (DImode, gpr_reg_base),
9620 return gen_rtx_PARALLEL (mode, gen_rtvec_v (ub, loc));
9625 /* If the argument is larger than a word, then we know precisely
9626 which registers we must use. */
9640 /* Structures 5 to 8 bytes in size are passed in the general
9641 registers in the same manner as other non floating-point
9642 objects. The data is right-justified and zero-extended
9643 to 64 bits. This is opposite to the normal justification
9644 used on big endian targets and requires special treatment.
9645 We now define BLOCK_REG_PADDING to pad these objects.
9646 Aggregates, complex and vector types are passed in the same
9647 manner as structures. */
9649 || (type && (AGGREGATE_TYPE_P (type)
9650 || TREE_CODE (type) == COMPLEX_TYPE
9651 || TREE_CODE (type) == VECTOR_TYPE)))
9653 rtx loc = gen_rtx_EXPR_LIST (VOIDmode,
9654 gen_rtx_REG (DImode, gpr_reg_base),
9656 return gen_rtx_PARALLEL (BLKmode, gen_rtvec (1, loc));
9661 /* We have a single word (32 bits). A simple computation
9662 will get us the register #s we need. */
9663 gpr_reg_base = 26 - cum->words;
9664 fpr_reg_base = 32 + 2 * cum->words;
9668 /* Determine if the argument needs to be passed in both general and
9669 floating point registers. */
9670 if (((TARGET_PORTABLE_RUNTIME || TARGET_64BIT || TARGET_ELF32)
9671 /* If we are doing soft-float with portable runtime, then there
9672 is no need to worry about FP regs. */
9673 && !TARGET_SOFT_FLOAT
9674 /* The parameter must be some kind of scalar float, else we just
9675 pass it in integer registers. */
9676 && GET_MODE_CLASS (mode) == MODE_FLOAT
9677 /* The target function must not have a prototype. */
9678 && cum->nargs_prototype <= 0
9679 /* libcalls do not need to pass items in both FP and general
9681 && type != NULL_TREE
9682 /* All this hair applies to "outgoing" args only. This includes
9683 sibcall arguments setup with FUNCTION_INCOMING_ARG. */
9685 /* Also pass outgoing floating arguments in both registers in indirect
9686 calls with the 32 bit ABI and the HP assembler since there is no
9687 way to specify the argument locations in static functions. */
9692 && GET_MODE_CLASS (mode) == MODE_FLOAT))
9698 gen_rtx_EXPR_LIST (VOIDmode,
9699 gen_rtx_REG (mode, fpr_reg_base),
9701 gen_rtx_EXPR_LIST (VOIDmode,
9702 gen_rtx_REG (mode, gpr_reg_base),
9707 /* See if we should pass this parameter in a general register. */
9708 if (TARGET_SOFT_FLOAT
9709 /* Indirect calls in the normal 32bit ABI require all arguments
9710 to be passed in general registers. */
9711 || (!TARGET_PORTABLE_RUNTIME
9715 /* If the parameter is not a scalar floating-point parameter,
9716 then it belongs in GPRs. */
9717 || GET_MODE_CLASS (mode) != MODE_FLOAT
9718 /* Structure with single SFmode field belongs in GPR. */
9719 || (type && AGGREGATE_TYPE_P (type)))
9720 retval = gen_rtx_REG (mode, gpr_reg_base);
9722 retval = gen_rtx_REG (mode, fpr_reg_base);
9727 /* Arguments larger than one word are double word aligned. */
9730 pa_function_arg_boundary (enum machine_mode mode, const_tree type)
9732 bool singleword = (type
9733 ? (integer_zerop (TYPE_SIZE (type))
9734 || !TREE_CONSTANT (TYPE_SIZE (type))
9735 || int_size_in_bytes (type) <= UNITS_PER_WORD)
9736 : GET_MODE_SIZE (mode) <= UNITS_PER_WORD);
9738 return singleword ? PARM_BOUNDARY : MAX_PARM_BOUNDARY;
9741 /* If this arg would be passed totally in registers or totally on the stack,
9742 then this routine should return zero. */
9745 pa_arg_partial_bytes (cumulative_args_t cum_v, enum machine_mode mode,
9746 tree type, bool named ATTRIBUTE_UNUSED)
9748 CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
9749 unsigned int max_arg_words = 8;
9750 unsigned int offset = 0;
9755 if (FUNCTION_ARG_SIZE (mode, type) > 1 && (cum->words & 1))
9758 if (cum->words + offset + FUNCTION_ARG_SIZE (mode, type) <= max_arg_words)
9759 /* Arg fits fully into registers. */
9761 else if (cum->words + offset >= max_arg_words)
9762 /* Arg fully on the stack. */
9766 return (max_arg_words - cum->words - offset) * UNITS_PER_WORD;
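/* Worked example of the computation above (illustrative): on the 64-bit
   target (max_arg_words == 8, UNITS_PER_WORD == 8), a three-word aggregate
   arriving when cum->words == 6 gets offset 0, does not fit entirely in
   registers and does not start on the stack, so (8 - 6 - 0) * 8 == 16
   bytes are passed in registers and the remaining word goes to the
   stack.  */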
9770 /* A get_unnamed_section callback for switching to the text section.
9772 This function is only used with SOM. Because we don't support
9773 named subspaces, we can only create a new subspace or switch back
9774 to the default text subspace. */
9777 som_output_text_section_asm_op (const void *data ATTRIBUTE_UNUSED)
9779 gcc_assert (TARGET_SOM);
9782 if (cfun && cfun->machine && !cfun->machine->in_nsubspa)
9784 /* We only want to emit a .nsubspa directive once at the
9785 start of the function. */
9786 cfun->machine->in_nsubspa = 1;
9788 /* Create a new subspace for the text. This provides
9789 better stub placement and one-only functions. */
9791 && DECL_ONE_ONLY (cfun->decl)
9792 && !DECL_WEAK (cfun->decl))
9794 output_section_asm_op ("\t.SPACE $TEXT$\n"
9795 "\t.NSUBSPA $CODE$,QUAD=0,ALIGN=8,"
9796 "ACCESS=44,SORT=24,COMDAT");
9802 /* There isn't a current function or the body of the current
9803 function has been completed. So, we are changing to the
9804 text section to output debugging information. Thus, we
9805 need to forget that we are in the text section so that
9806 varasm.c will call us when text_section is selected again. */
9807 gcc_assert (!cfun || !cfun->machine
9808 || cfun->machine->in_nsubspa == 2);
9811 output_section_asm_op ("\t.SPACE $TEXT$\n\t.NSUBSPA $CODE$");
9814 output_section_asm_op ("\t.SPACE $TEXT$\n\t.SUBSPA $CODE$");
9817 /* A get_unnamed_section callback for switching to comdat data
9818 sections. This function is only used with SOM. */
9821 som_output_comdat_data_section_asm_op (const void *data)
9824 output_section_asm_op (data);
9827 /* Implement TARGET_ASM_INITIALIZE_SECTIONS */
9830 pa_som_asm_init_sections (void)
9833 = get_unnamed_section (0, som_output_text_section_asm_op, NULL);
9835 /* SOM puts readonly data in the default $LIT$ subspace when PIC code
9836 is not being generated. */
9837 som_readonly_data_section
9838 = get_unnamed_section (0, output_section_asm_op,
9839 "\t.SPACE $TEXT$\n\t.SUBSPA $LIT$");
9841 /* When secondary definitions are not supported, SOM makes readonly
9842 data one-only by creating a new $LIT$ subspace in $TEXT$ with
9844 som_one_only_readonly_data_section
9845 = get_unnamed_section (0, som_output_comdat_data_section_asm_op,
9847 "\t.NSUBSPA $LIT$,QUAD=0,ALIGN=8,"
9848 "ACCESS=0x2c,SORT=16,COMDAT");
9851 /* When secondary definitions are not supported, SOM makes data one-only
9852 by creating a new $DATA$ subspace in $PRIVATE$ with the comdat flag. */
9853 som_one_only_data_section
9854 = get_unnamed_section (SECTION_WRITE,
9855 som_output_comdat_data_section_asm_op,
9856 "\t.SPACE $PRIVATE$\n"
9857 "\t.NSUBSPA $DATA$,QUAD=1,ALIGN=8,"
9858 "ACCESS=31,SORT=24,COMDAT");
9861 som_tm_clone_table_section
9862 = get_unnamed_section (0, output_section_asm_op,
9863 "\t.SPACE $PRIVATE$\n\t.SUBSPA $TM_CLONE_TABLE$");
9865 /* FIXME: HPUX ld generates incorrect GOT entries for "T" fixups
9866 which reference data within the $TEXT$ space (for example constant
9867 strings in the $LIT$ subspace).
9869 The assemblers (GAS and HP as) both have problems with handling
9870 the difference of two symbols which is the other correct way to
9871 reference constant data during PIC code generation.
9873 So, there's no way to reference constant data which is in the
9874 $TEXT$ space during PIC generation. Instead place all constant
9875 data into the $PRIVATE$ subspace (this reduces sharing, but it
9876 works correctly). */
9877 readonly_data_section = flag_pic ? data_section : som_readonly_data_section;
9879 /* We must not have a reference to an external symbol defined in a
9880 shared library in a readonly section, else the SOM linker will
9883 So, we force exception information into the data section. */
9884 exception_section = data_section;
9887 /* Implement TARGET_ASM_TM_CLONE_TABLE_SECTION. */
9890 pa_som_tm_clone_table_section (void)
9892 return som_tm_clone_table_section;
9895 /* On hpux10, the linker will give an error if we have a reference
9896 in the read-only data section to a symbol defined in a shared
9897 library. Therefore, expressions that might require a reloc can
9898 not be placed in the read-only data section. */
9901 pa_select_section (tree exp, int reloc,
9902 unsigned HOST_WIDE_INT align ATTRIBUTE_UNUSED)
9904 if (TREE_CODE (exp) == VAR_DECL
9905 && TREE_READONLY (exp)
9906 && !TREE_THIS_VOLATILE (exp)
9907 && DECL_INITIAL (exp)
9908 && (DECL_INITIAL (exp) == error_mark_node
9909 || TREE_CONSTANT (DECL_INITIAL (exp)))
9913 && DECL_ONE_ONLY (exp)
9914 && !DECL_WEAK (exp))
9915 return som_one_only_readonly_data_section;
9917 return readonly_data_section;
9919 else if (CONSTANT_CLASS_P (exp) && !reloc)
9920 return readonly_data_section;
9922 && TREE_CODE (exp) == VAR_DECL
9923 && DECL_ONE_ONLY (exp)
9924 && !DECL_WEAK (exp))
9925 return som_one_only_data_section;
9927 return data_section;
9931 pa_globalize_label (FILE *stream, const char *name)
9933 /* We only handle DATA objects here, functions are globalized in
9934 ASM_DECLARE_FUNCTION_NAME. */
9935 if (! FUNCTION_NAME_P (name))
9937 fputs ("\t.EXPORT ", stream);
9938 assemble_name (stream, name);
9939 fputs (",DATA\n", stream);
9943 /* Worker function for TARGET_STRUCT_VALUE_RTX. */
9946 pa_struct_value_rtx (tree fntype ATTRIBUTE_UNUSED,
9947 int incoming ATTRIBUTE_UNUSED)
9949 return gen_rtx_REG (Pmode, PA_STRUCT_VALUE_REGNUM);
9952 /* Worker function for TARGET_RETURN_IN_MEMORY. */
9955 pa_return_in_memory (const_tree type, const_tree fntype ATTRIBUTE_UNUSED)
9957 /* SOM ABI says that objects larger than 64 bits are returned in memory.
9958 PA64 ABI says that objects larger than 128 bits are returned in memory.
9959 Note, int_size_in_bytes can return -1 if the size of the object is
9960 variable or larger than the maximum value that can be expressed as
9961 a HOST_WIDE_INT. It can also return zero for an empty type. The
9962 simplest way to handle variable and empty types is to pass them in
9963 memory. This avoids problems in defining the boundaries of argument
9964 slots, allocating registers, etc. */
9965 return (int_size_in_bytes (type) > (TARGET_64BIT ? 16 : 8)
9966 || int_size_in_bytes (type) <= 0);
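/* For example (following the comment above): on the 32-bit SOM target a
   12-byte struct is returned in memory (12 > 8), while an 8-byte struct is
   returned in registers; on PA64 the memory threshold is 16 bytes.  */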
9969 /* Structure to hold declaration and name of external symbols that are
9970 emitted by GCC. We generate a vector of these symbols and output them
9971 at the end of the file if and only if SYMBOL_REF_REFERENCED_P is true.
9972 This avoids putting out names that are never really used. */
9974 typedef struct GTY(()) extern_symbol
9980 /* Define gc'd vector type for extern_symbol. */
9982 /* Vector of extern_symbol pointers. */
9983 static GTY(()) vec<extern_symbol, va_gc> *extern_symbols;
9985 #ifdef ASM_OUTPUT_EXTERNAL_REAL
9986 /* Mark DECL (name NAME) as an external reference (assembler output
9987 file FILE). This saves the names to output at the end of the file
9988 if actually referenced. */
9991 pa_hpux_asm_output_external (FILE *file, tree decl, const char *name)
9993 gcc_assert (file == asm_out_file);
9994 extern_symbol p = {decl, name};
9995 vec_safe_push (extern_symbols, p);
9998 /* Output text required at the end of an assembler file.
9999 This includes deferred plabels and .import directives for
10000 all external symbols that were actually referenced. */
10003 pa_hpux_file_end (void)
10008 if (!NO_DEFERRED_PROFILE_COUNTERS)
10009 output_deferred_profile_counters ();
10011 output_deferred_plabels ();
10013 for (i = 0; vec_safe_iterate (extern_symbols, i, &p); i++)
10015 tree decl = p->decl;
10017 if (!TREE_ASM_WRITTEN (decl)
10018 && SYMBOL_REF_REFERENCED_P (XEXP (DECL_RTL (decl), 0)))
10019 ASM_OUTPUT_EXTERNAL_REAL (asm_out_file, decl, p->name);
10022 vec_free (extern_symbols);
10026 /* Return true if a change from mode FROM to mode TO for a register
10027 in register class RCLASS is invalid. */
10030 pa_cannot_change_mode_class (enum machine_mode from, enum machine_mode to,
10031 enum reg_class rclass)
10036 /* Reject changes to/from complex and vector modes. */
10037 if (COMPLEX_MODE_P (from) || VECTOR_MODE_P (from)
10038 || COMPLEX_MODE_P (to) || VECTOR_MODE_P (to))
10041 if (GET_MODE_SIZE (from) == GET_MODE_SIZE (to))
10044 /* There is no way to load QImode or HImode values directly from
10045 memory. SImode loads to the FP registers are not zero extended.
10046 On the 64-bit target, this conflicts with the definition of
10047 LOAD_EXTEND_OP. Thus, we can't allow changing between modes
10048 with different sizes in the floating-point registers. */
10049 if (MAYBE_FP_REG_CLASS_P (rclass))
10052 /* HARD_REGNO_MODE_OK places modes with sizes larger than a word
10053 in specific sets of registers. Thus, we cannot allow changing
10054 to a larger mode when it's larger than a word. */
10055 if (GET_MODE_SIZE (to) > UNITS_PER_WORD
10056 && GET_MODE_SIZE (to) > GET_MODE_SIZE (from))
10062 /* Returns TRUE if it is a good idea to tie two pseudo registers
10063 when one has mode MODE1 and one has mode MODE2.
10064 If HARD_REGNO_MODE_OK could produce different values for MODE1 and MODE2,
10065 for any hard reg, then this must be FALSE for correct output.
10067 We should return FALSE for QImode and HImode because these modes
10068 are not ok in the floating-point registers. However, this prevents
10069 tying these modes to SImode and DImode in the general registers.
10070 So, this isn't a good idea. We rely on HARD_REGNO_MODE_OK and
10071 CANNOT_CHANGE_MODE_CLASS to prevent these modes from being used
10072 in the floating-point registers. */
10075 pa_modes_tieable_p (enum machine_mode mode1, enum machine_mode mode2)
10077 /* Don't tie modes in different classes. */
10078 if (GET_MODE_CLASS (mode1) != GET_MODE_CLASS (mode2))
10085 /* Length in units of the trampoline instruction code. */
10087 #define TRAMPOLINE_CODE_SIZE (TARGET_64BIT ? 24 : (TARGET_PA_20 ? 32 : 40))
10090 /* Output assembler code for a block containing the constant parts
10091 of a trampoline, leaving space for the variable parts.
10093 The trampoline sets the static chain pointer to STATIC_CHAIN_REGNUM
10094 and then branches to the specified routine.
10096 This code template is copied from text segment to stack location
10097 and then patched with pa_trampoline_init to contain valid values,
10098 and then entered as a subroutine.
10100 It is best to keep this as small as possible to avoid having to
10101 flush multiple lines in the cache. */
10104 pa_asm_trampoline_template (FILE *f)
10108 fputs ("\tldw 36(%r22),%r21\n", f);
10109 fputs ("\tbb,>=,n %r21,30,.+16\n", f);
10110 if (ASSEMBLER_DIALECT == 0)
10111 fputs ("\tdepi 0,31,2,%r21\n", f);
10113 fputs ("\tdepwi 0,31,2,%r21\n", f);
10114 fputs ("\tldw 4(%r21),%r19\n", f);
10115 fputs ("\tldw 0(%r21),%r21\n", f);
10118 fputs ("\tbve (%r21)\n", f);
10119 fputs ("\tldw 40(%r22),%r29\n", f);
10120 fputs ("\t.word 0\n", f);
10121 fputs ("\t.word 0\n", f);
10125 fputs ("\tldsid (%r21),%r1\n", f);
10126 fputs ("\tmtsp %r1,%sr0\n", f);
10127 fputs ("\tbe 0(%sr0,%r21)\n", f);
10128 fputs ("\tldw 40(%r22),%r29\n", f);
10130 fputs ("\t.word 0\n", f);
10131 fputs ("\t.word 0\n", f);
10132 fputs ("\t.word 0\n", f);
10133 fputs ("\t.word 0\n", f);
10137 fputs ("\t.dword 0\n", f);
10138 fputs ("\t.dword 0\n", f);
10139 fputs ("\t.dword 0\n", f);
10140 fputs ("\t.dword 0\n", f);
10141 fputs ("\tmfia %r31\n", f);
10142 fputs ("\tldd 24(%r31),%r1\n", f);
10143 fputs ("\tldd 24(%r1),%r27\n", f);
10144 fputs ("\tldd 16(%r1),%r1\n", f);
10145 fputs ("\tbve (%r1)\n", f);
10146 fputs ("\tldd 32(%r31),%r31\n", f);
10147 fputs ("\t.dword 0 ; fptr\n", f);
10148 fputs ("\t.dword 0 ; static link\n", f);
10152 /* Emit RTL insns to initialize the variable parts of a trampoline.
10153 FNADDR is an RTX for the address of the function's pure code.
10154 CXT is an RTX for the static chain value for the function.
10156 Move the function address to the trampoline template at offset 36.
10157 Move the static chain value to trampoline template at offset 40.
10158 Move the trampoline address to trampoline template at offset 44.
10159 Move r19 to trampoline template at offset 48. The latter two
10160 words create a plabel for the indirect call to the trampoline.
10162 A similar sequence is used for the 64-bit port but the plabel is
10163 at the beginning of the trampoline.
10165 Finally, the cache entries for the trampoline code are flushed.
10166 This is necessary to ensure that the trampoline instruction sequence
10167 is written to memory prior to any attempts at prefetching the code
10171 pa_trampoline_init (rtx m_tramp, tree fndecl, rtx chain_value)
10173 rtx fnaddr = XEXP (DECL_RTL (fndecl), 0);
10174 rtx start_addr = gen_reg_rtx (Pmode);
10175 rtx end_addr = gen_reg_rtx (Pmode);
10176 rtx line_length = gen_reg_rtx (Pmode);
10179 emit_block_move (m_tramp, assemble_trampoline_template (),
10180 GEN_INT (TRAMPOLINE_SIZE), BLOCK_OP_NORMAL);
10181 r_tramp = force_reg (Pmode, XEXP (m_tramp, 0));
10185 tmp = adjust_address (m_tramp, Pmode, 36);
10186 emit_move_insn (tmp, fnaddr);
10187 tmp = adjust_address (m_tramp, Pmode, 40);
10188 emit_move_insn (tmp, chain_value);
10190 /* Create a fat pointer for the trampoline. */
10191 tmp = adjust_address (m_tramp, Pmode, 44);
10192 emit_move_insn (tmp, r_tramp);
10193 tmp = adjust_address (m_tramp, Pmode, 48);
10194 emit_move_insn (tmp, gen_rtx_REG (Pmode, 19));
10196 /* fdc and fic only use registers for the address to flush;
10197 they do not accept integer displacements. We align the
10198 start and end addresses to the beginning of their respective
10199 cache lines to minimize the number of lines flushed. */
10200 emit_insn (gen_andsi3 (start_addr, r_tramp,
10201 GEN_INT (-MIN_CACHELINE_SIZE)));
10202 tmp = force_reg (Pmode, plus_constant (Pmode, r_tramp,
10203 TRAMPOLINE_CODE_SIZE-1));
10204 emit_insn (gen_andsi3 (end_addr, tmp,
10205 GEN_INT (-MIN_CACHELINE_SIZE)));
10206 emit_move_insn (line_length, GEN_INT (MIN_CACHELINE_SIZE));
10207 emit_insn (gen_dcacheflushsi (start_addr, end_addr, line_length));
10208 emit_insn (gen_icacheflushsi (start_addr, end_addr, line_length,
10209 gen_reg_rtx (Pmode),
10210 gen_reg_rtx (Pmode)));
10214 tmp = adjust_address (m_tramp, Pmode, 56);
10215 emit_move_insn (tmp, fnaddr);
10216 tmp = adjust_address (m_tramp, Pmode, 64);
10217 emit_move_insn (tmp, chain_value);
10219 /* Create a fat pointer for the trampoline. */
10220 tmp = adjust_address (m_tramp, Pmode, 16);
10221 emit_move_insn (tmp, force_reg (Pmode, plus_constant (Pmode,
10223 tmp = adjust_address (m_tramp, Pmode, 24);
10224 emit_move_insn (tmp, gen_rtx_REG (Pmode, 27));
10226 /* fdc and fic only use registers for the address to flush;
10227 they do not accept integer displacements. We align the
10228 start and end addresses to the beginning of their respective
10229 cache lines to minimize the number of lines flushed. */
10230 tmp = force_reg (Pmode, plus_constant (Pmode, r_tramp, 32));
10231 emit_insn (gen_anddi3 (start_addr, tmp,
10232 GEN_INT (-MIN_CACHELINE_SIZE)));
10233 tmp = force_reg (Pmode, plus_constant (Pmode, tmp,
10234 TRAMPOLINE_CODE_SIZE - 1));
10235 emit_insn (gen_anddi3 (end_addr, tmp,
10236 GEN_INT (-MIN_CACHELINE_SIZE)));
10237 emit_move_insn (line_length, GEN_INT (MIN_CACHELINE_SIZE));
10238 emit_insn (gen_dcacheflushdi (start_addr, end_addr, line_length));
10239 emit_insn (gen_icacheflushdi (start_addr, end_addr, line_length,
10240 gen_reg_rtx (Pmode),
10241 gen_reg_rtx (Pmode)));
10245 /* Perform any machine-specific adjustment in the address of the trampoline.
10246 ADDR contains the address that was passed to pa_trampoline_init.
10247 Adjust the trampoline address to point to the plabel at offset 44. */
10250 pa_trampoline_adjust_address (rtx addr)
10253 addr = memory_address (Pmode, plus_constant (Pmode, addr, 46));
10258 pa_delegitimize_address (rtx orig_x)
10260 rtx x = delegitimize_mem_from_attrs (orig_x);
10262 if (GET_CODE (x) == LO_SUM
10263 && GET_CODE (XEXP (x, 1)) == UNSPEC
10264 && XINT (XEXP (x, 1), 1) == UNSPEC_DLTIND14R)
10265 return gen_const_mem (Pmode, XVECEXP (XEXP (x, 1), 0, 0));
10270 pa_internal_arg_pointer (void)
10272 /* The argument pointer and the hard frame pointer are the same in
10273 the 32-bit runtime, so we don't need a copy. */
10275 return copy_to_reg (virtual_incoming_args_rtx);
10277 return virtual_incoming_args_rtx;
10280 /* Given FROM and TO register numbers, say whether this elimination is allowed.
10281 Frame pointer elimination is automatically handled. */
10284 pa_can_eliminate (const int from, const int to)
10286 /* The argument cannot be eliminated in the 64-bit runtime. */
10287 if (TARGET_64BIT && from == ARG_POINTER_REGNUM)
10290 return (from == HARD_FRAME_POINTER_REGNUM && to == STACK_POINTER_REGNUM
10291 ? ! frame_pointer_needed
10295 /* Define the offset between two registers, FROM to be eliminated and its
10296 replacement TO, at the start of a routine. */
10298 pa_initial_elimination_offset (int from, int to)
10300 HOST_WIDE_INT offset;
10302 if ((from == HARD_FRAME_POINTER_REGNUM || from == FRAME_POINTER_REGNUM)
10303 && to == STACK_POINTER_REGNUM)
10304 offset = -pa_compute_frame_size (get_frame_size (), 0);
10305 else if (from == FRAME_POINTER_REGNUM && to == HARD_FRAME_POINTER_REGNUM)
10308 gcc_unreachable ();
10314 pa_conditional_register_usage (void)
10318 if (!TARGET_64BIT && !TARGET_PA_11)
10320 for (i = 56; i <= FP_REG_LAST; i++)
10321 fixed_regs[i] = call_used_regs[i] = 1;
10322 for (i = 33; i < 56; i += 2)
10323 fixed_regs[i] = call_used_regs[i] = 1;
10325 if (TARGET_DISABLE_FPREGS || TARGET_SOFT_FLOAT)
10327 for (i = FP_REG_FIRST; i <= FP_REG_LAST; i++)
10328 fixed_regs[i] = call_used_regs[i] = 1;
10331 fixed_regs[PIC_OFFSET_TABLE_REGNUM] = 1;
10334 /* Target hook for c_mode_for_suffix. */
10336 static enum machine_mode
10337 pa_c_mode_for_suffix (char suffix)
10339 if (HPUX_LONG_DOUBLE_LIBRARY)
10348 /* Target hook for function_section. */
10351 pa_function_section (tree decl, enum node_frequency freq,
10352 bool startup, bool exit)
10354 /* Put functions in text section if target doesn't have named sections. */
10355 if (!targetm_common.have_named_sections)
10356 return text_section;
10358 /* Force nested functions into the same section as the containing
10361 && DECL_SECTION_NAME (decl) == NULL_TREE
10362 && DECL_CONTEXT (decl) != NULL_TREE
10363 && TREE_CODE (DECL_CONTEXT (decl)) == FUNCTION_DECL
10364 && DECL_SECTION_NAME (DECL_CONTEXT (decl)) == NULL_TREE)
10365 return function_section (DECL_CONTEXT (decl));
10367 /* Otherwise, use the default function section. */
10368 return default_function_section (decl, freq, startup, exit);
10371 /* Implement TARGET_LEGITIMATE_CONSTANT_P.
10373 In 64-bit mode, we reject CONST_DOUBLES. We also reject CONST_INTS
10374 that need more than three instructions to load prior to reload. This
10375 limit is somewhat arbitrary. It takes three instructions to load a
10376 CONST_INT from memory but two are memory accesses. It may be better
10377 to increase the allowed range for CONST_INTS. We may also be able
10378 to handle CONST_DOUBLES. */
10381 pa_legitimate_constant_p (enum machine_mode mode, rtx x)
10383 if (GET_MODE_CLASS (mode) == MODE_FLOAT && x != CONST0_RTX (mode))
10386 if (!NEW_HP_ASSEMBLER && !TARGET_GAS && GET_CODE (x) == LABEL_REF)
10389 /* TLS_MODEL_GLOBAL_DYNAMIC and TLS_MODEL_LOCAL_DYNAMIC are not
10390 legitimate constants. The other variants can't be handled by
10391 the move patterns after reload starts. */
10392 if (PA_SYMBOL_REF_TLS_P (x))
10395 if (TARGET_64BIT && GET_CODE (x) == CONST_DOUBLE)
10399 && HOST_BITS_PER_WIDE_INT > 32
10400 && GET_CODE (x) == CONST_INT
10401 && !reload_in_progress
10402 && !reload_completed
10403 && !LEGITIMATE_64BIT_CONST_INT_P (INTVAL (x))
10404 && !pa_cint_ok_for_move (INTVAL (x)))
10407 if (function_label_operand (x, mode))
10413 /* Implement TARGET_SECTION_TYPE_FLAGS. */
10415 static unsigned int
10416 pa_section_type_flags (tree decl, const char *name, int reloc)
10418 unsigned int flags;
10420 flags = default_section_type_flags (decl, name, reloc);
10422 /* Function labels are placed in the constant pool. This can
10423 cause a section conflict if decls are put in ".data.rel.ro"
10424 or ".data.rel.ro.local" using the __attribute__ construct. */
10425 if (strcmp (name, ".data.rel.ro") == 0
10426 || strcmp (name, ".data.rel.ro.local") == 0)
10427 flags |= SECTION_WRITE | SECTION_RELRO;
/* pa_legitimate_address_p recognizes an RTL expression that is a
   valid memory address for an instruction.  The MODE argument is the
   machine mode for the MEM expression that wants to use this address.

   On HP PA-RISC, the legitimate address forms are REG+SMALLINT,
   REG+REG, and REG+(REG*SCALE).  The indexed address forms are only
   available with floating point loads and stores, and integer loads.
   We get better code by allowing indexed addresses in the initial
   RTL generation.

   The acceptance of indexed addresses as legitimate implies that we
   must provide patterns for doing indexed integer stores, or the move
   expanders must force the address of an indexed store to a register.
   We have adopted the latter approach.

   Another function of pa_legitimate_address_p is to ensure that
   the base register is a valid pointer for indexed instructions.
   On targets that have non-equivalent space registers, we have to
   know at the time of assembler output which register in a REG+REG
   pair is the base register.  The REG_POINTER flag is sometimes lost
   in reload and the following passes, so it can't be relied on during
   code generation.  Thus, we either have to canonicalize the order
   of the registers in REG+REG indexed addresses, or treat REG+REG
   addresses separately and provide patterns for both permutations.

   The latter approach requires several hundred additional lines of
   code in pa.md.  The downside to canonicalizing is that a PLUS
   in the wrong order can't combine to form a scaled indexed memory
   operand.  As we won't need to canonicalize the operands if the
   REG_POINTER lossage can be fixed, it seems better to canonicalize.

   We initially break out scaled indexed addresses in canonical order
   in pa_emit_move_sequence.  LEGITIMIZE_ADDRESS also canonicalizes
   scaled indexed addresses during RTL generation.  However, fold_rtx
   has its own opinion on how the operands of a PLUS should be ordered.
   If one of the operands is equivalent to a constant, it will make
   that operand the second operand.  As the base register is likely to
   be equivalent to a SYMBOL_REF, we have made it the second operand.

   pa_legitimate_address_p accepts REG+REG as legitimate when the
   operands are in the order INDEX+BASE on targets with non-equivalent
   space registers, and in any order on targets with equivalent space
   registers.  It accepts both MULT+BASE and BASE+MULT for scaled indexing.

   We treat a SYMBOL_REF as legitimate if it is part of the current
   function's constant pool, because such addresses can actually be
   output as REG+SMALLINT.  */

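/* As an illustrative sketch (forms only, not exhaustive), the RTL
   accepted below looks like:

       (reg BASE)                                       base register
       (plus (reg BASE) (const_int D))                  REG+SMALLINT
       (plus (reg INDEX) (reg BASE))                    REG+REG, base in XEXP (x, 1)
       (plus (mult (reg INDEX) (const_int SIZE)) (reg BASE))
                                                        scaled index

   where SIZE equals GET_MODE_SIZE (mode) and the displacement D must
   satisfy INT_5_BITS or base14_operand, subject to the mode and
   INT14_OK_STRICT restrictions checked in the code.  */
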
static bool
pa_legitimate_address_p (enum machine_mode mode, rtx x, bool strict)
{
  if ((REG_P (x)
       && (strict ? STRICT_REG_OK_FOR_BASE_P (x)
                  : REG_OK_FOR_BASE_P (x)))
      || ((GET_CODE (x) == PRE_DEC || GET_CODE (x) == POST_DEC
           || GET_CODE (x) == PRE_INC || GET_CODE (x) == POST_INC)
          && REG_P (XEXP (x, 0))
          && (strict ? STRICT_REG_OK_FOR_BASE_P (XEXP (x, 0))
                     : REG_OK_FOR_BASE_P (XEXP (x, 0)))))
    return true;

  if (GET_CODE (x) == PLUS)
    {
      rtx base, index;

      /* For REG+REG, the base register should be in XEXP (x, 1),
         so check it first.  */
      if (REG_P (XEXP (x, 1))
          && (strict ? STRICT_REG_OK_FOR_BASE_P (XEXP (x, 1))
                     : REG_OK_FOR_BASE_P (XEXP (x, 1))))
        base = XEXP (x, 1), index = XEXP (x, 0);
      else if (REG_P (XEXP (x, 0))
               && (strict ? STRICT_REG_OK_FOR_BASE_P (XEXP (x, 0))
                          : REG_OK_FOR_BASE_P (XEXP (x, 0))))
        base = XEXP (x, 0), index = XEXP (x, 1);
      else
        return false;
      if (GET_CODE (index) == CONST_INT)
        {
          if (INT_5_BITS (index))
            return true;

          /* When INT14_OK_STRICT is false, a secondary reload is needed
             to adjust the displacement of SImode and DImode floating point
             instructions.  So, we return false when STRICT is true.  We
             also reject long displacements for float mode addresses since
             the majority of accesses will use floating point instructions
             that don't support 14-bit offsets.  */
          if (!INT14_OK_STRICT
              && reload_in_progress
              && strict
              && mode != QImode
              && mode != HImode)
            return false;

          return base14_operand (index, mode);
        }
      if (!TARGET_DISABLE_INDEXING
          /* Only accept the "canonical" INDEX+BASE operand order
             on targets with non-equivalent space registers.  */
          && (TARGET_NO_SPACE_REGS
              ? REG_P (index)
              : (base == XEXP (x, 1) && REG_P (index)
                 && (reload_completed
                     || (reload_in_progress && HARD_REGISTER_P (base))
                     || REG_POINTER (base))
                 && (reload_completed
                     || (reload_in_progress && HARD_REGISTER_P (index))
                     || !REG_POINTER (index))))
          && MODE_OK_FOR_UNSCALED_INDEXING_P (mode)
          && (strict ? STRICT_REG_OK_FOR_INDEX_P (index)
                     : REG_OK_FOR_INDEX_P (index))
          && borx_reg_operand (base, Pmode)
          && borx_reg_operand (index, Pmode))
        return true;

      if (!TARGET_DISABLE_INDEXING
          && GET_CODE (index) == MULT
          && MODE_OK_FOR_SCALED_INDEXING_P (mode)
          && REG_P (XEXP (index, 0))
          && GET_MODE (XEXP (index, 0)) == Pmode
          && (strict ? STRICT_REG_OK_FOR_INDEX_P (XEXP (index, 0))
                     : REG_OK_FOR_INDEX_P (XEXP (index, 0)))
          && GET_CODE (XEXP (index, 1)) == CONST_INT
          && INTVAL (XEXP (index, 1))
             == (HOST_WIDE_INT) GET_MODE_SIZE (mode)
          && borx_reg_operand (base, Pmode))
        return true;

      return false;
    }
  if (GET_CODE (x) == LO_SUM)
    {
      rtx y = XEXP (x, 0);

      if (GET_CODE (y) == SUBREG)
        y = SUBREG_REG (y);

      if (REG_P (y)
          && (strict ? STRICT_REG_OK_FOR_BASE_P (y)
                     : REG_OK_FOR_BASE_P (y)))
        {
          /* Needed for -fPIC */
          if (mode == Pmode
              && GET_CODE (XEXP (x, 1)) == UNSPEC)
            return true;

          if (!INT14_OK_STRICT
              && reload_in_progress
              && strict
              && mode != QImode
              && mode != HImode)
            return false;

          if (CONSTANT_P (XEXP (x, 1)))
            return true;
        }
      return false;
    }

  if (GET_CODE (x) == CONST_INT && INT_5_BITS (x))
    return true;

  return false;
}

/* Look for machine dependent ways to make the invalid address AD a
   valid address.

   For the PA, transform:

        memory(X + <large int>)

   into:

        if ((<large int> & mask) >= (mask + 1) / 2)
          Y = (<large int> & ~mask) + mask + 1  Round up.
        else
          Y = (<large int> & ~mask)             Round down.
        Z = X + Y
        memory (Z + (<large int> - Y));

   This makes reload inheritance and reload_cse work better since Z
   can be reused.

   There may be more opportunities to improve code with this hook.  */

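/* A worked example (illustrative values only): for an SFmode access with
   !INT14_OK_STRICT, mask is 0x1f.  With <large int> = 0x13d, the low bits
   (0x13d & 0x1f) = 0x1d are >= 0x10, so we round up:
   Y = (0x13d & ~0x1f) + 0x20 = 0x140.  The reloaded base is Z = X + 0x140
   and the residual displacement is 0x13d - 0x140 = -3, which fits the
   5-bit displacement used by the floating point loads and stores.  */
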
rtx
pa_legitimize_reload_address (rtx ad, enum machine_mode mode,
                              int opnum, int type,
                              int ind_levels ATTRIBUTE_UNUSED)
{
  long offset, newoffset, mask;
  rtx new_rtx, temp = NULL_RTX;

  mask = (GET_MODE_CLASS (mode) == MODE_FLOAT
          && !INT14_OK_STRICT ? 0x1f : 0x3fff);

  /* If AD is a PLUS, try to fold its operands first.  */
  if (optimize && GET_CODE (ad) == PLUS)
    temp = simplify_binary_operation (PLUS, Pmode,
                                      XEXP (ad, 0), XEXP (ad, 1));

  new_rtx = temp ? temp : ad;
  if (optimize
      && GET_CODE (new_rtx) == PLUS
      && GET_CODE (XEXP (new_rtx, 0)) == REG
      && GET_CODE (XEXP (new_rtx, 1)) == CONST_INT)
    {
      offset = INTVAL (XEXP ((new_rtx), 1));

      /* Choose rounding direction.  Round up if we are >= halfway.  */
      if ((offset & mask) >= ((mask + 1) / 2))
        newoffset = (offset & ~mask) + mask + 1;
      else
        newoffset = offset & ~mask;

      /* Ensure that long displacements are aligned.  */
      if (mask == 0x3fff
          && (GET_MODE_CLASS (mode) == MODE_FLOAT
              || (TARGET_64BIT && (mode) == DImode)))
        newoffset &= ~(GET_MODE_SIZE (mode) - 1);

      if (newoffset != 0 && VAL_14_BITS_P (newoffset))
        {
          temp = gen_rtx_PLUS (Pmode, XEXP (new_rtx, 0),
                               GEN_INT (newoffset));
          ad = gen_rtx_PLUS (Pmode, temp, GEN_INT (offset - newoffset));
          push_reload (XEXP (ad, 0), 0, &XEXP (ad, 0), 0,
                       BASE_REG_CLASS, Pmode, VOIDmode, 0, 0,
                       opnum, (enum reload_type) type);